Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * execMain.c
4 : * top level executor interface routines
5 : *
6 : * INTERFACE ROUTINES
7 : * ExecutorStart()
8 : * ExecutorRun()
9 : * ExecutorFinish()
10 : * ExecutorEnd()
11 : *
12 : * These four procedures are the external interface to the executor.
13 : * In each case, the query descriptor is required as an argument.
14 : *
15 : * ExecutorStart must be called at the beginning of execution of any
16 : * query plan and ExecutorEnd must always be called at the end of
17 : * execution of a plan (unless it is aborted due to error).
18 : *
19 : * ExecutorRun accepts direction and count arguments that specify whether
20 : * the plan is to be executed forwards or backwards, and for how many tuples.
21 : * In some cases ExecutorRun may be called multiple times to process all
22 : * the tuples for a plan. It is also acceptable to stop short of executing
23 : * the whole plan (but only if it is a SELECT).
24 : *
25 : * ExecutorFinish must be called after the final ExecutorRun call and
26 : * before ExecutorEnd. This can be omitted only in case of EXPLAIN,
27 : * which should also omit ExecutorRun.
28 : *
29 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
30 : * Portions Copyright (c) 1994, Regents of the University of California
31 : *
32 : *
33 : * IDENTIFICATION
34 : * src/backend/executor/execMain.c
35 : *
36 : *-------------------------------------------------------------------------
37 : */
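/*
 * Illustrative sketch, not part of this file: the four-call protocol
 * described in the header comment above, as a simple caller might use it.
 * Error handling, portal integration, and snapshot setup are omitted, and
 * the CreateQueryDesc() arguments are deliberately elided:
 *
 *     QueryDesc  *qd = CreateQueryDesc(...);
 *
 *     if (!ExecutorStart(qd, 0))
 *         elog(ERROR, "cached plan was invalidated");   (simplified)
 *     ExecutorRun(qd, ForwardScanDirection, 0);   count 0 = run to completion
 *     ExecutorFinish(qd);
 *     ExecutorEnd(qd);
 *     FreeQueryDesc(qd);
 */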
38 : #include "postgres.h"
39 :
40 : #include "access/sysattr.h"
41 : #include "access/table.h"
42 : #include "access/tableam.h"
43 : #include "access/xact.h"
44 : #include "catalog/namespace.h"
45 : #include "catalog/partition.h"
46 : #include "commands/matview.h"
47 : #include "commands/trigger.h"
48 : #include "executor/executor.h"
49 : #include "executor/execPartition.h"
50 : #include "executor/nodeSubplan.h"
51 : #include "foreign/fdwapi.h"
52 : #include "mb/pg_wchar.h"
53 : #include "miscadmin.h"
54 : #include "nodes/queryjumble.h"
55 : #include "parser/parse_relation.h"
56 : #include "pgstat.h"
57 : #include "rewrite/rewriteHandler.h"
58 : #include "storage/lmgr.h"
59 : #include "tcop/utility.h"
60 : #include "utils/acl.h"
61 : #include "utils/backend_status.h"
62 : #include "utils/lsyscache.h"
63 : #include "utils/partcache.h"
64 : #include "utils/plancache.h"
65 : #include "utils/rls.h"
66 : #include "utils/snapmgr.h"
67 :
68 :
69 : /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
70 : ExecutorStart_hook_type ExecutorStart_hook = NULL;
71 : ExecutorRun_hook_type ExecutorRun_hook = NULL;
72 : ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
73 : ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
74 :
75 : /* Hook for plugin to get control in ExecCheckPermissions() */
76 : ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
77 :
78 : /* decls for local routines only used within this module */
79 : static void InitPlan(QueryDesc *queryDesc, int eflags);
80 : static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
81 : static void ExecPostprocessPlan(EState *estate);
82 : static void ExecEndPlan(PlanState *planstate, EState *estate);
83 : static void ExecutePlan(QueryDesc *queryDesc,
84 : CmdType operation,
85 : bool sendTuples,
86 : uint64 numberTuples,
87 : ScanDirection direction,
88 : DestReceiver *dest);
89 : static bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo);
90 : static bool ExecCheckPermissionsModified(Oid relOid, Oid userid,
91 : Bitmapset *modifiedCols,
92 : AclMode requiredPerms);
93 : static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
94 : static void EvalPlanQualStart(EPQState *epqstate, Plan *planTree);
95 : static void ReportNotNullViolationError(ResultRelInfo *resultRelInfo,
96 : TupleTableSlot *slot,
97 : EState *estate, int attnum);
98 :
99 : /* end of local decls */
100 :
101 :
102 : /* ----------------------------------------------------------------
103 : * ExecutorStart
104 : *
105 : * This routine must be called at the beginning of any execution of any
106 : * query plan
107 : *
108 : * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
109 : * only because some places use QueryDescs for utility commands). The tupDesc
110 : * field of the QueryDesc is filled in to describe the tuples that will be
111 : * returned, and the internal fields (estate and planstate) are set up.
112 : *
113 : * eflags contains flag bits as described in executor.h.
114 : *
115 : * NB: the CurrentMemoryContext when this is called will become the parent
116 : * of the per-query context used for this Executor invocation.
117 : *
118 : * We provide a function hook variable that lets loadable plugins
119 : * get control when ExecutorStart is called. Such a plugin would
120 : * normally call standard_ExecutorStart().
121 : *
122 : * Return value indicates if the plan has been initialized successfully so
123 : * that queryDesc->planstate contains a valid PlanState tree. It will not
124 : * be valid if the plan was invalidated during InitPlan().
125 : * ----------------------------------------------------------------
126 : */
127 : bool
128 680198 : ExecutorStart(QueryDesc *queryDesc, int eflags)
129 : {
130 : bool plan_valid;
131 :
132 : /*
133 : * In some cases (e.g. an EXECUTE statement or an execute message with the
134 : * extended query protocol) the query_id won't be reported, so do it now.
135 : *
136 : * Note that it's harmless to report the query_id multiple times, as the
137 : * call will be ignored if the top level query_id has already been
138 : * reported.
139 : */
140 680198 : pgstat_report_query_id(queryDesc->plannedstmt->queryId, false);
141 :
142 680198 : if (ExecutorStart_hook)
143 115134 : plan_valid = (*ExecutorStart_hook) (queryDesc, eflags);
144 : else
145 565064 : plan_valid = standard_ExecutorStart(queryDesc, eflags);
146 :
147 678214 : return plan_valid;
148 : }
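/*
 * A minimal sketch, not part of this file, of how a loadable module would
 * typically chain ExecutorStart_hook (the pattern modules such as
 * pg_stat_statements follow); the names prev_ExecutorStart and
 * my_ExecutorStart are hypothetical:
 */
#include "postgres.h"
#include "fmgr.h"
#include "executor/executor.h"

PG_MODULE_MAGIC;

static ExecutorStart_hook_type prev_ExecutorStart = NULL;

static bool
my_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	/* extension-specific setup would go here */

	/* always chain to the previous hook, or to the standard routine */
	if (prev_ExecutorStart)
		return prev_ExecutorStart(queryDesc, eflags);
	return standard_ExecutorStart(queryDesc, eflags);
}

void
_PG_init(void)
{
	prev_ExecutorStart = ExecutorStart_hook;
	ExecutorStart_hook = my_ExecutorStart;
}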
149 :
150 : bool
151 680198 : standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
152 : {
153 : EState *estate;
154 : MemoryContext oldcontext;
155 :
156 : /* sanity checks: queryDesc must not be started already */
157 : Assert(queryDesc != NULL);
158 : Assert(queryDesc->estate == NULL);
159 :
160 : /* caller must ensure the query's snapshot is active */
161 : Assert(GetActiveSnapshot() == queryDesc->snapshot);
162 :
163 : /*
164 : * If the transaction is read-only, we need to check if any writes are
165 : * planned to non-temporary tables. EXPLAIN is considered read-only.
166 : *
167 : * Don't allow writes in parallel mode. Supporting UPDATE and DELETE
168 : * would require (a) storing the combo CID hash in shared memory, rather
169 : * than synchronizing it just once at the start of parallelism, and (b) an
170 : * alternative to heap_update()'s reliance on xmax for mutual exclusion.
171 : * INSERT may have no such troubles, but we forbid it to simplify the
172 : * checks.
173 : *
174 : * We have lower-level defenses in CommandCounterIncrement and elsewhere
175 : * against performing unsafe operations in parallel mode, but this gives a
176 : * more user-friendly error message.
177 : */
178 680198 : if ((XactReadOnly || IsInParallelMode()) &&
179 161190 : !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
180 161190 : ExecCheckXactReadOnly(queryDesc->plannedstmt);
181 :
182 : /*
183 : * Build EState, switch into per-query memory context for startup.
184 : */
185 680182 : estate = CreateExecutorState();
186 680182 : queryDesc->estate = estate;
187 :
188 680182 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
189 :
190 : /*
191 : * Fill in external parameters, if any, from queryDesc; and allocate
192 : * workspace for internal parameters
193 : */
194 680182 : estate->es_param_list_info = queryDesc->params;
195 :
196 680182 : if (queryDesc->plannedstmt->paramExecTypes != NIL)
197 : {
198 : int nParamExec;
199 :
200 203874 : nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
201 203874 : estate->es_param_exec_vals = (ParamExecData *)
202 203874 : palloc0(nParamExec * sizeof(ParamExecData));
203 : }
204 :
205 : /* We now require all callers to provide sourceText */
206 : Assert(queryDesc->sourceText != NULL);
207 680182 : estate->es_sourceText = queryDesc->sourceText;
208 :
209 : /*
210 : * Fill in the query environment, if any, from queryDesc.
211 : */
212 680182 : estate->es_queryEnv = queryDesc->queryEnv;
213 :
214 : /*
215 : * If non-read-only query, set the command ID to mark output tuples with
216 : */
217 680182 : switch (queryDesc->operation)
218 : {
219 558268 : case CMD_SELECT:
220 :
221 : /*
222 : * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
223 : * tuples
224 : */
225 558268 : if (queryDesc->plannedstmt->rowMarks != NIL ||
226 550394 : queryDesc->plannedstmt->hasModifyingCTE)
227 8008 : estate->es_output_cid = GetCurrentCommandId(true);
228 :
229 : /*
230 : * A SELECT without modifying CTEs can't possibly queue triggers,
231 : * so force skip-triggers mode. This is just a marginal efficiency
232 : * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
233 : * all that expensive, but we might as well do it.
234 : */
235 558268 : if (!queryDesc->plannedstmt->hasModifyingCTE)
236 558128 : eflags |= EXEC_FLAG_SKIP_TRIGGERS;
237 558268 : break;
238 :
239 121914 : case CMD_INSERT:
240 : case CMD_DELETE:
241 : case CMD_UPDATE:
242 : case CMD_MERGE:
243 121914 : estate->es_output_cid = GetCurrentCommandId(true);
244 121914 : break;
245 :
246 0 : default:
247 0 : elog(ERROR, "unrecognized operation code: %d",
248 : (int) queryDesc->operation);
249 : break;
250 : }
251 :
252 : /*
253 : * Copy other important information into the EState
254 : */
255 680182 : estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
256 680182 : estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
257 680182 : estate->es_top_eflags = eflags;
258 680182 : estate->es_instrument = queryDesc->instrument_options;
259 680182 : estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;
260 :
261 : /*
262 : * Set up an AFTER-trigger statement context, unless told not to, or
263 : * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
264 : */
265 680182 : if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
266 120436 : AfterTriggerBeginQuery();
267 :
268 : /*
269 : * Initialize the plan state tree
270 : */
271 680182 : InitPlan(queryDesc, eflags);
272 :
273 678214 : MemoryContextSwitchTo(oldcontext);
274 :
275 678214 : return ExecPlanStillValid(queryDesc->estate);
276 : }
277 :
278 : /*
279 : * ExecutorStartCachedPlan
280 : * Start execution for a given query in the CachedPlanSource, replanning
281 : * if the plan is invalidated due to deferred locks taken during the
282 : * plan's initialization
283 : *
284 : * This function handles cases where the CachedPlan given in queryDesc->cplan
285 : * might become invalid during the initialization of the plan given in
286 : * queryDesc->plannedstmt, particularly when prunable relations in it are
287 : * locked after performing initial pruning. If the locks invalidate the plan,
288 : * the function calls UpdateCachedPlan() to replan all queries in the
289 : * CachedPlan, and then retries initialization.
290 : *
291 : * The function repeats the process until ExecutorStart() successfully
292 : * initializes the plan, that is, without the CachedPlan becoming invalid.
293 : */
294 : void
295 140090 : ExecutorStartCachedPlan(QueryDesc *queryDesc, int eflags,
296 : CachedPlanSource *plansource,
297 : int query_index)
298 : {
299 140090 : if (unlikely(queryDesc->cplan == NULL))
300 0 : elog(ERROR, "ExecutorStartCachedPlan(): missing CachedPlan");
301 140090 : if (unlikely(plansource == NULL))
302 0 : elog(ERROR, "ExecutorStartCachedPlan(): missing CachedPlanSource");
303 :
304 : /*
305 : * Loop and retry with an updated plan until no further invalidation
306 : * occurs.
307 : */
308 : while (1)
309 : {
310 140094 : if (!ExecutorStart(queryDesc, eflags))
311 : {
312 : /*
313 : * Clean up the current execution state before creating the new
314 : * plan to retry ExecutorStart(). Mark execution as aborted to
315 : * ensure that AFTER trigger state is properly reset.
316 : */
317 4 : queryDesc->estate->es_aborted = true;
318 4 : ExecutorEnd(queryDesc);
319 :
320 : /* Retry ExecutorStart() with an updated plan tree. */
321 4 : queryDesc->plannedstmt = UpdateCachedPlan(plansource, query_index,
322 : queryDesc->queryEnv);
323 : }
324 : else
325 :
326 : /*
327 : * Exit the loop if the plan is initialized successfully and no
328 : * sinval messages were received that invalidated the CachedPlan.
329 : */
330 140084 : break;
331 : }
332 140084 : }
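/*
 * Hedged usage sketch: callers reach ExecutorStartCachedPlan() through the
 * portal machinery, roughly as below (argument lists abbreviated; resource
 * owner and parameter handling omitted):
 *
 *     cplan = GetCachedPlan(plansource, params, owner, queryEnv);
 *     ... build a QueryDesc whose cplan field points to cplan ...
 *     ExecutorStartCachedPlan(queryDesc, 0, plansource, query_index);
 */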
333 :
334 : /* ----------------------------------------------------------------
335 : * ExecutorRun
336 : *
337 : * This is the main routine of the executor module. It accepts
338 : * the query descriptor from the traffic cop and executes the
339 : * query plan.
340 : *
341 : * ExecutorStart must have been called already.
342 : *
343 : * If direction is NoMovementScanDirection then nothing is done
344 : * except to start up/shut down the destination. Otherwise,
345 : * we retrieve up to 'count' tuples in the specified direction.
346 : *
347 : * Note: count = 0 is interpreted as no portal limit, i.e., run to
348 : * completion. Also note that the count limit is only applied to
349 : * retrieved tuples, not for instance to those inserted/updated/deleted
350 : * by a ModifyTable plan node.
351 : *
352 : * There is no return value, but output tuples (if any) are sent to
353 : * the destination receiver specified in the QueryDesc; and the number
354 : * of tuples processed at the top level can be found in
355 : * estate->es_processed. The total number of tuples processed in all
356 : * the ExecutorRun calls can be found in estate->es_total_processed.
357 : *
358 : * We provide a function hook variable that lets loadable plugins
359 : * get control when ExecutorRun is called. Such a plugin would
360 : * normally call standard_ExecutorRun().
361 : *
362 : * ----------------------------------------------------------------
363 : */
364 : void
365 670458 : ExecutorRun(QueryDesc *queryDesc,
366 : ScanDirection direction, uint64 count)
367 : {
368 670458 : if (ExecutorRun_hook)
369 112056 : (*ExecutorRun_hook) (queryDesc, direction, count);
370 : else
371 558402 : standard_ExecutorRun(queryDesc, direction, count);
372 647048 : }
373 :
374 : void
375 670458 : standard_ExecutorRun(QueryDesc *queryDesc,
376 : ScanDirection direction, uint64 count)
377 : {
378 : EState *estate;
379 : CmdType operation;
380 : DestReceiver *dest;
381 : bool sendTuples;
382 : MemoryContext oldcontext;
383 :
384 : /* sanity checks */
385 : Assert(queryDesc != NULL);
386 :
387 670458 : estate = queryDesc->estate;
388 :
389 : Assert(estate != NULL);
390 : Assert(!estate->es_aborted);
391 : Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
392 :
393 : /* caller must ensure the query's snapshot is active */
394 : Assert(GetActiveSnapshot() == estate->es_snapshot);
395 :
396 : /*
397 : * Switch into per-query memory context
398 : */
399 670458 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
400 :
401 : /* Allow instrumentation of Executor overall runtime */
402 670458 : if (queryDesc->totaltime)
403 76588 : InstrStartNode(queryDesc->totaltime);
404 :
405 : /*
406 : * extract information from the query descriptor.
407 : */
408 670458 : operation = queryDesc->operation;
409 670458 : dest = queryDesc->dest;
410 :
411 : /*
412 : * startup tuple receiver, if we will be emitting tuples
413 : */
414 670458 : estate->es_processed = 0;
415 :
416 790110 : sendTuples = (operation == CMD_SELECT ||
417 119652 : queryDesc->plannedstmt->hasReturning);
418 :
419 670458 : if (sendTuples)
420 554938 : dest->rStartup(dest, operation, queryDesc->tupDesc);
421 :
422 : /*
423 : * Run plan, unless direction is NoMovement.
424 : *
425 : * Note: pquery.c selects NoMovement if a prior call already reached
426 : * end-of-data in the user-specified fetch direction. This is important
427 : * because various parts of the executor can misbehave if called again
428 : * after reporting EOF. For example, heapam.c would actually restart a
429 : * heapscan and return all its data afresh. There is also some doubt
430 : * about whether a parallel plan would operate properly if an additional,
431 : * necessarily non-parallel execution request occurs after completing a
432 : * parallel execution. (That case should work, but it's untested.)
433 : */
434 670420 : if (!ScanDirectionIsNoMovement(direction))
435 669200 : ExecutePlan(queryDesc,
436 : operation,
437 : sendTuples,
438 : count,
439 : direction,
440 : dest);
441 :
442 : /*
443 : * Update es_total_processed to keep track of the number of tuples
444 : * processed across multiple ExecutorRun() calls.
445 : */
446 647048 : estate->es_total_processed += estate->es_processed;
447 :
448 : /*
449 : * shutdown tuple receiver, if we started it
450 : */
451 647048 : if (sendTuples)
452 534660 : dest->rShutdown(dest);
453 :
454 647048 : if (queryDesc->totaltime)
455 73882 : InstrStopNode(queryDesc->totaltime, estate->es_processed);
456 :
457 647048 : MemoryContextSwitchTo(oldcontext);
458 647048 : }
459 :
460 : /* ----------------------------------------------------------------
461 : * ExecutorFinish
462 : *
463 : * This routine must be called after the last ExecutorRun call.
464 : * It performs cleanup such as firing AFTER triggers. It is
465 : * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
466 : * include these actions in the total runtime.
467 : *
468 : * We provide a function hook variable that lets loadable plugins
469 : * get control when ExecutorFinish is called. Such a plugin would
470 : * normally call standard_ExecutorFinish().
471 : *
472 : * ----------------------------------------------------------------
473 : */
474 : void
475 632752 : ExecutorFinish(QueryDesc *queryDesc)
476 : {
477 632752 : if (ExecutorFinish_hook)
478 101444 : (*ExecutorFinish_hook) (queryDesc);
479 : else
480 531308 : standard_ExecutorFinish(queryDesc);
481 631698 : }
482 :
483 : void
484 632752 : standard_ExecutorFinish(QueryDesc *queryDesc)
485 : {
486 : EState *estate;
487 : MemoryContext oldcontext;
488 :
489 : /* sanity checks */
490 : Assert(queryDesc != NULL);
491 :
492 632752 : estate = queryDesc->estate;
493 :
494 : Assert(estate != NULL);
495 : Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
496 :
497 : /*
498 : * This should be run once and only once per Executor instance and never
499 : * if the execution was aborted.
500 : */
501 : Assert(!estate->es_finished && !estate->es_aborted);
502 :
503 : /* Switch into per-query memory context */
504 632752 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
505 :
506 : /* Allow instrumentation of Executor overall runtime */
507 632752 : if (queryDesc->totaltime)
508 73882 : InstrStartNode(queryDesc->totaltime);
509 :
510 : /* Run ModifyTable nodes to completion */
511 632752 : ExecPostprocessPlan(estate);
512 :
513 : /* Execute queued AFTER triggers, unless told not to */
514 632752 : if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
515 115912 : AfterTriggerEndQuery(estate);
516 :
517 631698 : if (queryDesc->totaltime)
518 73572 : InstrStopNode(queryDesc->totaltime, 0);
519 :
520 631698 : MemoryContextSwitchTo(oldcontext);
521 :
522 631698 : estate->es_finished = true;
523 631698 : }
524 :
525 : /* ----------------------------------------------------------------
526 : * ExecutorEnd
527 : *
528 : * This routine must be called at the end of execution of any
529 : * query plan
530 : *
531 : * We provide a function hook variable that lets loadable plugins
532 : * get control when ExecutorEnd is called. Such a plugin would
533 : * normally call standard_ExecutorEnd().
534 : *
535 : * ----------------------------------------------------------------
536 : */
537 : void
538 651706 : ExecutorEnd(QueryDesc *queryDesc)
539 : {
540 651706 : if (ExecutorEnd_hook)
541 106754 : (*ExecutorEnd_hook) (queryDesc);
542 : else
543 544952 : standard_ExecutorEnd(queryDesc);
544 651706 : }
545 :
546 : void
547 651706 : standard_ExecutorEnd(QueryDesc *queryDesc)
548 : {
549 : EState *estate;
550 : MemoryContext oldcontext;
551 :
552 : /* sanity checks */
553 : Assert(queryDesc != NULL);
554 :
555 651706 : estate = queryDesc->estate;
556 :
557 : Assert(estate != NULL);
558 :
559 651706 : if (estate->es_parallel_workers_to_launch > 0)
560 682 : pgstat_update_parallel_workers_stats((PgStat_Counter) estate->es_parallel_workers_to_launch,
561 682 : (PgStat_Counter) estate->es_parallel_workers_launched);
562 :
563 : /*
564 : * Check that ExecutorFinish was called, unless in EXPLAIN-only mode or if
565 : * execution was aborted.
566 : */
567 : Assert(estate->es_finished || estate->es_aborted ||
568 : (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
569 :
570 : /*
571 : * Switch into per-query memory context to run ExecEndPlan
572 : */
573 651706 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
574 :
575 651706 : ExecEndPlan(queryDesc->planstate, estate);
576 :
577 : /* do away with our snapshots */
578 651706 : UnregisterSnapshot(estate->es_snapshot);
579 651706 : UnregisterSnapshot(estate->es_crosscheck_snapshot);
580 :
581 : /*
582 : * Reset AFTER trigger module if the query execution was aborted.
583 : */
584 651706 : if (estate->es_aborted &&
585 4 : !(estate->es_top_eflags &
586 : (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
587 0 : AfterTriggerAbortQuery();
588 :
589 : /*
590 : * Must switch out of context before destroying it
591 : */
592 651706 : MemoryContextSwitchTo(oldcontext);
593 :
594 : /*
595 : * Release EState and per-query memory context. This should release
596 : * everything the executor has allocated.
597 : */
598 651706 : FreeExecutorState(estate);
599 :
600 : /* Reset queryDesc fields that no longer point to anything */
601 651706 : queryDesc->tupDesc = NULL;
602 651706 : queryDesc->estate = NULL;
603 651706 : queryDesc->planstate = NULL;
604 651706 : queryDesc->totaltime = NULL;
605 651706 : }
606 :
607 : /* ----------------------------------------------------------------
608 : * ExecutorRewind
609 : *
610 : * This routine may be called on an open queryDesc to rewind it
611 : * to the start.
612 : * ----------------------------------------------------------------
613 : */
614 : void
615 104 : ExecutorRewind(QueryDesc *queryDesc)
616 : {
617 : EState *estate;
618 : MemoryContext oldcontext;
619 :
620 : /* sanity checks */
621 : Assert(queryDesc != NULL);
622 :
623 104 : estate = queryDesc->estate;
624 :
625 : Assert(estate != NULL);
626 :
627 : /* It's probably not sensible to rescan updating queries */
628 : Assert(queryDesc->operation == CMD_SELECT);
629 :
630 : /*
631 : * Switch into per-query memory context
632 : */
633 104 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
634 :
635 : /*
636 : * rescan plan
637 : */
638 104 : ExecReScan(queryDesc->planstate);
639 :
640 104 : MemoryContextSwitchTo(oldcontext);
641 104 : }
642 :
643 :
644 : /*
645 : * ExecCheckPermissions
646 : * Check access permissions of relations mentioned in a query
647 : *
648 : * Returns true if permissions are adequate; otherwise throws an appropriate
649 : * error if ereport_on_violation is true, or else simply returns false.
650 : *
651 : * Note that this does NOT address row-level security policies (aka: RLS). If
652 : * rows will be returned to the user as a result of this permission check
653 : * passing, then RLS also needs to be consulted (and check_enable_rls()).
654 : *
655 : * See rewrite/rowsecurity.c.
656 : *
657 : * NB: rangeTable is no longer used by us, but kept around for the hooks that
658 : * might still want to look at the RTEs.
659 : */
660 : bool
661 696296 : ExecCheckPermissions(List *rangeTable, List *rteperminfos,
662 : bool ereport_on_violation)
663 : {
664 : ListCell *l;
665 696296 : bool result = true;
666 :
667 : #ifdef USE_ASSERT_CHECKING
668 : Bitmapset *indexset = NULL;
669 :
670 : /* Check that rteperminfos is consistent with rangeTable */
671 : foreach(l, rangeTable)
672 : {
673 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
674 :
675 : if (rte->perminfoindex != 0)
676 : {
677 : /* Sanity checks */
678 :
679 : /*
680 : * Only relation RTEs and subquery RTEs that were once relation
681 : * RTEs (views) have their perminfoindex set.
682 : */
683 : Assert(rte->rtekind == RTE_RELATION ||
684 : (rte->rtekind == RTE_SUBQUERY &&
685 : rte->relkind == RELKIND_VIEW));
686 :
687 : /*
688 : * Ensure that we have at least an AccessShareLock on relations
689 : * whose permissions need to be checked.
690 : *
691 : * Skip this check in a parallel worker because locks won't be
692 : * taken until ExecInitNode() performs plan initialization.
693 : *
694 : * XXX: ExecCheckPermissions() in a parallel worker may be
695 : * redundant with the checks done in the leader process, so this
696 : * should be reviewed to ensure it's necessary.
697 : */
698 : Assert(IsParallelWorker() ||
699 : CheckRelationOidLockedByMe(rte->relid, AccessShareLock,
700 : true));
701 :
702 : (void) getRTEPermissionInfo(rteperminfos, rte);
703 : /* Many-to-one mapping not allowed */
704 : Assert(!bms_is_member(rte->perminfoindex, indexset));
705 : indexset = bms_add_member(indexset, rte->perminfoindex);
706 : }
707 : }
708 :
709 : /* All rteperminfos are referenced */
710 : Assert(bms_num_members(indexset) == list_length(rteperminfos));
711 : #endif
712 :
713 1332736 : foreach(l, rteperminfos)
714 : {
715 638066 : RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
716 :
717 : Assert(OidIsValid(perminfo->relid));
718 638066 : result = ExecCheckOneRelPerms(perminfo);
719 638066 : if (!result)
720 : {
721 1626 : if (ereport_on_violation)
722 1614 : aclcheck_error(ACLCHECK_NO_PRIV,
723 1614 : get_relkind_objtype(get_rel_relkind(perminfo->relid)),
724 1614 : get_rel_name(perminfo->relid));
725 12 : return false;
726 : }
727 : }
728 :
729 694670 : if (ExecutorCheckPerms_hook)
730 12 : result = (*ExecutorCheckPerms_hook) (rangeTable, rteperminfos,
731 : ereport_on_violation);
732 694670 : return result;
733 : }
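/*
 * A minimal sketch, not part of this file, of an ExecutorCheckPerms_hook
 * implementation (the mechanism modules such as sepgsql use for extra
 * access checks).  It assumes the same module scaffolding as the
 * ExecutorStart sketch above; prev_perms_hook and my_check_perms are
 * hypothetical names:
 */
static ExecutorCheckPerms_hook_type prev_perms_hook = NULL;

static bool
my_check_perms(List *rangeTable, List *rteperminfos,
			   bool ereport_on_violation)
{
	/* chain first, so that any previously installed hook still applies */
	if (prev_perms_hook &&
		!(*prev_perms_hook) (rangeTable, rteperminfos, ereport_on_violation))
		return false;

	/* module-specific checks on the RTEPermissionInfos would go here */
	return true;
}

/*
 * Registered in _PG_init() with:
 *     prev_perms_hook = ExecutorCheckPerms_hook;
 *     ExecutorCheckPerms_hook = my_check_perms;
 */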
734 :
735 : /*
736 : * ExecCheckOneRelPerms
737 : * Check access permissions for a single relation.
738 : */
739 : static bool
740 638066 : ExecCheckOneRelPerms(RTEPermissionInfo *perminfo)
741 : {
742 : AclMode requiredPerms;
743 : AclMode relPerms;
744 : AclMode remainingPerms;
745 : Oid userid;
746 638066 : Oid relOid = perminfo->relid;
747 :
748 638066 : requiredPerms = perminfo->requiredPerms;
749 : Assert(requiredPerms != 0);
750 :
751 : /*
752 : * userid to check as: current user unless we have a setuid indication.
753 : *
754 : * Note: GetUserId() is presently fast enough that there's no harm in
755 : * calling it separately for each relation. If that stops being true, we
756 : * could call it once in ExecCheckPermissions and pass the userid down
757 : * from there. But for now, no need for the extra clutter.
758 : */
759 1276132 : userid = OidIsValid(perminfo->checkAsUser) ?
760 638066 : perminfo->checkAsUser : GetUserId();
761 :
762 : /*
763 : * We must have *all* the requiredPerms bits, but some of the bits can be
764 : * satisfied from column-level rather than relation-level permissions.
765 : * First, remove any bits that are satisfied by relation permissions.
766 : */
767 638066 : relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
768 638066 : remainingPerms = requiredPerms & ~relPerms;
769 638066 : if (remainingPerms != 0)
770 : {
771 2540 : int col = -1;
772 :
773 : /*
774 : * If we lack any permissions that exist only as relation permissions,
775 : * we can fail straight away.
776 : */
777 2540 : if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
778 150 : return false;
779 :
780 : /*
781 : * Check to see if we have the needed privileges at column level.
782 : *
783 : * Note: failures just report a table-level error; it would be nicer
784 : * to report a column-level error if we have some but not all of the
785 : * column privileges.
786 : */
787 2390 : if (remainingPerms & ACL_SELECT)
788 : {
789 : /*
790 : * When the query doesn't explicitly reference any columns (for
791 : * example, SELECT COUNT(*) FROM table), allow the query if we
792 : * have SELECT on any column of the rel, as per SQL spec.
793 : */
794 1422 : if (bms_is_empty(perminfo->selectedCols))
795 : {
796 54 : if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
797 : ACLMASK_ANY) != ACLCHECK_OK)
798 12 : return false;
799 : }
800 :
801 2318 : while ((col = bms_next_member(perminfo->selectedCols, col)) >= 0)
802 : {
803 : /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
804 1812 : AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
805 :
806 1812 : if (attno == InvalidAttrNumber)
807 : {
808 : /* Whole-row reference, must have priv on all cols */
809 54 : if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
810 : ACLMASK_ALL) != ACLCHECK_OK)
811 30 : return false;
812 : }
813 : else
814 : {
815 1758 : if (pg_attribute_aclcheck(relOid, attno, userid,
816 : ACL_SELECT) != ACLCHECK_OK)
817 874 : return false;
818 : }
819 : }
820 : }
821 :
822 : /*
823 : * Basically the same for the mod columns, for both INSERT and UPDATE
824 : * privilege as specified by remainingPerms.
825 : */
826 1474 : if (remainingPerms & ACL_INSERT &&
827 308 : !ExecCheckPermissionsModified(relOid,
828 : userid,
829 : perminfo->insertedCols,
830 : ACL_INSERT))
831 176 : return false;
832 :
833 1298 : if (remainingPerms & ACL_UPDATE &&
834 876 : !ExecCheckPermissionsModified(relOid,
835 : userid,
836 : perminfo->updatedCols,
837 : ACL_UPDATE))
838 384 : return false;
839 : }
840 636440 : return true;
841 : }
842 :
843 : /*
844 : * ExecCheckPermissionsModified
845 : * Check INSERT or UPDATE access permissions for a single relation (these
846 : * are processed uniformly).
847 : */
848 : static bool
849 1184 : ExecCheckPermissionsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
850 : AclMode requiredPerms)
851 : {
852 1184 : int col = -1;
853 :
854 : /*
855 : * When the query doesn't explicitly update any columns, allow the query
856 : * if we have permission on any column of the rel. This is to handle
857 : * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
858 : */
859 1184 : if (bms_is_empty(modifiedCols))
860 : {
861 48 : if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
862 : ACLMASK_ANY) != ACLCHECK_OK)
863 48 : return false;
864 : }
865 :
866 1892 : while ((col = bms_next_member(modifiedCols, col)) >= 0)
867 : {
868 : /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
869 1268 : AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
870 :
871 1268 : if (attno == InvalidAttrNumber)
872 : {
873 : /* whole-row reference can't happen here */
874 0 : elog(ERROR, "whole-row update is not implemented");
875 : }
876 : else
877 : {
878 1268 : if (pg_attribute_aclcheck(relOid, attno, userid,
879 : requiredPerms) != ACLCHECK_OK)
880 512 : return false;
881 : }
882 : }
883 624 : return true;
884 : }
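/*
 * Illustrative sketch of the column-number encoding decoded above: members
 * of selectedCols/insertedCols/updatedCols are attribute numbers offset by
 * FirstLowInvalidHeapAttributeNumber, so that system columns (negative
 * attnos) and whole-row references (attno 0, i.e. InvalidAttrNumber) map to
 * non-negative bitmapset members.  Building such a set is the inverse of
 * the decoding loops above:
 *
 *     Bitmapset  *cols = NULL;
 *
 *     reference user column 3:
 *     cols = bms_add_member(cols, 3 - FirstLowInvalidHeapAttributeNumber);
 *
 *     record a whole-row reference:
 *     cols = bms_add_member(cols, 0 - FirstLowInvalidHeapAttributeNumber);
 */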
885 :
886 : /*
887 : * Check that the query does not imply any writes to non-temp tables;
888 : * unless we're in parallel mode, in which case don't even allow writes
889 : * to temp tables.
890 : *
891 : * Note: in a Hot Standby this would need to reject writes to temp
892 : * tables just as we do in parallel mode; but an HS standby can't have created
893 : * any temp tables in the first place, so no need to check that.
894 : */
895 : static void
896 161190 : ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
897 : {
898 : ListCell *l;
899 :
900 : /*
901 : * Fail if write permissions are requested in parallel mode for any table
902 : * (temp or non-temp); otherwise fail only for non-temp tables.
903 : */
904 337856 : foreach(l, plannedstmt->permInfos)
905 : {
906 176682 : RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
907 :
908 176682 : if ((perminfo->requiredPerms & (~ACL_SELECT)) == 0)
909 176654 : continue;
910 :
911 28 : if (isTempNamespace(get_rel_namespace(perminfo->relid)))
912 12 : continue;
913 :
914 16 : PreventCommandIfReadOnly(CreateCommandName((Node *) plannedstmt));
915 : }
916 :
917 161174 : if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
918 12 : PreventCommandIfParallelMode(CreateCommandName((Node *) plannedstmt));
919 161174 : }
920 :
921 :
922 : /* ----------------------------------------------------------------
923 : * InitPlan
924 : *
925 : * Initializes the query plan: open files, allocate storage
926 : * and start up the rule manager
927 : *
928 : * If the plan originates from a CachedPlan (given in queryDesc->cplan),
929 : * it can become invalid during runtime "initial" pruning when the
930 : * remaining set of locks is taken. The function returns early in that
931 : * case without initializing the plan, and the caller is expected to
932 : * retry with a new valid plan.
933 : * ----------------------------------------------------------------
934 : */
935 : static void
936 680182 : InitPlan(QueryDesc *queryDesc, int eflags)
937 : {
938 680182 : CmdType operation = queryDesc->operation;
939 680182 : PlannedStmt *plannedstmt = queryDesc->plannedstmt;
940 680182 : CachedPlan *cachedplan = queryDesc->cplan;
941 680182 : Plan *plan = plannedstmt->planTree;
942 680182 : List *rangeTable = plannedstmt->rtable;
943 680182 : EState *estate = queryDesc->estate;
944 : PlanState *planstate;
945 : TupleDesc tupType;
946 : ListCell *l;
947 : int i;
948 :
949 : /*
950 : * Do permissions checks
951 : */
952 680182 : ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true);
953 :
954 : /*
955 : * initialize the node's execution state
956 : */
957 678652 : ExecInitRangeTable(estate, rangeTable, plannedstmt->permInfos,
958 678652 : bms_copy(plannedstmt->unprunableRelids));
959 :
960 678652 : estate->es_plannedstmt = plannedstmt;
961 678652 : estate->es_cachedplan = cachedplan;
962 678652 : estate->es_part_prune_infos = plannedstmt->partPruneInfos;
963 :
964 : /*
965 : * Perform runtime "initial" pruning to identify which child subplans,
966 : * corresponding to the children of plan nodes that contain
967 : * PartitionPruneInfo such as Append, will not be executed. The results,
968 : * which are bitmapsets of indexes of the child subplans that will be
969 : * executed, are saved in es_part_prune_results. These results correspond
970 : * to each PartitionPruneInfo entry, and the es_part_prune_results list is
971 : * parallel to es_part_prune_infos.
972 : */
973 678652 : ExecDoInitialPruning(estate);
974 :
975 678652 : if (!ExecPlanStillValid(estate))
976 4 : return;
977 :
978 : /*
979 : * Next, build the ExecRowMark array from the PlanRowMark(s), if any.
980 : */
981 678648 : if (plannedstmt->rowMarks)
982 : {
983 9916 : estate->es_rowmarks = (ExecRowMark **)
984 9916 : palloc0(estate->es_range_table_size * sizeof(ExecRowMark *));
985 23060 : foreach(l, plannedstmt->rowMarks)
986 : {
987 13150 : PlanRowMark *rc = (PlanRowMark *) lfirst(l);
988 : Oid relid;
989 : Relation relation;
990 : ExecRowMark *erm;
991 :
992 : /*
993 : * Ignore "parent" rowmarks, because they are irrelevant at
994 : * runtime. Also ignore the rowmarks belonging to child tables
995 : * that have been pruned in ExecDoInitialPruning().
996 : */
997 13150 : if (rc->isParent ||
998 11312 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
999 2366 : continue;
1000 :
1001 : /* get relation's OID (will produce InvalidOid if subquery) */
1002 10784 : relid = exec_rt_fetch(rc->rti, estate)->relid;
1003 :
1004 : /* open relation, if we need to access it for this mark type */
1005 10784 : switch (rc->markType)
1006 : {
1007 10472 : case ROW_MARK_EXCLUSIVE:
1008 : case ROW_MARK_NOKEYEXCLUSIVE:
1009 : case ROW_MARK_SHARE:
1010 : case ROW_MARK_KEYSHARE:
1011 : case ROW_MARK_REFERENCE:
1012 10472 : relation = ExecGetRangeTableRelation(estate, rc->rti, false);
1013 10472 : break;
1014 312 : case ROW_MARK_COPY:
1015 : /* no physical table access is required */
1016 312 : relation = NULL;
1017 312 : break;
1018 0 : default:
1019 0 : elog(ERROR, "unrecognized markType: %d", rc->markType);
1020 : relation = NULL; /* keep compiler quiet */
1021 : break;
1022 : }
1023 :
1024 : /* Check that relation is a legal target for marking */
1025 10784 : if (relation)
1026 10472 : CheckValidRowMarkRel(relation, rc->markType);
1027 :
1028 10778 : erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
1029 10778 : erm->relation = relation;
1030 10778 : erm->relid = relid;
1031 10778 : erm->rti = rc->rti;
1032 10778 : erm->prti = rc->prti;
1033 10778 : erm->rowmarkId = rc->rowmarkId;
1034 10778 : erm->markType = rc->markType;
1035 10778 : erm->strength = rc->strength;
1036 10778 : erm->waitPolicy = rc->waitPolicy;
1037 10778 : erm->ermActive = false;
1038 10778 : ItemPointerSetInvalid(&(erm->curCtid));
1039 10778 : erm->ermExtra = NULL;
1040 :
1041 : Assert(erm->rti > 0 && erm->rti <= estate->es_range_table_size &&
1042 : estate->es_rowmarks[erm->rti - 1] == NULL);
1043 :
1044 10778 : estate->es_rowmarks[erm->rti - 1] = erm;
1045 : }
1046 : }
1047 :
1048 : /*
1049 : * Initialize the executor's tuple table to empty.
1050 : */
1051 678642 : estate->es_tupleTable = NIL;
1052 :
1053 : /* signal that this EState is not used for EPQ */
1054 678642 : estate->es_epq_active = NULL;
1055 :
1056 : /*
1057 : * Initialize private state information for each SubPlan. We must do this
1058 : * before running ExecInitNode on the main query tree, since
1059 : * ExecInitSubPlan expects to be able to find these entries.
1060 : */
1061 : Assert(estate->es_subplanstates == NIL);
1062 678642 : i = 1; /* subplan indices count from 1 */
1063 722598 : foreach(l, plannedstmt->subplans)
1064 : {
1065 43956 : Plan *subplan = (Plan *) lfirst(l);
1066 : PlanState *subplanstate;
1067 : int sp_eflags;
1068 :
1069 : /*
1070 : * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
1071 : * it is a parameterless subplan (not initplan), we suggest that it be
1072 : * prepared to handle REWIND efficiently; otherwise there is no need.
1073 : */
1074 43956 : sp_eflags = eflags
1075 : & ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
1076 43956 : if (bms_is_member(i, plannedstmt->rewindPlanIDs))
1077 42 : sp_eflags |= EXEC_FLAG_REWIND;
1078 :
1079 43956 : subplanstate = ExecInitNode(subplan, estate, sp_eflags);
1080 :
1081 43956 : estate->es_subplanstates = lappend(estate->es_subplanstates,
1082 : subplanstate);
1083 :
1084 43956 : i++;
1085 : }
1086 :
1087 : /*
1088 : * Initialize the private state information for all the nodes in the query
1089 : * tree. This opens files, allocates storage and leaves us ready to start
1090 : * processing tuples.
1091 : */
1092 678642 : planstate = ExecInitNode(plan, estate, eflags);
1093 :
1094 : /*
1095 : * Get the tuple descriptor describing the type of tuples to return.
1096 : */
1097 678210 : tupType = ExecGetResultType(planstate);
1098 :
1099 : /*
1100 : * Initialize the junk filter if needed. SELECT queries need a filter if
1101 : * there are any junk attrs in the top-level tlist.
1102 : */
1103 678210 : if (operation == CMD_SELECT)
1104 : {
1105 557558 : bool junk_filter_needed = false;
1106 : ListCell *tlist;
1107 :
1108 2116470 : foreach(tlist, plan->targetlist)
1109 : {
1110 1583438 : TargetEntry *tle = (TargetEntry *) lfirst(tlist);
1111 :
1112 1583438 : if (tle->resjunk)
1113 : {
1114 24526 : junk_filter_needed = true;
1115 24526 : break;
1116 : }
1117 : }
1118 :
1119 557558 : if (junk_filter_needed)
1120 : {
1121 : JunkFilter *j;
1122 : TupleTableSlot *slot;
1123 :
1124 24526 : slot = ExecInitExtraTupleSlot(estate, NULL, &TTSOpsVirtual);
1125 24526 : j = ExecInitJunkFilter(planstate->plan->targetlist,
1126 : slot);
1127 24526 : estate->es_junkFilter = j;
1128 :
1129 : /* Want to return the cleaned tuple type */
1130 24526 : tupType = j->jf_cleanTupType;
1131 : }
1132 : }
1133 :
1134 678210 : queryDesc->tupDesc = tupType;
1135 678210 : queryDesc->planstate = planstate;
1136 : }
1137 :
1138 : /*
1139 : * Check that a proposed result relation is a legal target for the operation
1140 : *
1141 : * Generally the parser and/or planner should have noticed any such mistake
1142 : * already, but let's make sure.
1143 : *
1144 : * For MERGE, mergeActions is the list of actions that may be performed. The
1145 : * result relation is required to support every action, regardless of whether
1146 : * or not they are all executed.
1147 : *
1148 : * Note: when changing this function, you probably also need to look at
1149 : * CheckValidRowMarkRel.
1150 : */
1151 : void
1152 135324 : CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation,
1153 : List *mergeActions)
1154 : {
1155 135324 : Relation resultRel = resultRelInfo->ri_RelationDesc;
1156 : FdwRoutine *fdwroutine;
1157 :
1158 : /* Expect a fully-formed ResultRelInfo from InitResultRelInfo(). */
1159 : Assert(resultRelInfo->ri_needLockTagTuple ==
1160 : IsInplaceUpdateRelation(resultRel));
1161 :
1162 135324 : switch (resultRel->rd_rel->relkind)
1163 : {
1164 134116 : case RELKIND_RELATION:
1165 : case RELKIND_PARTITIONED_TABLE:
1166 134116 : CheckCmdReplicaIdentity(resultRel, operation);
1167 133808 : break;
1168 0 : case RELKIND_SEQUENCE:
1169 0 : ereport(ERROR,
1170 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1171 : errmsg("cannot change sequence \"%s\"",
1172 : RelationGetRelationName(resultRel))));
1173 : break;
1174 0 : case RELKIND_TOASTVALUE:
1175 0 : ereport(ERROR,
1176 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1177 : errmsg("cannot change TOAST relation \"%s\"",
1178 : RelationGetRelationName(resultRel))));
1179 : break;
1180 420 : case RELKIND_VIEW:
1181 :
1182 : /*
1183 : * Okay only if there's a suitable INSTEAD OF trigger. Otherwise,
1184 : * complain, but omit errdetail because we haven't got the
1185 : * information handy (and given that it really shouldn't happen,
1186 : * it's not worth great exertion to get).
1187 : */
1188 420 : if (!view_has_instead_trigger(resultRel, operation, mergeActions))
1189 0 : error_view_not_updatable(resultRel, operation, mergeActions,
1190 : NULL);
1191 420 : break;
1192 120 : case RELKIND_MATVIEW:
1193 120 : if (!MatViewIncrementalMaintenanceIsEnabled())
1194 0 : ereport(ERROR,
1195 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1196 : errmsg("cannot change materialized view \"%s\"",
1197 : RelationGetRelationName(resultRel))));
1198 120 : break;
1199 668 : case RELKIND_FOREIGN_TABLE:
1200 : /* Okay only if the FDW supports it */
1201 668 : fdwroutine = resultRelInfo->ri_FdwRoutine;
1202 : switch (operation)
1203 : {
1204 304 : case CMD_INSERT:
1205 304 : if (fdwroutine->ExecForeignInsert == NULL)
1206 10 : ereport(ERROR,
1207 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1208 : errmsg("cannot insert into foreign table \"%s\"",
1209 : RelationGetRelationName(resultRel))));
1210 294 : if (fdwroutine->IsForeignRelUpdatable != NULL &&
1211 294 : (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
1212 0 : ereport(ERROR,
1213 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1214 : errmsg("foreign table \"%s\" does not allow inserts",
1215 : RelationGetRelationName(resultRel))));
1216 294 : break;
1217 206 : case CMD_UPDATE:
1218 206 : if (fdwroutine->ExecForeignUpdate == NULL)
1219 4 : ereport(ERROR,
1220 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1221 : errmsg("cannot update foreign table \"%s\"",
1222 : RelationGetRelationName(resultRel))));
1223 202 : if (fdwroutine->IsForeignRelUpdatable != NULL &&
1224 202 : (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
1225 0 : ereport(ERROR,
1226 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1227 : errmsg("foreign table \"%s\" does not allow updates",
1228 : RelationGetRelationName(resultRel))));
1229 202 : break;
1230 158 : case CMD_DELETE:
1231 158 : if (fdwroutine->ExecForeignDelete == NULL)
1232 4 : ereport(ERROR,
1233 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1234 : errmsg("cannot delete from foreign table \"%s\"",
1235 : RelationGetRelationName(resultRel))));
1236 154 : if (fdwroutine->IsForeignRelUpdatable != NULL &&
1237 154 : (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
1238 0 : ereport(ERROR,
1239 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1240 : errmsg("foreign table \"%s\" does not allow deletes",
1241 : RelationGetRelationName(resultRel))));
1242 154 : break;
1243 0 : default:
1244 0 : elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1245 : break;
1246 : }
1247 650 : break;
1248 0 : default:
1249 0 : ereport(ERROR,
1250 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1251 : errmsg("cannot change relation \"%s\"",
1252 : RelationGetRelationName(resultRel))));
1253 : break;
1254 : }
1255 134998 : }
1256 :
1257 : /*
1258 : * Check that a proposed rowmark target relation is a legal target
1259 : *
1260 : * In most cases parser and/or planner should have noticed this already, but
1261 : * they don't cover all cases.
1262 : */
1263 : static void
1264 10472 : CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1265 : {
1266 : FdwRoutine *fdwroutine;
1267 :
1268 10472 : switch (rel->rd_rel->relkind)
1269 : {
1270 10460 : case RELKIND_RELATION:
1271 : case RELKIND_PARTITIONED_TABLE:
1272 : /* OK */
1273 10460 : break;
1274 0 : case RELKIND_SEQUENCE:
1275 : /* Must disallow this because we don't vacuum sequences */
1276 0 : ereport(ERROR,
1277 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1278 : errmsg("cannot lock rows in sequence \"%s\"",
1279 : RelationGetRelationName(rel))));
1280 : break;
1281 0 : case RELKIND_TOASTVALUE:
1282 : /* We could allow this, but there seems no good reason to */
1283 0 : ereport(ERROR,
1284 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1285 : errmsg("cannot lock rows in TOAST relation \"%s\"",
1286 : RelationGetRelationName(rel))));
1287 : break;
1288 0 : case RELKIND_VIEW:
1289 : /* Should not get here; planner should have expanded the view */
1290 0 : ereport(ERROR,
1291 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1292 : errmsg("cannot lock rows in view \"%s\"",
1293 : RelationGetRelationName(rel))));
1294 : break;
1295 12 : case RELKIND_MATVIEW:
1296 : /* Allow referencing a matview, but not actual locking clauses */
1297 12 : if (markType != ROW_MARK_REFERENCE)
1298 6 : ereport(ERROR,
1299 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1300 : errmsg("cannot lock rows in materialized view \"%s\"",
1301 : RelationGetRelationName(rel))));
1302 6 : break;
1303 0 : case RELKIND_FOREIGN_TABLE:
1304 : /* Okay only if the FDW supports it */
1305 0 : fdwroutine = GetFdwRoutineForRelation(rel, false);
1306 0 : if (fdwroutine->RefetchForeignRow == NULL)
1307 0 : ereport(ERROR,
1308 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1309 : errmsg("cannot lock rows in foreign table \"%s\"",
1310 : RelationGetRelationName(rel))));
1311 0 : break;
1312 0 : default:
1313 0 : ereport(ERROR,
1314 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1315 : errmsg("cannot lock rows in relation \"%s\"",
1316 : RelationGetRelationName(rel))));
1317 : break;
1318 : }
1319 10466 : }
1320 :
1321 : /*
1322 : * Initialize ResultRelInfo data for one result relation
1323 : *
1324 : * Caution: before Postgres 9.1, this function included the relkind checking
1325 : * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1326 : * appropriate. Be sure callers cover those needs.
1327 : */
1328 : void
1329 441488 : InitResultRelInfo(ResultRelInfo *resultRelInfo,
1330 : Relation resultRelationDesc,
1331 : Index resultRelationIndex,
1332 : ResultRelInfo *partition_root_rri,
1333 : int instrument_options)
1334 : {
1335 22515888 : MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1336 441488 : resultRelInfo->type = T_ResultRelInfo;
1337 441488 : resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1338 441488 : resultRelInfo->ri_RelationDesc = resultRelationDesc;
1339 441488 : resultRelInfo->ri_NumIndices = 0;
1340 441488 : resultRelInfo->ri_IndexRelationDescs = NULL;
1341 441488 : resultRelInfo->ri_IndexRelationInfo = NULL;
1342 441488 : resultRelInfo->ri_needLockTagTuple =
1343 441488 : IsInplaceUpdateRelation(resultRelationDesc);
1344 : /* make a copy so as not to depend on relcache info not changing... */
1345 441488 : resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1346 441488 : if (resultRelInfo->ri_TrigDesc)
1347 : {
1348 17260 : int n = resultRelInfo->ri_TrigDesc->numtriggers;
1349 :
1350 17260 : resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1351 17260 : palloc0(n * sizeof(FmgrInfo));
1352 17260 : resultRelInfo->ri_TrigWhenExprs = (ExprState **)
1353 17260 : palloc0(n * sizeof(ExprState *));
1354 17260 : if (instrument_options)
1355 0 : resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options, false);
1356 : }
1357 : else
1358 : {
1359 424228 : resultRelInfo->ri_TrigFunctions = NULL;
1360 424228 : resultRelInfo->ri_TrigWhenExprs = NULL;
1361 424228 : resultRelInfo->ri_TrigInstrument = NULL;
1362 : }
1363 441488 : if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1364 690 : resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
1365 : else
1366 440798 : resultRelInfo->ri_FdwRoutine = NULL;
1367 :
1368 : /* The following fields are set later if needed */
1369 441488 : resultRelInfo->ri_RowIdAttNo = 0;
1370 441488 : resultRelInfo->ri_extraUpdatedCols = NULL;
1371 441488 : resultRelInfo->ri_projectNew = NULL;
1372 441488 : resultRelInfo->ri_newTupleSlot = NULL;
1373 441488 : resultRelInfo->ri_oldTupleSlot = NULL;
1374 441488 : resultRelInfo->ri_projectNewInfoValid = false;
1375 441488 : resultRelInfo->ri_FdwState = NULL;
1376 441488 : resultRelInfo->ri_usesFdwDirectModify = false;
1377 441488 : resultRelInfo->ri_CheckConstraintExprs = NULL;
1378 441488 : resultRelInfo->ri_GenVirtualNotNullConstraintExprs = NULL;
1379 441488 : resultRelInfo->ri_GeneratedExprsI = NULL;
1380 441488 : resultRelInfo->ri_GeneratedExprsU = NULL;
1381 441488 : resultRelInfo->ri_projectReturning = NULL;
1382 441488 : resultRelInfo->ri_onConflictArbiterIndexes = NIL;
1383 441488 : resultRelInfo->ri_onConflict = NULL;
1384 441488 : resultRelInfo->ri_ReturningSlot = NULL;
1385 441488 : resultRelInfo->ri_TrigOldSlot = NULL;
1386 441488 : resultRelInfo->ri_TrigNewSlot = NULL;
1387 441488 : resultRelInfo->ri_AllNullSlot = NULL;
1388 441488 : resultRelInfo->ri_MergeActions[MERGE_WHEN_MATCHED] = NIL;
1389 441488 : resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] = NIL;
1390 441488 : resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET] = NIL;
1391 441488 : resultRelInfo->ri_MergeJoinCondition = NULL;
1392 :
1393 : /*
1394 : * Only ExecInitPartitionInfo() and ExecInitPartitionDispatchInfo() pass
1395 : * non-NULL partition_root_rri. For child relations that are part of the
1396 : * initial query rather than being dynamically added by tuple routing,
1397 : * this field is filled in ExecInitModifyTable().
1398 : */
1399 441488 : resultRelInfo->ri_RootResultRelInfo = partition_root_rri;
1400 : /* Set by ExecGetRootToChildMap */
1401 441488 : resultRelInfo->ri_RootToChildMap = NULL;
1402 441488 : resultRelInfo->ri_RootToChildMapValid = false;
1403 : /* Set by ExecInitRoutingInfo */
1404 441488 : resultRelInfo->ri_PartitionTupleSlot = NULL;
1405 441488 : resultRelInfo->ri_ChildToRootMap = NULL;
1406 441488 : resultRelInfo->ri_ChildToRootMapValid = false;
1407 441488 : resultRelInfo->ri_CopyMultiInsertBuffer = NULL;
1408 441488 : }
1409 :
1410 : /*
1411 : * ExecGetTriggerResultRel
1412 : * Get a ResultRelInfo for a trigger target relation.
1413 : *
1414 : * Most of the time, triggers are fired on one of the result relations of the
1415 : * query, and so we can just return a member of the es_result_relations array,
1416 : * or the es_tuple_routing_result_relations list (if any). (Note: in self-join
1417 : * situations there might be multiple members with the same OID; if so it
1418 : * doesn't matter which one we pick.)
1419 : *
1420 : * However, it is sometimes necessary to fire triggers on other relations;
1421 : * this happens mainly when an RI update trigger queues additional triggers
1422 : * on other relations, which will be processed in the context of the outer
1423 : * query. For efficiency's sake, we want to have a ResultRelInfo for those
1424 : * triggers too; that can avoid repeated re-opening of the relation. (It
1425 : * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
1426 : * triggers.) So we make additional ResultRelInfo's as needed, and save them
1427 : * in es_trig_target_relations.
1428 : */
1429 : ResultRelInfo *
1430 8076 : ExecGetTriggerResultRel(EState *estate, Oid relid,
1431 : ResultRelInfo *rootRelInfo)
1432 : {
1433 : ResultRelInfo *rInfo;
1434 : ListCell *l;
1435 : Relation rel;
1436 : MemoryContext oldcontext;
1437 :
1438 : /* Search through the query result relations */
1439 10274 : foreach(l, estate->es_opened_result_relations)
1440 : {
1441 8844 : rInfo = lfirst(l);
1442 8844 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1443 6646 : return rInfo;
1444 : }
1445 :
1446 : /*
1447 : * Search through the result relations that were created during tuple
1448 : * routing, if any.
1449 : */
1450 1644 : foreach(l, estate->es_tuple_routing_result_relations)
1451 : {
1452 886 : rInfo = (ResultRelInfo *) lfirst(l);
1453 886 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1454 672 : return rInfo;
1455 : }
1456 :
1457 : /* Nope, but maybe we already made an extra ResultRelInfo for it */
1458 1100 : foreach(l, estate->es_trig_target_relations)
1459 : {
1460 372 : rInfo = (ResultRelInfo *) lfirst(l);
1461 372 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1462 30 : return rInfo;
1463 : }
1464 : /* Nope, so we need a new one */
1465 :
1466 : /*
1467 : * Open the target relation's relcache entry. We assume that an
1468 : * appropriate lock is still held by the backend from whenever the trigger
1469 : * event got queued, so we need take no new lock here. Also, we need not
1470 : * recheck the relkind, so no need for CheckValidResultRel.
1471 : */
1472 728 : rel = table_open(relid, NoLock);
1473 :
1474 : /*
1475 : * Make the new entry in the right context.
1476 : */
1477 728 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1478 728 : rInfo = makeNode(ResultRelInfo);
1479 728 : InitResultRelInfo(rInfo,
1480 : rel,
1481 : 0, /* dummy rangetable index */
1482 : rootRelInfo,
1483 : estate->es_instrument);
1484 728 : estate->es_trig_target_relations =
1485 728 : lappend(estate->es_trig_target_relations, rInfo);
1486 728 : MemoryContextSwitchTo(oldcontext);
1487 :
1488 : /*
1489 : * Currently, we don't need any index information in ResultRelInfos used
1490 : * only for triggers, so no need to call ExecOpenIndices.
1491 : */
1492 :
1493 728 : return rInfo;
1494 : }
1495 :
1496 : /*
1497 : * Return the ancestor relations of a given leaf partition result relation
1498 : * up to and including the query's root target relation.
1499 : *
1500 : * These work much like the ones opened by ExecGetTriggerResultRel, except
1501 : * that we need to keep them in a separate list.
1502 : *
1503 : * These are closed by ExecCloseResultRelations.
1504 : */
1505 : List *
1506 300 : ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo)
1507 : {
1508 300 : ResultRelInfo *rootRelInfo = resultRelInfo->ri_RootResultRelInfo;
1509 300 : Relation partRel = resultRelInfo->ri_RelationDesc;
1510 : Oid rootRelOid;
1511 :
1512 300 : if (!partRel->rd_rel->relispartition)
1513 0 : elog(ERROR, "cannot find ancestors of a non-partition result relation");
1514 : Assert(rootRelInfo != NULL);
1515 300 : rootRelOid = RelationGetRelid(rootRelInfo->ri_RelationDesc);
1516 300 : if (resultRelInfo->ri_ancestorResultRels == NIL)
1517 : {
1518 : ListCell *lc;
1519 234 : List *oids = get_partition_ancestors(RelationGetRelid(partRel));
1520 234 : List *ancResultRels = NIL;
1521 :
1522 300 : foreach(lc, oids)
1523 : {
1524 300 : Oid ancOid = lfirst_oid(lc);
1525 : Relation ancRel;
1526 : ResultRelInfo *rInfo;
1527 :
1528 : /*
1529 : * Ignore the root ancestor here, and use ri_RootResultRelInfo
1530 : * (below) for it instead. Also, we stop climbing up the
1531 : * hierarchy when we find the table that was mentioned in the
1532 : * query.
1533 : */
1534 300 : if (ancOid == rootRelOid)
1535 234 : break;
1536 :
1537 : /*
1538 : * All ancestors up to the root target relation must have been
1539 : * locked by the planner or AcquireExecutorLocks().
1540 : */
1541 66 : ancRel = table_open(ancOid, NoLock);
1542 66 : rInfo = makeNode(ResultRelInfo);
1543 :
1544 : /* dummy rangetable index */
1545 66 : InitResultRelInfo(rInfo, ancRel, 0, NULL,
1546 : estate->es_instrument);
1547 66 : ancResultRels = lappend(ancResultRels, rInfo);
1548 : }
1549 234 : ancResultRels = lappend(ancResultRels, rootRelInfo);
1550 234 : resultRelInfo->ri_ancestorResultRels = ancResultRels;
1551 : }
1552 :
1553 : /* We must have found some ancestor */
1554 : Assert(resultRelInfo->ri_ancestorResultRels != NIL);
1555 :
1556 300 : return resultRelInfo->ri_ancestorResultRels;
1557 : }
1558 :
1559 : /* ----------------------------------------------------------------
1560 : * ExecPostprocessPlan
1561 : *
1562 : * Give plan nodes a final chance to execute before shutdown
1563 : * ----------------------------------------------------------------
1564 : */
1565 : static void
1566 632752 : ExecPostprocessPlan(EState *estate)
1567 : {
1568 : ListCell *lc;
1569 :
1570 : /*
1571 : * Make sure nodes run forward.
1572 : */
1573 632752 : estate->es_direction = ForwardScanDirection;
1574 :
1575 : /*
1576 : * Run any secondary ModifyTable nodes to completion, in case the main
1577 : * query did not fetch all rows from them. (We do this to ensure that
1578 : * such nodes have predictable results.)
1579 : */
1580 633596 : foreach(lc, estate->es_auxmodifytables)
1581 : {
1582 844 : PlanState *ps = (PlanState *) lfirst(lc);
1583 :
1584 : for (;;)
1585 138 : {
1586 : TupleTableSlot *slot;
1587 :
1588 : /* Reset the per-output-tuple exprcontext each time */
1589 982 : ResetPerTupleExprContext(estate);
1590 :
1591 982 : slot = ExecProcNode(ps);
1592 :
1593 982 : if (TupIsNull(slot))
1594 : break;
1595 : }
1596 : }
1597 632752 : }
1598 :
1599 : /* ----------------------------------------------------------------
1600 : * ExecEndPlan
1601 : *
1602 : * Cleans up the query plan -- closes files and frees up storage
1603 : *
1604 : * NOTE: we are no longer very worried about freeing storage per se
1605 : * in this code; FreeExecutorState should be guaranteed to release all
1606 : * memory that needs to be released. What we are worried about doing
1607 : * is closing relations and dropping buffer pins. Thus, for example,
1608 : * tuple tables must be cleared or dropped to ensure pins are released.
1609 : * ----------------------------------------------------------------
1610 : */
1611 : static void
1612 651706 : ExecEndPlan(PlanState *planstate, EState *estate)
1613 : {
1614 : ListCell *l;
1615 :
1616 : /*
1617 : * shut down the node-type-specific query processing
1618 : */
1619 651706 : ExecEndNode(planstate);
1620 :
1621 : /*
1622 : * for subplans too
1623 : */
1624 695074 : foreach(l, estate->es_subplanstates)
1625 : {
1626 43368 : PlanState *subplanstate = (PlanState *) lfirst(l);
1627 :
1628 43368 : ExecEndNode(subplanstate);
1629 : }
1630 :
1631 : /*
1632 : * destroy the executor's tuple table. Actually we only care about
1633 : * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1634 : * the TupleTableSlots, since the containing memory context is about to go
1635 : * away anyway.
1636 : */
1637 651706 : ExecResetTupleTable(estate->es_tupleTable, false);
1638 :
1639 : /*
1640 : * Close any Relations that have been opened for range table entries or
1641 : * result relations.
1642 : */
1643 651706 : ExecCloseResultRelations(estate);
1644 651706 : ExecCloseRangeTableRelations(estate);
1645 651706 : }
1646 :
1647 : /*
1648 : * Close any relations that have been opened for ResultRelInfos.
1649 : */
1650 : void
1651 654826 : ExecCloseResultRelations(EState *estate)
1652 : {
1653 : ListCell *l;
1654 :
1655 : /*
1656 : * close indexes of result relation(s) if any. (Rels themselves are
1657 : * closed in ExecCloseRangeTableRelations())
1658 : *
1659 : * In addition, close the stub ResultRelInfos that may be in each resultrel's
1660 : * ri_ancestorResultRels.
1661 : */
1662 778516 : foreach(l, estate->es_opened_result_relations)
1663 : {
1664 123690 : ResultRelInfo *resultRelInfo = lfirst(l);
1665 : ListCell *lc;
1666 :
1667 123690 : ExecCloseIndices(resultRelInfo);
1668 123942 : foreach(lc, resultRelInfo->ri_ancestorResultRels)
1669 : {
1670 252 : ResultRelInfo *rInfo = lfirst(lc);
1671 :
1672 : /*
1673 : * Ancestors with RTI > 0 (should only be the root ancestor) are
1674 : * closed by ExecCloseRangeTableRelations.
1675 : */
1676 252 : if (rInfo->ri_RangeTableIndex > 0)
1677 204 : continue;
1678 :
1679 48 : table_close(rInfo->ri_RelationDesc, NoLock);
1680 : }
1681 : }
1682 :
1683 : /* Close any relations that have been opened by ExecGetTriggerResultRel(). */
1684 655336 : foreach(l, estate->es_trig_target_relations)
1685 : {
1686 510 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1687 :
1688 : /*
1689 : * Assert this is a "dummy" ResultRelInfo, see above. Otherwise we
1690 : * might be issuing a duplicate close against a Relation opened by
1691 : * ExecGetRangeTableRelation.
1692 : */
1693 : Assert(resultRelInfo->ri_RangeTableIndex == 0);
1694 :
1695 : /*
1696 : * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for
1697 : * these rels, we needn't call ExecCloseIndices either.
1698 : */
1699 : Assert(resultRelInfo->ri_NumIndices == 0);
1700 :
1701 510 : table_close(resultRelInfo->ri_RelationDesc, NoLock);
1702 : }
1703 654826 : }
1704 :
1705 : /*
1706 : * Close all relations opened by ExecGetRangeTableRelation().
1707 : *
1708 : * We do not release any locks we might hold on those rels.
1709 : */
1710 : void
1711 654420 : ExecCloseRangeTableRelations(EState *estate)
1712 : {
1713 : int i;
1714 :
1715 1922586 : for (i = 0; i < estate->es_range_table_size; i++)
1716 : {
1717 1268166 : if (estate->es_relations[i])
1718 582234 : table_close(estate->es_relations[i], NoLock);
1719 : }
1720 654420 : }
1721 :
1722 : /* ----------------------------------------------------------------
1723 : * ExecutePlan
1724 : *
1725 : * Processes the query plan until we have retrieved 'numberTuples' tuples,
1726 : * moving in the specified direction.
1727 : *
1728 : * Runs to completion if numberTuples is 0
1729 : * ----------------------------------------------------------------
1730 : */
1731 : static void
1732 669200 : ExecutePlan(QueryDesc *queryDesc,
1733 : CmdType operation,
1734 : bool sendTuples,
1735 : uint64 numberTuples,
1736 : ScanDirection direction,
1737 : DestReceiver *dest)
1738 : {
1739 669200 : EState *estate = queryDesc->estate;
1740 669200 : PlanState *planstate = queryDesc->planstate;
1741 : bool use_parallel_mode;
1742 : TupleTableSlot *slot;
1743 : uint64 current_tuple_count;
1744 :
1745 : /*
1746 : * initialize local variables
1747 : */
1748 669200 : current_tuple_count = 0;
1749 :
1750 : /*
1751 : * Set the direction.
1752 : */
1753 669200 : estate->es_direction = direction;
1754 :
1755 : /*
1756 : * Set up parallel mode if appropriate.
1757 : *
1758 : * Parallel mode only supports complete execution of a plan. If we've
1759 : * already partially executed it, or if the caller asks us to exit early,
1760 : * we must force the plan to run without parallelism.
1761 : */
1762 669200 : if (queryDesc->already_executed || numberTuples != 0)
1763 178812 : use_parallel_mode = false;
1764 : else
1765 490388 : use_parallel_mode = queryDesc->plannedstmt->parallelModeNeeded;
1766 669200 : queryDesc->already_executed = true;
1767 :
1768 669200 : estate->es_use_parallel_mode = use_parallel_mode;
1769 669200 : if (use_parallel_mode)
1770 694 : EnterParallelMode();
1771 :
1772 : /*
1773 : * Loop until we've processed the proper number of tuples from the plan.
1774 : */
1775 : for (;;)
1776 : {
1777 : /* Reset the per-output-tuple exprcontext */
1778 12806504 : ResetPerTupleExprContext(estate);
1779 :
1780 : /*
1781 : * Execute the plan and obtain a tuple
1782 : */
1783 12806504 : slot = ExecProcNode(planstate);
1784 :
1785 : /*
1786 : * if the tuple is null, then we assume there is nothing more to
1787 : * process so we just end the loop...
1788 : */
1789 12783132 : if (TupIsNull(slot))
1790 : break;
1791 :
1792 : /*
1793 : * If we have a junk filter, then project a new tuple with the junk
1794 : * removed.
1795 : *
1796 : * Store this new "clean" tuple in the junkfilter's resultSlot.
1797 : * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1798 : * because that tuple slot has the wrong descriptor.)
1799 : */
1800 12277024 : if (estate->es_junkFilter != NULL)
1801 266348 : slot = ExecFilterJunk(estate->es_junkFilter, slot);
1802 :
1803 : /*
1804 : * If we are supposed to send the tuple somewhere, do so. (In
1805 : * practice, this is probably always the case at this point.)
1806 : */
1807 12277024 : if (sendTuples)
1808 : {
1809 : /*
1810 : * If we are not able to send the tuple, we assume the destination
1811 : * has closed and no more tuples can be sent. If that's the case,
1812 : * end the loop.
1813 : */
1814 12277024 : if (!dest->receiveSlot(slot, dest))
1815 0 : break;
1816 : }
1817 :
1818 : /*
1819 : * Count tuples processed, if this is a SELECT. (For other operation
1820 : * types, the ModifyTable plan node must count the appropriate
1821 : * events.)
1822 : */
1823 12277024 : if (operation == CMD_SELECT)
1824 12270394 : (estate->es_processed)++;
1825 :
1826 : /*
1827 : * check our tuple count. If we've processed the proper number then
1828 : * quit, else loop again and process more tuples. Zero numberTuples
1829 : * means no limit.
1830 : */
1831 12277024 : current_tuple_count++;
1832 12277024 : if (numberTuples && numberTuples == current_tuple_count)
1833 139720 : break;
1834 : }
1835 :
1836 : /*
1837 : * If we know we won't need to back up, we can release resources at this
1838 : * point.
1839 : */
1840 645828 : if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD))
1841 638586 : ExecShutdownNode(planstate);
1842 :
1843 645828 : if (use_parallel_mode)
1844 682 : ExitParallelMode();
1845 645828 : }
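As a usage note, not part of this file: a minimal sketch of a caller draining an already-started query in fixed-size batches via ExecutorRun(). The 100-row batch size and the stop test are illustrative assumptions; the three-argument ExecutorRun() signature is the current one (older releases take an extra execute_once argument), and real cursor fetches go through the portal code rather than a loop like this.

    uint64      prev_processed = 0;

    for (;;)
    {
        /* Fetch up to 100 more tuples, always in the same direction. */
        ExecutorRun(queryDesc, ForwardScanDirection, 100);

        /* es_processed accumulates across calls; no growth means we're done. */
        if (queryDesc->estate->es_processed == prev_processed)
            break;
        prev_processed = queryDesc->estate->es_processed;
    }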
1846 :
1847 :
1848 : /*
1849 : * ExecRelCheck --- check that tuple meets check constraints for result relation
1850 : *
1851 : * Returns NULL if OK, else name of failed check constraint
1852 : */
1853 : static const char *
1854 3016 : ExecRelCheck(ResultRelInfo *resultRelInfo,
1855 : TupleTableSlot *slot, EState *estate)
1856 : {
1857 3016 : Relation rel = resultRelInfo->ri_RelationDesc;
1858 3016 : int ncheck = rel->rd_att->constr->num_check;
1859 3016 : ConstrCheck *check = rel->rd_att->constr->check;
1860 : ExprContext *econtext;
1861 : MemoryContext oldContext;
1862 :
1863 : /*
1864 : * CheckConstraintFetch let this pass with only a warning, but now we
1865 : * should fail rather than possibly failing to enforce an important
1866 : * constraint.
1867 : */
1868 3016 : if (ncheck != rel->rd_rel->relchecks)
1869 0 : elog(ERROR, "%d pg_constraint record(s) missing for relation \"%s\"",
1870 : rel->rd_rel->relchecks - ncheck, RelationGetRelationName(rel));
1871 :
1872 : /*
1873 : * If first time through for this result relation, build expression
1874 : * nodetrees for rel's constraint expressions. Keep them in the per-query
1875 : * memory context so they'll survive throughout the query.
1876 : */
1877 3016 : if (resultRelInfo->ri_CheckConstraintExprs == NULL)
1878 : {
1879 1340 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1880 1340 : resultRelInfo->ri_CheckConstraintExprs = palloc0_array(ExprState *, ncheck);
1881 3368 : for (int i = 0; i < ncheck; i++)
1882 : {
1883 : Expr *checkconstr;
1884 :
1885 : /* Skip constraints that are not enforced */
1886 2034 : if (!check[i].ccenforced)
1887 204 : continue;
1888 :
1889 1830 : checkconstr = stringToNode(check[i].ccbin);
1890 1830 : checkconstr = (Expr *) expand_generated_columns_in_expr((Node *) checkconstr, rel, 1);
1891 1824 : resultRelInfo->ri_CheckConstraintExprs[i] =
1892 1830 : ExecPrepareExpr(checkconstr, estate);
1893 : }
1894 1334 : MemoryContextSwitchTo(oldContext);
1895 : }
1896 :
1897 : /*
1898 : * We will use the EState's per-tuple context for evaluating constraint
1899 : * expressions (creating it if it's not already there).
1900 : */
1901 3010 : econtext = GetPerTupleExprContext(estate);
1902 :
1903 : /* Arrange for econtext's scan tuple to be the tuple under test */
1904 3010 : econtext->ecxt_scantuple = slot;
1905 :
1906 : /* And evaluate the constraints */
1907 6914 : for (int i = 0; i < ncheck; i++)
1908 : {
1909 4352 : ExprState *checkconstr = resultRelInfo->ri_CheckConstraintExprs[i];
1910 :
1911 : /*
1912 : * NOTE: SQL specifies that a NULL result from a constraint expression
1913 : * is not to be treated as a failure. Therefore, use ExecCheck not
1914 : * ExecQual.
1915 : */
1916 4352 : if (checkconstr && !ExecCheck(checkconstr, econtext))
1917 448 : return check[i].ccname;
1918 : }
1919 :
1920 : /* NULL result means no error */
1921 2562 : return NULL;
1922 : }
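The NOTE inside the loop above is SQL's three-valued-logic rule for CHECK constraints. A tiny self-contained illustration of the acceptance test that ExecCheck() applies to a constraint result, and that ExecQual() deliberately does not (it treats a NULL result as failure):

    #include <stdbool.h>

    /* A CHECK constraint is violated only by a non-NULL false result. */
    static inline bool
    check_constraint_passes(bool isnull, bool value)
    {
        return isnull || value;
    }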
1923 :
1924 : /*
1925 : * ExecPartitionCheck --- check that tuple meets the partition constraint.
1926 : *
1927 : * Returns true if it meets the partition constraint. If the constraint
1928 : * fails and we're asked to emit an error, do so and don't return; otherwise
1929 : * return false.
1930 : */
1931 : bool
1932 158602 : ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
1933 : EState *estate, bool emitError)
1934 : {
1935 : ExprContext *econtext;
1936 : bool success;
1937 :
1938 : /*
1939 : * If first time through, build expression state tree for the partition
1940 : * check expression. (In the corner case where the partition check
1941 : * expression is empty, ie there's a default partition and nothing else,
1942 : * we'll be fooled into executing this code each time through. But it's
1943 : * pretty darn cheap in that case, so we don't worry about it.)
1944 : */
1945 158602 : if (resultRelInfo->ri_PartitionCheckExpr == NULL)
1946 : {
1947 : /*
1948 : * Ensure that the qual tree and prepared expression are in the
1949 : * query-lifespan context.
1950 : */
1951 5730 : MemoryContext oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1952 5730 : List *qual = RelationGetPartitionQual(resultRelInfo->ri_RelationDesc);
1953 :
1954 5730 : resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
1955 5730 : MemoryContextSwitchTo(oldcxt);
1956 : }
1957 :
1958 : /*
1959 : * We will use the EState's per-tuple context for evaluating constraint
1960 : * expressions (creating it if it's not already there).
1961 : */
1962 158602 : econtext = GetPerTupleExprContext(estate);
1963 :
1964 : /* Arrange for econtext's scan tuple to be the tuple under test */
1965 158602 : econtext->ecxt_scantuple = slot;
1966 :
1967 : /*
1968 : * As in the case of the cataloged constraints, we treat a NULL result as
1969 : * success here, not a failure.
1970 : */
1971 158602 : success = ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
1972 :
1973 : /* if asked to emit error, don't actually return on failure */
1974 158602 : if (!success && emitError)
1975 202 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1976 :
1977 158400 : return success;
1978 : }
1979 :
1980 : /*
1981 : * ExecPartitionCheckEmitError - Form and emit an error message after a failed
1982 : * partition constraint check.
1983 : */
1984 : void
1985 250 : ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
1986 : TupleTableSlot *slot,
1987 : EState *estate)
1988 : {
1989 : Oid root_relid;
1990 : TupleDesc tupdesc;
1991 : char *val_desc;
1992 : Bitmapset *modifiedCols;
1993 :
1994 : /*
1995 : * If the tuple has been routed, it's been converted to the partition's
1996 : * rowtype, which might differ from the root table's. We must convert it
1997 : * back to the root table's rowtype so that val_desc in the error message
1998 : * matches the input tuple.
1999 : */
2000 250 : if (resultRelInfo->ri_RootResultRelInfo)
2001 : {
2002 20 : ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
2003 : TupleDesc old_tupdesc;
2004 : AttrMap *map;
2005 :
2006 20 : root_relid = RelationGetRelid(rootrel->ri_RelationDesc);
2007 20 : tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
2008 :
2009 20 : old_tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
2010 : /* a reverse map */
2011 20 : map = build_attrmap_by_name_if_req(old_tupdesc, tupdesc, false);
2012 :
2013 : /*
2014 : * Partition-specific slot's tupdesc can't be changed, so allocate a
2015 : * new one.
2016 : */
2017 20 : if (map != NULL)
2018 8 : slot = execute_attr_map_slot(map, slot,
2019 : MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
2020 20 : modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
2021 20 : ExecGetUpdatedCols(rootrel, estate));
2022 : }
2023 : else
2024 : {
2025 230 : root_relid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
2026 230 : tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
2027 230 : modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
2028 230 : ExecGetUpdatedCols(resultRelInfo, estate));
2029 : }
2030 :
2031 250 : val_desc = ExecBuildSlotValueDescription(root_relid,
2032 : slot,
2033 : tupdesc,
2034 : modifiedCols,
2035 : 64);
2036 250 : ereport(ERROR,
2037 : (errcode(ERRCODE_CHECK_VIOLATION),
2038 : errmsg("new row for relation \"%s\" violates partition constraint",
2039 : RelationGetRelationName(resultRelInfo->ri_RelationDesc)),
2040 : val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2041 : errtable(resultRelInfo->ri_RelationDesc)));
2042 : }
2043 :
2044 : /*
2045 : * ExecConstraints - check constraints of the tuple in 'slot'
2046 : *
2047 : * This checks the traditional NOT NULL and check constraints.
2048 : *
2049 : * The partition constraint is *NOT* checked.
2050 : *
2051 : * Note: 'slot' contains the tuple to check the constraints of, which may
2052 : * have been converted from the original input tuple after tuple routing.
2053 : * 'resultRelInfo' is the final result relation, after tuple routing.
2054 : */
2055 : void
2056 4574310 : ExecConstraints(ResultRelInfo *resultRelInfo,
2057 : TupleTableSlot *slot, EState *estate)
2058 : {
2059 4574310 : Relation rel = resultRelInfo->ri_RelationDesc;
2060 4574310 : TupleDesc tupdesc = RelationGetDescr(rel);
2061 4574310 : TupleConstr *constr = tupdesc->constr;
2062 : Bitmapset *modifiedCols;
2063 4574310 : List *notnull_virtual_attrs = NIL;
2064 :
2065 : Assert(constr); /* we should not be called otherwise */
2066 :
2067 : /*
2068 : * Verify not-null constraints.
2069 : *
2070 : * Not-null constraints on virtual generated columns are collected and
2071 : * checked separately below.
2072 : */
2073 4574310 : if (constr->has_not_null)
2074 : {
2075 16934146 : for (AttrNumber attnum = 1; attnum <= tupdesc->natts; attnum++)
2076 : {
2077 12366436 : Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
2078 :
2079 12366436 : if (att->attnotnull && att->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
2080 108 : notnull_virtual_attrs = lappend_int(notnull_virtual_attrs, attnum);
2081 12366328 : else if (att->attnotnull && slot_attisnull(slot, attnum))
2082 308 : ReportNotNullViolationError(resultRelInfo, slot, estate, attnum);
2083 : }
2084 : }
2085 :
2086 : /*
2087 : * Verify not-null constraints on virtual generated columns, if any.
2088 : */
2089 4574002 : if (notnull_virtual_attrs)
2090 : {
2091 : AttrNumber attnum;
2092 :
2093 100 : attnum = ExecRelGenVirtualNotNull(resultRelInfo, slot, estate,
2094 : notnull_virtual_attrs);
2095 100 : if (attnum != InvalidAttrNumber)
2096 42 : ReportNotNullViolationError(resultRelInfo, slot, estate, attnum);
2097 : }
2098 :
2099 : /*
2100 : * Verify check constraints.
2101 : */
2102 4573960 : if (rel->rd_rel->relchecks > 0)
2103 : {
2104 : const char *failed;
2105 :
2106 3016 : if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
2107 : {
2108 : char *val_desc;
2109 448 : Relation orig_rel = rel;
2110 :
2111 : /*
2112 : * If the tuple has been routed, it's been converted to the
2113 : * partition's rowtype, which might differ from the root table's.
2114 : * We must convert it back to the root table's rowtype so that
2115 : * the val_desc shown in the error message matches the input tuple.
2116 : */
2117 448 : if (resultRelInfo->ri_RootResultRelInfo)
2118 : {
2119 90 : ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
2120 90 : TupleDesc old_tupdesc = RelationGetDescr(rel);
2121 : AttrMap *map;
2122 :
2123 90 : tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
2124 : /* a reverse map */
2125 90 : map = build_attrmap_by_name_if_req(old_tupdesc,
2126 : tupdesc,
2127 : false);
2128 :
2129 : /*
2130 : * Partition-specific slot's tupdesc can't be changed, so
2131 : * allocate a new one.
2132 : */
2133 90 : if (map != NULL)
2134 60 : slot = execute_attr_map_slot(map, slot,
2135 : MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
2136 90 : modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
2137 90 : ExecGetUpdatedCols(rootrel, estate));
2138 90 : rel = rootrel->ri_RelationDesc;
2139 : }
2140 : else
2141 358 : modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
2142 358 : ExecGetUpdatedCols(resultRelInfo, estate));
2143 448 : val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2144 : slot,
2145 : tupdesc,
2146 : modifiedCols,
2147 : 64);
2148 448 : ereport(ERROR,
2149 : (errcode(ERRCODE_CHECK_VIOLATION),
2150 : errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2151 : RelationGetRelationName(orig_rel), failed),
2152 : val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2153 : errtableconstraint(orig_rel, failed)));
2154 : }
2155 : }
2156 4573506 : }
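A hedged sketch of the caller-side ordering, loosely condensed from ExecInsert() in nodeModifyTable.c (conditions simplified; after tuple routing the partition constraint is normally already guaranteed and the second check is skipped):

    /* NOT NULL and CHECK constraints, only if the relation has any. */
    if (resultRelInfo->ri_RelationDesc->rd_att->constr)
        ExecConstraints(resultRelInfo, slot, estate);

    /* The partition constraint is checked separately, when required. */
    if (resultRelInfo->ri_RelationDesc->rd_rel->relispartition)
        ExecPartitionCheck(resultRelInfo, slot, estate, true);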
2157 :
2158 : /*
2159 : * Verify not-null constraints on virtual generated columns of the given
2160 : * tuple slot.
2161 : *
2162 : * A return value of InvalidAttrNumber means all not-null constraints on virtual
2163 : * generated columns are satisfied. A return value > 0 means a not-null
2164 : * violation happened for that attribute.
2165 : *
2166 : * notnull_virtual_attrs is the list of the attnums of virtual generated column with
2167 : * not-null constraints.
2168 : */
2169 : AttrNumber
2170 184 : ExecRelGenVirtualNotNull(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
2171 : EState *estate, List *notnull_virtual_attrs)
2172 : {
2173 184 : Relation rel = resultRelInfo->ri_RelationDesc;
2174 : ExprContext *econtext;
2175 : MemoryContext oldContext;
2176 :
2177 : /*
2178 : * We implement this by building a NullTest node for each virtual
2179 : * generated column, which we cache in resultRelInfo, and running those
2180 : * through ExecCheck().
2181 : */
2182 184 : if (resultRelInfo->ri_GenVirtualNotNullConstraintExprs == NULL)
2183 : {
2184 132 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
2185 132 : resultRelInfo->ri_GenVirtualNotNullConstraintExprs =
2186 132 : palloc0_array(ExprState *, list_length(notnull_virtual_attrs));
2187 :
2188 430 : foreach_int(attnum, notnull_virtual_attrs)
2189 : {
2190 166 : int i = foreach_current_index(attnum);
2191 : NullTest *nnulltest;
2192 :
2193 : /* "generated_expression IS NOT NULL" check. */
2194 166 : nnulltest = makeNode(NullTest);
2195 166 : nnulltest->arg = (Expr *) build_generation_expression(rel, attnum);
2196 166 : nnulltest->nulltesttype = IS_NOT_NULL;
2197 166 : nnulltest->argisrow = false;
2198 166 : nnulltest->location = -1;
2199 :
2200 166 : resultRelInfo->ri_GenVirtualNotNullConstraintExprs[i] =
2201 166 : ExecPrepareExpr((Expr *) nnulltest, estate);
2202 : }
2203 132 : MemoryContextSwitchTo(oldContext);
2204 : }
2205 :
2206 : /*
2207 : * We will use the EState's per-tuple context for evaluating the not-null
2208 : * constraint expressions on virtual generated columns (creating it if it's
2209 : * not already there).
2210 : */
2211 184 : econtext = GetPerTupleExprContext(estate);
2212 :
2213 : /* Arrange for econtext's scan tuple to be the tuple under test */
2214 184 : econtext->ecxt_scantuple = slot;
2215 :
2216 : /* And evaluate the not-null checks for the virtual generated columns */
2217 470 : foreach_int(attnum, notnull_virtual_attrs)
2218 : {
2219 246 : int i = foreach_current_index(attnum);
2220 246 : ExprState *exprstate = resultRelInfo->ri_GenVirtualNotNullConstraintExprs[i];
2221 :
2222 : Assert(exprstate != NULL);
2223 246 : if (!ExecCheck(exprstate, econtext))
2224 72 : return attnum;
2225 : }
2226 :
2227 : /* InvalidAttrNumber result means no error */
2228 112 : return InvalidAttrNumber;
2229 : }
2230 :
2231 : /*
2232 : * Report a violation of a not-null constraint that was already detected.
2233 : */
2234 : static void
2235 350 : ReportNotNullViolationError(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
2236 : EState *estate, int attnum)
2237 : {
2238 : Bitmapset *modifiedCols;
2239 : char *val_desc;
2240 350 : Relation rel = resultRelInfo->ri_RelationDesc;
2241 350 : Relation orig_rel = rel;
2242 350 : TupleDesc tupdesc = RelationGetDescr(rel);
2243 350 : TupleDesc orig_tupdesc = RelationGetDescr(rel);
2244 350 : Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
2245 :
2246 : Assert(attnum > 0);
2247 :
2248 : /*
2249 : * If the tuple has been routed, it's been converted to the partition's
2250 : * rowtype, which might differ from the root table's. We must convert it
2251 : * back to the root table's rowtype so that the val_desc shown in the error
2252 : * message matches the input tuple.
2253 : */
2254 350 : if (resultRelInfo->ri_RootResultRelInfo)
2255 : {
2256 72 : ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
2257 : AttrMap *map;
2258 :
2259 72 : tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
2260 : /* a reverse map */
2261 72 : map = build_attrmap_by_name_if_req(orig_tupdesc,
2262 : tupdesc,
2263 : false);
2264 :
2265 : /*
2266 : * Partition-specific slot's tupdesc can't be changed, so allocate a
2267 : * new one.
2268 : */
2269 72 : if (map != NULL)
2270 42 : slot = execute_attr_map_slot(map, slot,
2271 : MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
2272 72 : modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
2273 72 : ExecGetUpdatedCols(rootrel, estate));
2274 72 : rel = rootrel->ri_RelationDesc;
2275 : }
2276 : else
2277 278 : modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
2278 278 : ExecGetUpdatedCols(resultRelInfo, estate));
2279 :
2280 350 : val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2281 : slot,
2282 : tupdesc,
2283 : modifiedCols,
2284 : 64);
2285 350 : ereport(ERROR,
2286 : errcode(ERRCODE_NOT_NULL_VIOLATION),
2287 : errmsg("null value in column \"%s\" of relation \"%s\" violates not-null constraint",
2288 : NameStr(att->attname),
2289 : RelationGetRelationName(orig_rel)),
2290 : val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2291 : errtablecol(orig_rel, attnum));
2292 : }
2293 :
2294 : /*
2295 : * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
2296 : * of the specified kind.
2297 : *
2298 : * Note that this needs to be called multiple times to ensure that all kinds of
2299 : * WITH CHECK OPTIONs are handled (both those from views which have the WITH
2300 : * CHECK OPTION set and from row-level security policies). See ExecInsert()
2301 : * and ExecUpdate().
2302 : */
2303 : void
2304 2072 : ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
2305 : TupleTableSlot *slot, EState *estate)
2306 : {
2307 2072 : Relation rel = resultRelInfo->ri_RelationDesc;
2308 2072 : TupleDesc tupdesc = RelationGetDescr(rel);
2309 : ExprContext *econtext;
2310 : ListCell *l1,
2311 : *l2;
2312 :
2313 : /*
2314 : * We will use the EState's per-tuple context for evaluating constraint
2315 : * expressions (creating it if it's not already there).
2316 : */
2317 2072 : econtext = GetPerTupleExprContext(estate);
2318 :
2319 : /* Arrange for econtext's scan tuple to be the tuple under test */
2320 2072 : econtext->ecxt_scantuple = slot;
2321 :
2322 : /* Check each of the constraints */
2323 4918 : forboth(l1, resultRelInfo->ri_WithCheckOptions,
2324 : l2, resultRelInfo->ri_WithCheckOptionExprs)
2325 : {
2326 3362 : WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
2327 3362 : ExprState *wcoExpr = (ExprState *) lfirst(l2);
2328 :
2329 : /*
2330 : * Skip any WCOs which are not the kind we are looking for at this
2331 : * time.
2332 : */
2333 3362 : if (wco->kind != kind)
2334 1858 : continue;
2335 :
2336 : /*
2337 : * WITH CHECK OPTION checks are intended to ensure that the new tuple
2338 : * is visible (in the case of a view) or that it passes the
2339 : * 'with-check' policy (in the case of row security). If the qual
2340 : * evaluates to NULL or FALSE, then the new tuple won't be included in
2341 : * the view or doesn't pass the 'with-check' policy for the table.
2342 : */
2343 1504 : if (!ExecQual(wcoExpr, econtext))
2344 : {
2345 : char *val_desc;
2346 : Bitmapset *modifiedCols;
2347 :
2348 516 : switch (wco->kind)
2349 : {
2350 : /*
2351 : * For WITH CHECK OPTIONs coming from views, we might be
2352 : * able to provide the details on the row, depending on
2353 : * the permissions on the relation (that is, if the user
2354 : * could view it directly anyway). For RLS violations, we
2355 : * don't include the data since we don't know if the user
2356 : * should be able to view the tuple as that depends on the
2357 : * USING policy.
2358 : */
2359 228 : case WCO_VIEW_CHECK:
2360 : /* See the comment in ExecConstraints(). */
2361 228 : if (resultRelInfo->ri_RootResultRelInfo)
2362 : {
2363 42 : ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
2364 42 : TupleDesc old_tupdesc = RelationGetDescr(rel);
2365 : AttrMap *map;
2366 :
2367 42 : tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
2368 : /* a reverse map */
2369 42 : map = build_attrmap_by_name_if_req(old_tupdesc,
2370 : tupdesc,
2371 : false);
2372 :
2373 : /*
2374 : * Partition-specific slot's tupdesc can't be changed,
2375 : * so allocate a new one.
2376 : */
2377 42 : if (map != NULL)
2378 24 : slot = execute_attr_map_slot(map, slot,
2379 : MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
2380 :
2381 42 : modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
2382 42 : ExecGetUpdatedCols(rootrel, estate));
2383 42 : rel = rootrel->ri_RelationDesc;
2384 : }
2385 : else
2386 186 : modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
2387 186 : ExecGetUpdatedCols(resultRelInfo, estate));
2388 228 : val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2389 : slot,
2390 : tupdesc,
2391 : modifiedCols,
2392 : 64);
2393 :
2394 228 : ereport(ERROR,
2395 : (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
2396 : errmsg("new row violates check option for view \"%s\"",
2397 : wco->relname),
2398 : val_desc ? errdetail("Failing row contains %s.",
2399 : val_desc) : 0));
2400 : break;
2401 240 : case WCO_RLS_INSERT_CHECK:
2402 : case WCO_RLS_UPDATE_CHECK:
2403 240 : if (wco->polname != NULL)
2404 60 : ereport(ERROR,
2405 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2406 : errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
2407 : wco->polname, wco->relname)));
2408 : else
2409 180 : ereport(ERROR,
2410 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2411 : errmsg("new row violates row-level security policy for table \"%s\"",
2412 : wco->relname)));
2413 : break;
2414 24 : case WCO_RLS_MERGE_UPDATE_CHECK:
2415 : case WCO_RLS_MERGE_DELETE_CHECK:
2416 24 : if (wco->polname != NULL)
2417 0 : ereport(ERROR,
2418 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2419 : errmsg("target row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
2420 : wco->polname, wco->relname)));
2421 : else
2422 24 : ereport(ERROR,
2423 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2424 : errmsg("target row violates row-level security policy (USING expression) for table \"%s\"",
2425 : wco->relname)));
2426 : break;
2427 24 : case WCO_RLS_CONFLICT_CHECK:
2428 24 : if (wco->polname != NULL)
2429 0 : ereport(ERROR,
2430 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2431 : errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
2432 : wco->polname, wco->relname)));
2433 : else
2434 24 : ereport(ERROR,
2435 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2436 : errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
2437 : wco->relname)));
2438 : break;
2439 0 : default:
2440 0 : elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
2441 : break;
2442 : }
2443 988 : }
2444 : }
2445 1556 : }
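A hedged sketch of the "called multiple times" pattern mentioned in the header, loosely modeled on ExecInsert() in nodeModifyTable.c: RLS WITH CHECK policies are applied before the row is stored, view CHECK OPTIONs only afterwards, once triggers and defaults have had their say:

    if (resultRelInfo->ri_WithCheckOptions != NIL)
        ExecWithCheckOptions(WCO_RLS_INSERT_CHECK,
                             resultRelInfo, slot, estate);

    /* ... table_tuple_insert(), index insertion, etc. ... */

    if (resultRelInfo->ri_WithCheckOptions != NIL)
        ExecWithCheckOptions(WCO_VIEW_CHECK,
                             resultRelInfo, slot, estate);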
2446 :
2447 : /*
2448 : * ExecBuildSlotValueDescription -- construct a string representing a tuple
2449 : *
2450 : * This is intentionally very similar to BuildIndexValueDescription, but
2451 : * unlike that function, we truncate long field values (to at most maxfieldlen
2452 : * bytes). That seems necessary here since heap field values could be very
2453 : * long, whereas index entries typically aren't so wide.
2454 : *
2455 : * Also, unlike the case with index entries, we need to be prepared to ignore
2456 : * dropped columns. We used to use the slot's tuple descriptor to decode the
2457 : * data, but the slot's descriptor doesn't identify dropped columns, so we
2458 : * now need to be passed the relation's descriptor.
2459 : *
2460 : * Note that, like BuildIndexValueDescription, if the user does not have
2461 : * permission to view any of the columns involved, a NULL is returned. Unlike
2462 : * BuildIndexValueDescription, if the user has access to view a subset of the
2463 : * columns involved, that subset will be returned with a key identifying which
2464 : * columns they are.
2465 : */
2466 : char *
2467 1358 : ExecBuildSlotValueDescription(Oid reloid,
2468 : TupleTableSlot *slot,
2469 : TupleDesc tupdesc,
2470 : Bitmapset *modifiedCols,
2471 : int maxfieldlen)
2472 : {
2473 : StringInfoData buf;
2474 : StringInfoData collist;
2475 1358 : bool write_comma = false;
2476 1358 : bool write_comma_collist = false;
2477 : int i;
2478 : AclResult aclresult;
2479 1358 : bool table_perm = false;
2480 1358 : bool any_perm = false;
2481 :
2482 : /*
2483 : * Check if RLS is enabled and should be active for the relation; if so,
2484 : * then don't return anything. Otherwise, go through normal permission
2485 : * checks.
2486 : */
2487 1358 : if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
2488 0 : return NULL;
2489 :
2490 1358 : initStringInfo(&buf);
2491 :
2492 1358 : appendStringInfoChar(&buf, '(');
2493 :
2494 : /*
2495 : * Check if the user has permissions to see the row. Table-level SELECT
2496 : * allows access to all columns. If the user does not have table-level
2497 : * SELECT then we check each column and include those the user has SELECT
2498 : * rights on. Additionally, we always include columns the user provided
2499 : * data for.
2500 : */
2501 1358 : aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
2502 1358 : if (aclresult != ACLCHECK_OK)
2503 : {
2504 : /* Set up the buffer for the column list */
2505 60 : initStringInfo(&collist);
2506 60 : appendStringInfoChar(&collist, '(');
2507 : }
2508 : else
2509 1298 : table_perm = any_perm = true;
2510 :
2511 : /* Make sure the tuple is fully deconstructed */
2512 1358 : slot_getallattrs(slot);
2513 :
2514 4842 : for (i = 0; i < tupdesc->natts; i++)
2515 : {
2516 3484 : bool column_perm = false;
2517 : char *val;
2518 : int vallen;
2519 3484 : Form_pg_attribute att = TupleDescAttr(tupdesc, i);
2520 :
2521 : /* ignore dropped columns */
2522 3484 : if (att->attisdropped)
2523 38 : continue;
2524 :
2525 3446 : if (!table_perm)
2526 : {
2527 : /*
2528 : * No table-level SELECT, so need to make sure they either have
2529 : * SELECT rights on the column or that they have provided the data
2530 : * for the column. If not, omit this column from the error
2531 : * message.
2532 : */
2533 234 : aclresult = pg_attribute_aclcheck(reloid, att->attnum,
2534 : GetUserId(), ACL_SELECT);
2535 234 : if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
2536 138 : modifiedCols) || aclresult == ACLCHECK_OK)
2537 : {
2538 144 : column_perm = any_perm = true;
2539 :
2540 144 : if (write_comma_collist)
2541 84 : appendStringInfoString(&collist, ", ");
2542 : else
2543 60 : write_comma_collist = true;
2544 :
2545 144 : appendStringInfoString(&collist, NameStr(att->attname));
2546 : }
2547 : }
2548 :
2549 3446 : if (table_perm || column_perm)
2550 : {
2551 3356 : if (att->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
2552 54 : val = "virtual";
2553 3302 : else if (slot->tts_isnull[i])
2554 628 : val = "null";
2555 : else
2556 : {
2557 : Oid foutoid;
2558 : bool typisvarlena;
2559 :
2560 2674 : getTypeOutputInfo(att->atttypid,
2561 : &foutoid, &typisvarlena);
2562 2674 : val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
2563 : }
2564 :
2565 3356 : if (write_comma)
2566 1998 : appendStringInfoString(&buf, ", ");
2567 : else
2568 1358 : write_comma = true;
2569 :
2570 : /* truncate if needed */
2571 3356 : vallen = strlen(val);
2572 3356 : if (vallen <= maxfieldlen)
2573 3354 : appendBinaryStringInfo(&buf, val, vallen);
2574 : else
2575 : {
2576 2 : vallen = pg_mbcliplen(val, vallen, maxfieldlen);
2577 2 : appendBinaryStringInfo(&buf, val, vallen);
2578 2 : appendStringInfoString(&buf, "...");
2579 : }
2580 : }
2581 : }
2582 :
2583 : /* If we end up with zero columns being returned, then return NULL. */
2584 1358 : if (!any_perm)
2585 0 : return NULL;
2586 :
2587 1358 : appendStringInfoChar(&buf, ')');
2588 :
2589 1358 : if (!table_perm)
2590 : {
2591 60 : appendStringInfoString(&collist, ") = ");
2592 60 : appendBinaryStringInfo(&collist, buf.data, buf.len);
2593 :
2594 60 : return collist.data;
2595 : }
2596 :
2597 1298 : return buf.data;
2598 : }
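Illustrative shapes of the result, with made-up values, plus the NULL-guard call pattern used by the ereport() sites earlier in this file:

    /*
     * With table-level SELECT rights:    (1, null, foo)
     * With SELECT on columns a, c only:  (a, c) = (1, foo)
     * No visible columns at all:         NULL -- caller must not print it
     */
    char   *val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
                                                     slot,
                                                     RelationGetDescr(rel),
                                                     modifiedCols,
                                                     64);

    ereport(ERROR,
            (errcode(ERRCODE_CHECK_VIOLATION),
             errmsg("new row for relation \"%s\" violates a constraint",
                    RelationGetRelationName(rel)),
             val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));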
2599 :
2600 :
2601 : /*
2602 : * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2603 : * given ResultRelInfo
2604 : */
2605 : LockTupleMode
2606 7880 : ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2607 : {
2608 : Bitmapset *keyCols;
2609 : Bitmapset *updatedCols;
2610 :
2611 : /*
2612 : * Compute lock mode to use. If columns that are part of the key have not
2613 : * been modified, then we can use a weaker lock, allowing for better
2614 : * concurrency.
2615 : */
2616 7880 : updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2617 7880 : keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2618 : INDEX_ATTR_BITMAP_KEY);
2619 :
2620 7880 : if (bms_overlap(keyCols, updatedCols))
2621 278 : return LockTupleExclusive;
2622 :
2623 7602 : return LockTupleNoKeyExclusive;
2624 : }
2625 :
2626 : /*
2627 : * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2628 : *
2629 : * If no such struct, either return NULL or throw error depending on missing_ok
2630 : */
2631 : ExecRowMark *
2632 10758 : ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2633 : {
2634 10758 : if (rti > 0 && rti <= estate->es_range_table_size &&
2635 10758 : estate->es_rowmarks != NULL)
2636 : {
2637 10758 : ExecRowMark *erm = estate->es_rowmarks[rti - 1];
2638 :
2639 10758 : if (erm)
2640 10758 : return erm;
2641 : }
2642 0 : if (!missing_ok)
2643 0 : elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2644 0 : return NULL;
2645 : }
2646 :
2647 : /*
2648 : * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2649 : *
2650 : * Inputs are the underlying ExecRowMark struct and the targetlist of the
2651 : * input plan node (not planstate node!). We need the latter to find out
2652 : * the column numbers of the resjunk columns.
2653 : */
2654 : ExecAuxRowMark *
2655 10758 : ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2656 : {
2657 10758 : ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2658 : char resname[32];
2659 :
2660 10758 : aerm->rowmark = erm;
2661 :
2662 : /* Look up the resjunk columns associated with this rowmark */
2663 10758 : if (erm->markType != ROW_MARK_COPY)
2664 : {
2665 : /* need ctid for all methods other than COPY */
2666 10486 : snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2667 10486 : aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2668 : resname);
2669 10486 : if (!AttributeNumberIsValid(aerm->ctidAttNo))
2670 0 : elog(ERROR, "could not find junk %s column", resname);
2671 : }
2672 : else
2673 : {
2674 : /* need wholerow if COPY */
2675 272 : snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2676 272 : aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2677 : resname);
2678 272 : if (!AttributeNumberIsValid(aerm->wholeAttNo))
2679 0 : elog(ERROR, "could not find junk %s column", resname);
2680 : }
2681 :
2682 : /* if child rel, need tableoid */
2683 10758 : if (erm->rti != erm->prti)
2684 : {
2685 1864 : snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2686 1864 : aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2687 : resname);
2688 1864 : if (!AttributeNumberIsValid(aerm->toidAttNo))
2689 0 : elog(ERROR, "could not find junk %s column", resname);
2690 : }
2691 :
2692 10758 : return aerm;
2693 : }
2694 :
2695 :
2696 : /*
2697 : * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2698 : * process the updated version under READ COMMITTED rules.
2699 : *
2700 : * See backend/executor/README for some info about how this works.
2701 : */
2702 :
2703 :
2704 : /*
2705 : * Check the updated version of a tuple to see if we want to process it under
2706 : * READ COMMITTED rules.
2707 : *
2708 : * epqstate - state for EvalPlanQual rechecking
2709 : * relation - table containing tuple
2710 : * rti - rangetable index of table containing tuple
2711 : * inputslot - tuple for processing - this can be the slot from
2712 : * EvalPlanQualSlot() for this rel, for increased efficiency.
2713 : *
2714 : * This tests whether the tuple in inputslot still matches the relevant
2715 : * quals. For that result to be useful, typically the input tuple has to be
2716 : * last row version (otherwise the result isn't particularly useful) and
2717 : * locked (otherwise the result might be out of date). That's typically
2718 : * achieved by using table_tuple_lock() with the
2719 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
2720 : *
2721 : * Returns a slot containing the new candidate update/delete tuple, or
2722 : * NULL if we determine we shouldn't process the row.
2723 : */
2724 : TupleTableSlot *
2725 258 : EvalPlanQual(EPQState *epqstate, Relation relation,
2726 : Index rti, TupleTableSlot *inputslot)
2727 : {
2728 : TupleTableSlot *slot;
2729 : TupleTableSlot *testslot;
2730 :
2731 : Assert(rti > 0);
2732 :
2733 : /*
2734 : * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
2735 : */
2736 258 : EvalPlanQualBegin(epqstate);
2737 :
2738 : /*
2739 : * Callers will often use the EvalPlanQualSlot to store the tuple to avoid
2740 : * an unnecessary copy.
2741 : */
2742 258 : testslot = EvalPlanQualSlot(epqstate, relation, rti);
2743 258 : if (testslot != inputslot)
2744 12 : ExecCopySlot(testslot, inputslot);
2745 :
2746 : /*
2747 : * Mark that an EPQ tuple is available for this relation. (If there is
2748 : * more than one result relation, the others remain marked as having no
2749 : * tuple available.)
2750 : */
2751 258 : epqstate->relsubs_done[rti - 1] = false;
2752 258 : epqstate->relsubs_blocked[rti - 1] = false;
2753 :
2754 : /*
2755 : * Run the EPQ query. We assume it will return at most one tuple.
2756 : */
2757 258 : slot = EvalPlanQualNext(epqstate);
2758 :
2759 : /*
2760 : * If we got a tuple, force the slot to materialize the tuple so that it
2761 : * is not dependent on any local state in the EPQ query (in particular,
2762 : * it's highly likely that the slot contains references to any pass-by-ref
2763 : * datums that may be present in copyTuple). As with the next step, this
2764 : * is to guard against early re-use of the EPQ query.
2765 : */
2766 258 : if (!TupIsNull(slot))
2767 190 : ExecMaterializeSlot(slot);
2768 :
2769 : /*
2770 : * Clear out the test tuple, and mark that no tuple is available here.
2771 : * This is needed in case the EPQ state is re-used to test a tuple for a
2772 : * different target relation.
2773 : */
2774 258 : ExecClearTuple(testslot);
2775 258 : epqstate->relsubs_blocked[rti - 1] = true;
2776 :
2777 258 : return slot;
2778 : }
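A hedged sketch of the typical call sequence, loosely condensed from the update/delete paths in nodeModifyTable.c: on a concurrent update under READ COMMITTED, lock the latest row version (per the header comment, with TUPLE_LOCK_FLAG_FIND_LAST_VERSION) and then recheck it here. The surrounding setup and the other TM_Result cases are omitted:

    TupleTableSlot *inputslot = EvalPlanQualSlot(epqstate, relation, rti);
    TM_FailureData  tmfd;
    TM_Result       result;

    result = table_tuple_lock(relation, tid, estate->es_snapshot,
                              inputslot, estate->es_output_cid,
                              lockmode, LockWaitBlock,
                              TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
                              &tmfd);
    if (result == TM_Ok)
    {
        TupleTableSlot *epqslot = EvalPlanQual(epqstate, relation, rti,
                                               inputslot);

        if (TupIsNull(epqslot))
            return;             /* row no longer passes the quals: skip it */
        /* otherwise, retry the operation using the returned tuple */
    }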
2779 :
2780 : /*
2781 : * EvalPlanQualInit -- initialize during creation of a plan state node
2782 : * that might need to invoke EPQ processing.
2783 : *
2784 : * If the caller intends to use EvalPlanQual(), resultRelations should be
2785 : * a list of RT indexes of potential target relations for EvalPlanQual(),
2786 : * and we will arrange that the other listed relations don't return any
2787 : * tuple during an EvalPlanQual() call. Otherwise resultRelations
2788 : * should be NIL.
2789 : *
2790 : * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2791 : * with EvalPlanQualSetPlan.
2792 : */
2793 : void
2794 273868 : EvalPlanQualInit(EPQState *epqstate, EState *parentestate,
2795 : Plan *subplan, List *auxrowmarks,
2796 : int epqParam, List *resultRelations)
2797 : {
2798 273868 : Index rtsize = parentestate->es_range_table_size;
2799 :
2800 : /* initialize data not changing over EPQState's lifetime */
2801 273868 : epqstate->parentestate = parentestate;
2802 273868 : epqstate->epqParam = epqParam;
2803 273868 : epqstate->resultRelations = resultRelations;
2804 :
2805 : /*
2806 : * Allocate space to reference a slot for each potential rti - do so now
2807 : * rather than in EvalPlanQualBegin(), as done for other dynamically
2808 : * allocated resources, so EvalPlanQualSlot() can be used to hold tuples
2809 : * that *may* need EPQ later, without forcing the overhead of
2810 : * EvalPlanQualBegin().
2811 : */
2812 273868 : epqstate->tuple_table = NIL;
2813 273868 : epqstate->relsubs_slot = (TupleTableSlot **)
2814 273868 : palloc0(rtsize * sizeof(TupleTableSlot *));
2815 :
2816 : /* ... and remember data that EvalPlanQualBegin will need */
2817 273868 : epqstate->plan = subplan;
2818 273868 : epqstate->arowMarks = auxrowmarks;
2819 :
2820 : /* ... and mark the EPQ state inactive */
2821 273868 : epqstate->origslot = NULL;
2822 273868 : epqstate->recheckestate = NULL;
2823 273868 : epqstate->recheckplanstate = NULL;
2824 273868 : epqstate->relsubs_rowmark = NULL;
2825 273868 : epqstate->relsubs_done = NULL;
2826 273868 : epqstate->relsubs_blocked = NULL;
2827 273868 : }
2828 :
2829 : /*
2830 : * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2831 : *
2832 : * We used to need this so that ModifyTable could deal with multiple subplans.
2833 : * It could now be refactored out of existence.
2834 : */
2835 : void
2836 120958 : EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2837 : {
2838 : /* If we have a live EPQ query, shut it down */
2839 120958 : EvalPlanQualEnd(epqstate);
2840 : /* And set/change the plan pointer */
2841 120958 : epqstate->plan = subplan;
2842 : /* The rowmarks depend on the plan, too */
2843 120958 : epqstate->arowMarks = auxrowmarks;
2844 120958 : }
2845 :
2846 : /*
2847 : * Return, and create if necessary, a slot for an EPQ test tuple.
2848 : *
2849 : * Note this only requires EvalPlanQualInit() to have been called,
2850 : * EvalPlanQualBegin() is not necessary.
2851 : */
2852 : TupleTableSlot *
2853 17282 : EvalPlanQualSlot(EPQState *epqstate,
2854 : Relation relation, Index rti)
2855 : {
2856 : TupleTableSlot **slot;
2857 :
2858 : Assert(relation);
2859 : Assert(rti > 0 && rti <= epqstate->parentestate->es_range_table_size);
2860 17282 : slot = &epqstate->relsubs_slot[rti - 1];
2861 :
2862 17282 : if (*slot == NULL)
2863 : {
2864 : MemoryContext oldcontext;
2865 :
2866 5904 : oldcontext = MemoryContextSwitchTo(epqstate->parentestate->es_query_cxt);
2867 5904 : *slot = table_slot_create(relation, &epqstate->tuple_table);
2868 5904 : MemoryContextSwitchTo(oldcontext);
2869 : }
2870 :
2871 17282 : return *slot;
2872 : }
2873 :
2874 : /*
2875 : * Fetch the current row value for a non-locked relation, identified by rti,
2876 : * that needs to be scanned by an EvalPlanQual operation. origslot must have
2877 : * been set to contain the current result row (top-level row) that we need to
2878 : * recheck. Returns true if a substitution tuple was found, false if not.
2879 : */
2880 : bool
2881 26 : EvalPlanQualFetchRowMark(EPQState *epqstate, Index rti, TupleTableSlot *slot)
2882 : {
2883 26 : ExecAuxRowMark *earm = epqstate->relsubs_rowmark[rti - 1];
2884 : ExecRowMark *erm;
2885 : Datum datum;
2886 : bool isNull;
2887 :
2888 : Assert(earm != NULL);
2889 : Assert(epqstate->origslot != NULL);
2890 :
2891 26 : erm = earm->rowmark;
2892 :
2893 26 : if (RowMarkRequiresRowShareLock(erm->markType))
2894 0 : elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2895 :
2896 : /* if child rel, must check whether it produced this row */
2897 26 : if (erm->rti != erm->prti)
2898 : {
2899 : Oid tableoid;
2900 :
2901 0 : datum = ExecGetJunkAttribute(epqstate->origslot,
2902 0 : earm->toidAttNo,
2903 : &isNull);
2904 : /* non-locked rels could be on the inside of outer joins */
2905 0 : if (isNull)
2906 0 : return false;
2907 :
2908 0 : tableoid = DatumGetObjectId(datum);
2909 :
2910 : Assert(OidIsValid(erm->relid));
2911 0 : if (tableoid != erm->relid)
2912 : {
2913 : /* this child is inactive right now */
2914 0 : return false;
2915 : }
2916 : }
2917 :
2918 26 : if (erm->markType == ROW_MARK_REFERENCE)
2919 : {
2920 : Assert(erm->relation != NULL);
2921 :
2922 : /* fetch the tuple's ctid */
2923 26 : datum = ExecGetJunkAttribute(epqstate->origslot,
2924 26 : earm->ctidAttNo,
2925 : &isNull);
2926 : /* non-locked rels could be on the inside of outer joins */
2927 26 : if (isNull)
2928 0 : return false;
2929 :
2930 : /* fetch requests on foreign tables must be passed to their FDW */
2931 26 : if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2932 : {
2933 : FdwRoutine *fdwroutine;
2934 0 : bool updated = false;
2935 :
2936 0 : fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2937 : /* this should have been checked already, but let's be safe */
2938 0 : if (fdwroutine->RefetchForeignRow == NULL)
2939 0 : ereport(ERROR,
2940 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2941 : errmsg("cannot lock rows in foreign table \"%s\"",
2942 : RelationGetRelationName(erm->relation))));
2943 :
2944 0 : fdwroutine->RefetchForeignRow(epqstate->recheckestate,
2945 : erm,
2946 : datum,
2947 : slot,
2948 : &updated);
2949 0 : if (TupIsNull(slot))
2950 0 : elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2951 :
2952 : /*
2953 : * Ideally we'd insist on updated == false here, but that assumes
2954 : * that FDWs can track that exactly, which they might not be able
2955 : * to. So just ignore the flag.
2956 : */
2957 0 : return true;
2958 : }
2959 : else
2960 : {
2961 : /* ordinary table, fetch the tuple */
2962 26 : if (!table_tuple_fetch_row_version(erm->relation,
2963 26 : (ItemPointer) DatumGetPointer(datum),
2964 : SnapshotAny, slot))
2965 0 : elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2966 26 : return true;
2967 : }
2968 : }
2969 : else
2970 : {
2971 : Assert(erm->markType == ROW_MARK_COPY);
2972 :
2973 : /* fetch the whole-row Var for the relation */
2974 0 : datum = ExecGetJunkAttribute(epqstate->origslot,
2975 0 : earm->wholeAttNo,
2976 : &isNull);
2977 : /* non-locked rels could be on the inside of outer joins */
2978 0 : if (isNull)
2979 0 : return false;
2980 :
2981 0 : ExecStoreHeapTupleDatum(datum, slot);
2982 0 : return true;
2983 : }
2984 : }
2985 :
2986 : /*
2987 : * Fetch the next row (if any) from EvalPlanQual testing
2988 : *
2989 : * (In practice, there should never be more than one row...)
2990 : */
2991 : TupleTableSlot *
2992 314 : EvalPlanQualNext(EPQState *epqstate)
2993 : {
2994 : MemoryContext oldcontext;
2995 : TupleTableSlot *slot;
2996 :
2997 314 : oldcontext = MemoryContextSwitchTo(epqstate->recheckestate->es_query_cxt);
2998 314 : slot = ExecProcNode(epqstate->recheckplanstate);
2999 314 : MemoryContextSwitchTo(oldcontext);
3000 :
3001 314 : return slot;
3002 : }
3003 :
3004 : /*
3005 : * Initialize or reset an EvalPlanQual state tree
3006 : */
3007 : void
3008 370 : EvalPlanQualBegin(EPQState *epqstate)
3009 : {
3010 370 : EState *parentestate = epqstate->parentestate;
3011 370 : EState *recheckestate = epqstate->recheckestate;
3012 :
3013 370 : if (recheckestate == NULL)
3014 : {
3015 : /* First time through, so create a child EState */
3016 226 : EvalPlanQualStart(epqstate, epqstate->plan);
3017 : }
3018 : else
3019 : {
3020 : /*
3021 : * We already have a suitable child EPQ tree, so just reset it.
3022 : */
3023 144 : Index rtsize = parentestate->es_range_table_size;
3024 144 : PlanState *rcplanstate = epqstate->recheckplanstate;
3025 :
3026 : /*
3027 : * Reset the relsubs_done[] flags to equal relsubs_blocked[], so that
3028 : * the EPQ run will never attempt to fetch tuples from blocked target
3029 : * relations.
3030 : */
3031 144 : memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
3032 : rtsize * sizeof(bool));
3033 :
3034 : /* Recopy current values of parent parameters */
3035 144 : if (parentestate->es_plannedstmt->paramExecTypes != NIL)
3036 : {
3037 : int i;
3038 :
3039 : /*
3040 : * Force evaluation of any InitPlan outputs that could be needed
3041 : * by the subplan, just in case they got reset since
3042 : * EvalPlanQualStart (see comments therein).
3043 : */
3044 144 : ExecSetParamPlanMulti(rcplanstate->plan->extParam,
3045 144 : GetPerTupleExprContext(parentestate));
3046 :
3047 144 : i = list_length(parentestate->es_plannedstmt->paramExecTypes);
3048 :
3049 306 : while (--i >= 0)
3050 : {
3051 : /* copy value if any, but not execPlan link */
3052 162 : recheckestate->es_param_exec_vals[i].value =
3053 162 : parentestate->es_param_exec_vals[i].value;
3054 162 : recheckestate->es_param_exec_vals[i].isnull =
3055 162 : parentestate->es_param_exec_vals[i].isnull;
3056 : }
3057 : }
3058 :
3059 : /*
3060 : * Mark child plan tree as needing rescan at all scan nodes. The
3061 : * first ExecProcNode will take care of actually doing the rescan.
3062 : */
3063 144 : rcplanstate->chgParam = bms_add_member(rcplanstate->chgParam,
3064 : epqstate->epqParam);
3065 : }
3066 370 : }
3067 :
3068 : /*
3069 : * Start execution of an EvalPlanQual plan tree.
3070 : *
3071 : * This is a cut-down version of ExecutorStart(): we copy some state from
3072 : * the top-level estate rather than initializing it fresh.
3073 : */
3074 : static void
3075 226 : EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
3076 : {
3077 226 : EState *parentestate = epqstate->parentestate;
3078 226 : Index rtsize = parentestate->es_range_table_size;
3079 : EState *rcestate;
3080 : MemoryContext oldcontext;
3081 : ListCell *l;
3082 :
3083 226 : epqstate->recheckestate = rcestate = CreateExecutorState();
3084 :
3085 226 : oldcontext = MemoryContextSwitchTo(rcestate->es_query_cxt);
3086 :
3087 : /* signal that this is an EState for executing EPQ */
3088 226 : rcestate->es_epq_active = epqstate;
3089 :
3090 : /*
3091 : * Child EPQ EStates share the parent's copy of unchanging state such as
3092 : * the snapshot, rangetable, and external Param info. They need their own
3093 : * copies of local state, including a tuple table, es_param_exec_vals,
3094 : * result-rel info, etc.
3095 : *
3096 : * es_cachedplan is not copied because EPQ plan execution does not acquire
3097 : * any new locks that could invalidate the CachedPlan.
3098 : */
3099 226 : rcestate->es_direction = ForwardScanDirection;
3100 226 : rcestate->es_snapshot = parentestate->es_snapshot;
3101 226 : rcestate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
3102 226 : rcestate->es_range_table = parentestate->es_range_table;
3103 226 : rcestate->es_range_table_size = parentestate->es_range_table_size;
3104 226 : rcestate->es_relations = parentestate->es_relations;
3105 226 : rcestate->es_rowmarks = parentestate->es_rowmarks;
3106 226 : rcestate->es_rteperminfos = parentestate->es_rteperminfos;
3107 226 : rcestate->es_plannedstmt = parentestate->es_plannedstmt;
3108 226 : rcestate->es_junkFilter = parentestate->es_junkFilter;
3109 226 : rcestate->es_output_cid = parentestate->es_output_cid;
3110 226 : rcestate->es_queryEnv = parentestate->es_queryEnv;
3111 :
3112 : /*
3113 : * ResultRelInfos needed by subplans are initialized from scratch when the
3114 : * subplans themselves are initialized.
3115 : */
3116 226 : rcestate->es_result_relations = NULL;
3117 : /* es_trig_target_relations must NOT be copied */
3118 226 : rcestate->es_top_eflags = parentestate->es_top_eflags;
3119 226 : rcestate->es_instrument = parentestate->es_instrument;
3120 : /* es_auxmodifytables must NOT be copied */
3121 :
3122 : /*
3123 : * The external param list is simply shared from parent. The internal
3124 : * param workspace has to be local state, but we copy the initial values
3125 : * from the parent, so as to have access to any param values that were
3126 : * already set from other parts of the parent's plan tree.
3127 : */
3128 226 : rcestate->es_param_list_info = parentestate->es_param_list_info;
3129 226 : if (parentestate->es_plannedstmt->paramExecTypes != NIL)
3130 : {
3131 : int i;
3132 :
3133 : /*
3134 : * Force evaluation of any InitPlan outputs that could be needed by
3135 : * the subplan. (With more complexity, maybe we could postpone this
3136 : * till the subplan actually demands them, but it doesn't seem worth
3137 : * the trouble; this is a corner case already, since usually the
3138 : * InitPlans would have been evaluated before reaching EvalPlanQual.)
3139 : *
3140 : * This will not touch output params of InitPlans that occur somewhere
3141 : * within the subplan tree, only those that are attached to the
3142 : * ModifyTable node or above it and are referenced within the subplan.
3143 : * That's OK though, because the planner would only attach such
3144 : * InitPlans to a lower-level SubqueryScan node, and EPQ execution
3145 : * will not descend into a SubqueryScan.
3146 : *
3147 : * The EState's per-output-tuple econtext is sufficiently short-lived
3148 : * for this, since it should get reset before there is any chance of
3149 : * doing EvalPlanQual again.
3150 : */
3151 226 : ExecSetParamPlanMulti(planTree->extParam,
3152 226 : GetPerTupleExprContext(parentestate));
3153 :
3154 : /* now make the internal param workspace ... */
3155 226 : i = list_length(parentestate->es_plannedstmt->paramExecTypes);
3156 226 : rcestate->es_param_exec_vals = (ParamExecData *)
3157 226 : palloc0(i * sizeof(ParamExecData));
3158 : /* ... and copy down all values, whether really needed or not */
3159 550 : while (--i >= 0)
3160 : {
3161 : /* copy value if any, but not execPlan link */
3162 324 : rcestate->es_param_exec_vals[i].value =
3163 324 : parentestate->es_param_exec_vals[i].value;
3164 324 : rcestate->es_param_exec_vals[i].isnull =
3165 324 : parentestate->es_param_exec_vals[i].isnull;
3166 : }
3167 : }
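
/*
 * For reference, ParamExecData (params.h) has three fields: an execPlan
 * link plus the Datum value and isnull flag copied above.  Leaving the
 * child's execPlan pointers NULL (courtesy of palloc0) means the child
 * EState treats every copied value as already computed, and never tries
 * to re-run the parent's InitPlans to produce it.
 */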
3168 :
3169 : /*
3170 : * Copy es_unpruned_relids so that pruned relations are ignored by
3171 : * ExecInitLockRows() and ExecInitModifyTable() when initializing the plan
3172 : * trees below.
3173 : */
3174 226 : rcestate->es_unpruned_relids = parentestate->es_unpruned_relids;
3175 :
3176 : /*
3177 : * Initialize private state information for each SubPlan. We must do this
3178 : * before running ExecInitNode on the main query tree, since
3179 : * ExecInitSubPlan expects to be able to find these entries. Some of the
3180 : * SubPlans might not be used in the part of the plan tree we intend to
3181 : * run, but since it's not easy to tell which, we just initialize them
3182 : * all.
3183 : */
3184 : Assert(rcestate->es_subplanstates == NIL);
3185 282 : foreach(l, parentestate->es_plannedstmt->subplans)
3186 : {
3187 56 : Plan *subplan = (Plan *) lfirst(l);
3188 : PlanState *subplanstate;
3189 :
3190 56 : subplanstate = ExecInitNode(subplan, rcestate, 0);
3191 56 : rcestate->es_subplanstates = lappend(rcestate->es_subplanstates,
3192 : subplanstate);
3193 : }
3194 :
3195 : /*
3196 :      * Build an RTI-indexed array of rowmarks, so that
3197 :      * EvalPlanQualFetchRowMark() can efficiently access the rowmark to be
3198 :      * fetched.
3199 : */
3200 226 : epqstate->relsubs_rowmark = (ExecAuxRowMark **)
3201 226 : palloc0(rtsize * sizeof(ExecAuxRowMark *));
3202 238 : foreach(l, epqstate->arowMarks)
3203 : {
3204 12 : ExecAuxRowMark *earm = (ExecAuxRowMark *) lfirst(l);
3205 :
3206 12 : epqstate->relsubs_rowmark[earm->rowmark->rti - 1] = earm;
3207 : }
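
/*
 * With the array built, the per-row lookup in EvalPlanQualFetchRowMark()
 * reduces to a constant-time index instead of a search of the arowMarks
 * list:
 *
 *     ExecAuxRowMark *earm = epqstate->relsubs_rowmark[rti - 1];
 */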
3208 :
3209 : /*
3210 : * Initialize per-relation EPQ tuple states. Result relations, if any,
3211 : * get marked as blocked; others as not-fetched.
3212 : */
3213 226 : epqstate->relsubs_done = palloc_array(bool, rtsize);
3214 226 : epqstate->relsubs_blocked = palloc0_array(bool, rtsize);
3215 :
3216 452 : foreach(l, epqstate->resultRelations)
3217 : {
3218 226 : int rtindex = lfirst_int(l);
3219 :
3220 : Assert(rtindex > 0 && rtindex <= rtsize);
3221 226 : epqstate->relsubs_blocked[rtindex - 1] = true;
3222 : }
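
/*
 * The memcpy below makes each blocked result relation also start out
 * "done", so its scan returns no tuple during the recheck.  When a
 * particular relation's new tuple version is actually under test,
 * EvalPlanQual() clears both flags for just that RTI, unblocking it.
 */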
3223 :
3224 226 : memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
3225 : rtsize * sizeof(bool));
3226 :
3227 : /*
3228 : * Initialize the private state information for all the nodes in the part
3229 : * of the plan tree we need to run. This opens files, allocates storage
3230 : * and leaves us ready to start processing tuples.
3231 : */
3232 226 : epqstate->recheckplanstate = ExecInitNode(planTree, rcestate, 0);
3233 :
3234 226 : MemoryContextSwitchTo(oldcontext);
3235 226 : }
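
/*
 * Memory-management note: everything allocated between the two
 * MemoryContextSwitchTo() calls above (the param workspace, subplan
 * states, the rowmark and relsubs arrays, and all per-node state) lives
 * in rcestate->es_query_cxt.  EvalPlanQualEnd() can therefore release it
 * wholesale via FreeExecutorState(), with no piecemeal pfree'ing.
 */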
3236 :
3237 : /*
3238 : * EvalPlanQualEnd -- shut down at termination of parent plan state node,
3239 : * or if we are done with the current EPQ child.
3240 : *
3241 : * This is a cut-down version of ExecutorEnd(); basically we want to do most
3242 :  * of the normal cleanup, but *not* close the result relations that are
3243 :  * merely shared from the outer query.  We do, however, have to close any
3244 :  * result and trigger target relations that this EState opened itself,
3245 :  * since those are not shared.  (There probably shouldn't be any of the
3246 :  * latter, but just in case...)
3247 : */
3248 : void
3249 393254 : EvalPlanQualEnd(EPQState *epqstate)
3250 : {
3251 393254 : EState *estate = epqstate->recheckestate;
3252 : Index rtsize;
3253 : MemoryContext oldcontext;
3254 : ListCell *l;
3255 :
3256 393254 : rtsize = epqstate->parentestate->es_range_table_size;
3257 :
3258 : /*
3259 : * We may have a tuple table, even if EPQ wasn't started, because we allow
3260 : * use of EvalPlanQualSlot() without calling EvalPlanQualBegin().
3261 : */
3262 393254 : if (epqstate->tuple_table != NIL)
3263 : {
3264 5690 : memset(epqstate->relsubs_slot, 0,
3265 : rtsize * sizeof(TupleTableSlot *));
3266 5690 : ExecResetTupleTable(epqstate->tuple_table, true);
3267 5690 : epqstate->tuple_table = NIL;
3268 : }
3269 :
3270 : /* EPQ wasn't started, nothing further to do */
3271 393254 : if (estate == NULL)
3272 393040 : return;
3273 :
3274 214 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3275 :
3276 214 : ExecEndNode(epqstate->recheckplanstate);
3277 :
3278 264 : foreach(l, estate->es_subplanstates)
3279 : {
3280 50 : PlanState *subplanstate = (PlanState *) lfirst(l);
3281 :
3282 50 : ExecEndNode(subplanstate);
3283 : }
3284 :
3285 :      /* throw away the per-estate tuple table; some node may have used it */
3286 214 : ExecResetTupleTable(estate->es_tupleTable, false);
3287 :
3288 : /* Close any result and trigger target relations attached to this EState */
3289 214 : ExecCloseResultRelations(estate);
3290 :
3291 214 : MemoryContextSwitchTo(oldcontext);
3292 :
3293 214 : FreeExecutorState(estate);
3294 :
3295 : /* Mark EPQState idle */
3296 214 : epqstate->origslot = NULL;
3297 214 : epqstate->recheckestate = NULL;
3298 214 : epqstate->recheckplanstate = NULL;
3299 214 : epqstate->relsubs_rowmark = NULL;
3300 214 : epqstate->relsubs_done = NULL;
3301 214 : epqstate->relsubs_blocked = NULL;
3302 : }
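
/*
 * A usage note grounded in the counts above: node shutdown paths call
 * EvalPlanQualEnd() unconditionally, so the overwhelmingly common case
 * (393040 of 393254 calls here) is an EPQState that was initialized but
 * never begun.  Assuming the usual sequence:
 *
 *     EvalPlanQualInit(&epqstate, estate, subplan, auxrowmarks,
 *                      epqParam, resultRelations);
 *     ...                              (no concurrent update occurred)
 *     EvalPlanQualEnd(&epqstate);      (recheckestate is still NULL, so
 *                                       only the tuple table, if any,
 *                                       needs cleaning up)
 */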