Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * execParallel.c
4 : * Support routines for parallel execution.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : * This file contains routines that are intended to support setting up,
10 : * using, and tearing down a ParallelContext from within the PostgreSQL
11 : * executor. The ParallelContext machinery will handle starting the
12 : * workers and ensuring that their state generally matches that of the
13 : * leader; see src/backend/access/transam/README.parallel for details.
14 : * However, we must save and restore relevant executor state, such as
15 : * any ParamListInfo associated with the query, buffer/WAL usage info, and
16 : * the actual plan to be passed down to the worker.
17 : *
18 : * IDENTIFICATION
19 : * src/backend/executor/execParallel.c
20 : *
21 : *-------------------------------------------------------------------------
22 : */
23 :
24 : #include "postgres.h"
25 :
26 : #include "executor/execParallel.h"
27 : #include "executor/executor.h"
28 : #include "executor/nodeAgg.h"
29 : #include "executor/nodeAppend.h"
30 : #include "executor/nodeBitmapHeapscan.h"
31 : #include "executor/nodeCustom.h"
32 : #include "executor/nodeForeignscan.h"
33 : #include "executor/nodeHash.h"
34 : #include "executor/nodeHashjoin.h"
35 : #include "executor/nodeIncrementalSort.h"
36 : #include "executor/nodeIndexonlyscan.h"
37 : #include "executor/nodeIndexscan.h"
38 : #include "executor/nodeMemoize.h"
39 : #include "executor/nodeSeqscan.h"
40 : #include "executor/nodeSort.h"
41 : #include "executor/nodeSubplan.h"
42 : #include "executor/tqueue.h"
43 : #include "jit/jit.h"
44 : #include "nodes/nodeFuncs.h"
45 : #include "pgstat.h"
46 : #include "tcop/tcopprot.h"
47 : #include "utils/datum.h"
48 : #include "utils/dsa.h"
49 : #include "utils/lsyscache.h"
50 : #include "utils/snapmgr.h"
51 :
52 : /*
53 : * Magic numbers for parallel executor communication. We use constants
54 : * greater than any 32-bit integer here so that values < 2^32 can be used
55 : * by individual parallel nodes to store their own state.
56 : */
57 : #define PARALLEL_KEY_EXECUTOR_FIXED UINT64CONST(0xE000000000000001)
58 : #define PARALLEL_KEY_PLANNEDSTMT UINT64CONST(0xE000000000000002)
59 : #define PARALLEL_KEY_PARAMLISTINFO UINT64CONST(0xE000000000000003)
60 : #define PARALLEL_KEY_BUFFER_USAGE UINT64CONST(0xE000000000000004)
61 : #define PARALLEL_KEY_TUPLE_QUEUE UINT64CONST(0xE000000000000005)
62 : #define PARALLEL_KEY_INSTRUMENTATION UINT64CONST(0xE000000000000006)
63 : #define PARALLEL_KEY_DSA UINT64CONST(0xE000000000000007)
64 : #define PARALLEL_KEY_QUERY_TEXT UINT64CONST(0xE000000000000008)
65 : #define PARALLEL_KEY_JIT_INSTRUMENTATION UINT64CONST(0xE000000000000009)
66 : #define PARALLEL_KEY_WAL_USAGE UINT64CONST(0xE00000000000000A)
67 :
68 : #define PARALLEL_TUPLE_QUEUE_SIZE 65536
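/*
 * For illustration: a parallel-aware node typically keys its own shared
 * state by its plan_node_id, a small integer that stays safely below the
 * 2^32 boundary reserved above.  A minimal sketch, with ParallelFoo
 * standing in for a hypothetical node-specific shared-state struct:
 *
 *		ParallelFoo *pstate = shm_toc_allocate(pcxt->toc, sizeof(ParallelFoo));
 *		shm_toc_insert(pcxt->toc, planstate->plan->plan_node_id, pstate);
 *
 * A worker then finds the same state with:
 *
 *		pstate = shm_toc_lookup(pwcxt->toc, planstate->plan->plan_node_id, false);
 */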
69 :
70 : /*
71 : * Fixed-size state that we need to pass to parallel workers.
72 : */
73 : typedef struct FixedParallelExecutorState
74 : {
75 : int64 tuples_needed; /* tuple bound, see ExecSetTupleBound */
76 : dsa_pointer param_exec;
77 : int eflags;
78 : int jit_flags;
79 : } FixedParallelExecutorState;
80 :
81 : /*
82 : * DSM structure for accumulating per-PlanState instrumentation.
83 : *
84 : * instrument_options: Same meaning here as in instrument.c.
85 : *
86 : * instrument_offset: Offset, relative to the start of this structure,
87 : * of the first Instrumentation object. This will depend on the length of
88 : * the plan_node_id array.
89 : *
90 : * num_workers: Number of workers.
91 : *
92 : * num_plan_nodes: Number of plan nodes.
93 : *
94 : * plan_node_id: Array of plan node IDs for which we are gathering instrumentation
95 : * from parallel workers. The length of this array is given by num_plan_nodes.
96 : */
97 : struct SharedExecutorInstrumentation
98 : {
99 : int instrument_options;
100 : int instrument_offset;
101 : int num_workers;
102 : int num_plan_nodes;
103 : int plan_node_id[FLEXIBLE_ARRAY_MEMBER];
104 : /* array of num_plan_nodes * num_workers Instrumentation objects follows */
105 : };
106 : #define GetInstrumentationArray(sei) \
107 : (AssertVariableIsOfTypeMacro(sei, SharedExecutorInstrumentation *), \
108 : (Instrumentation *) (((char *) sei) + sei->instrument_offset))
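/*
 * Hence the Instrumentation entry for plan-node slot i and worker n lives
 * at index (i * num_workers + n); a minimal sketch of the lookup:
 *
 *		Instrumentation *array = GetInstrumentationArray(sei);
 *		Instrumentation *entry = &array[i * sei->num_workers + n];
 *
 * This matches the indexing used by the retrieval and reporting routines
 * later in this file.
 */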
109 :
110 : /* Context object for ExecParallelEstimate. */
111 : typedef struct ExecParallelEstimateContext
112 : {
113 : ParallelContext *pcxt;
114 : int nnodes;
115 : } ExecParallelEstimateContext;
116 :
117 : /* Context object for ExecParallelInitializeDSM. */
118 : typedef struct ExecParallelInitializeDSMContext
119 : {
120 : ParallelContext *pcxt;
121 : SharedExecutorInstrumentation *instrumentation;
122 : int nnodes;
123 : } ExecParallelInitializeDSMContext;
124 :
125 : /* Helper functions that run in the parallel leader. */
126 : static char *ExecSerializePlan(Plan *plan, EState *estate);
127 : static bool ExecParallelEstimate(PlanState *planstate,
128 : ExecParallelEstimateContext *e);
129 : static bool ExecParallelInitializeDSM(PlanState *planstate,
130 : ExecParallelInitializeDSMContext *d);
131 : static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
132 : bool reinitialize);
133 : static bool ExecParallelReInitializeDSM(PlanState *planstate,
134 : ParallelContext *pcxt);
135 : static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
136 : SharedExecutorInstrumentation *instrumentation);
137 :
138 : /* Helper function that runs in the parallel worker. */
139 : static DestReceiver *ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc);
140 :
141 : /*
142 : * Create a serialized representation of the plan to be sent to each worker.
143 : */
144 : static char *
145 712 : ExecSerializePlan(Plan *plan, EState *estate)
146 : {
147 : PlannedStmt *pstmt;
148 : ListCell *lc;
149 :
150 : /* We can't scribble on the original plan, so make a copy. */
151 712 : plan = copyObject(plan);
152 :
153 : /*
154 : * The worker will start its own copy of the executor, and that copy will
155 : * insert a junk filter if the toplevel node has any resjunk entries. We
156 : * don't want that to happen, because while resjunk columns shouldn't be
157 : * sent back to the user, here the tuples are coming back to another
158 : * backend which may very well need them. So mutate the target list
159 : * accordingly. This is sort of a hack; there might be better ways to do
160 : * this...
161 : */
162 1960 : foreach(lc, plan->targetlist)
163 : {
164 1248 : TargetEntry *tle = lfirst_node(TargetEntry, lc);
165 :
166 1248 : tle->resjunk = false;
167 : }
168 :
169 : /*
170 : * Create a dummy PlannedStmt. Most of the fields don't need to be valid
171 : * for our purposes, but the worker will need at least a minimal
172 : * PlannedStmt to start the executor.
173 : */
174 712 : pstmt = makeNode(PlannedStmt);
175 712 : pstmt->commandType = CMD_SELECT;
176 712 : pstmt->queryId = pgstat_get_my_query_id();
177 712 : pstmt->hasReturning = false;
178 712 : pstmt->hasModifyingCTE = false;
179 712 : pstmt->canSetTag = true;
180 712 : pstmt->transientPlan = false;
181 712 : pstmt->dependsOnRole = false;
182 712 : pstmt->parallelModeNeeded = false;
183 712 : pstmt->planTree = plan;
184 712 : pstmt->partPruneInfos = estate->es_part_prune_infos;
185 712 : pstmt->rtable = estate->es_range_table;
186 712 : pstmt->unprunableRelids = estate->es_unpruned_relids;
187 712 : pstmt->permInfos = estate->es_rteperminfos;
188 712 : pstmt->resultRelations = NIL;
189 712 : pstmt->appendRelations = NIL;
190 :
191 : /*
192 : * Transfer only parallel-safe subplans, leaving a NULL "hole" in the list
193 : * for unsafe ones (so that the list indexes of the safe ones are
194 : * preserved). This positively ensures that the worker won't try to run,
195 : * or even do ExecInitNode on, an unsafe subplan. That's important to
196 : * protect, eg, non-parallel-aware FDWs from getting into trouble.
197 : */
198 712 : pstmt->subplans = NIL;
199 766 : foreach(lc, estate->es_plannedstmt->subplans)
200 : {
201 54 : Plan *subplan = (Plan *) lfirst(lc);
202 :
203 54 : if (subplan && !subplan->parallel_safe)
204 12 : subplan = NULL;
205 54 : pstmt->subplans = lappend(pstmt->subplans, subplan);
206 : }
207 :
208 712 : pstmt->rewindPlanIDs = NULL;
209 712 : pstmt->rowMarks = NIL;
210 712 : pstmt->relationOids = NIL;
211 712 : pstmt->invalItems = NIL; /* workers can't replan anyway... */
212 712 : pstmt->paramExecTypes = estate->es_plannedstmt->paramExecTypes;
213 712 : pstmt->utilityStmt = NULL;
214 712 : pstmt->stmt_location = -1;
215 712 : pstmt->stmt_len = -1;
216 :
217 : /* Return serialized copy of our dummy PlannedStmt. */
218 712 : return nodeToString(pstmt);
219 : }
220 :
221 : /*
222 : * Parallel-aware plan nodes (and occasionally others) may need some state
223 : * which is shared across all parallel workers. Before we size the DSM, give
224 : * them a chance to call shm_toc_estimate_chunk or shm_toc_estimate_keys on
225 : * &pcxt->estimator.
226 : *
227 : * While we're at it, count the number of PlanState nodes in the tree, so
228 : * we know how many Instrumentation structures we need.
229 : */
230 : static bool
231 2954 : ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
232 : {
233 2954 : if (planstate == NULL)
234 0 : return false;
235 :
236 : /* Count this node. */
237 2954 : e->nnodes++;
238 :
239 2954 : switch (nodeTag(planstate))
240 : {
241 1138 : case T_SeqScanState:
242 1138 : if (planstate->plan->parallel_aware)
243 900 : ExecSeqScanEstimate((SeqScanState *) planstate,
244 : e->pcxt);
245 1138 : break;
246 294 : case T_IndexScanState:
247 294 : if (planstate->plan->parallel_aware)
248 18 : ExecIndexScanEstimate((IndexScanState *) planstate,
249 : e->pcxt);
250 294 : break;
251 58 : case T_IndexOnlyScanState:
252 58 : if (planstate->plan->parallel_aware)
253 46 : ExecIndexOnlyScanEstimate((IndexOnlyScanState *) planstate,
254 : e->pcxt);
255 58 : break;
256 0 : case T_ForeignScanState:
257 0 : if (planstate->plan->parallel_aware)
258 0 : ExecForeignScanEstimate((ForeignScanState *) planstate,
259 : e->pcxt);
260 0 : break;
261 186 : case T_AppendState:
262 186 : if (planstate->plan->parallel_aware)
263 138 : ExecAppendEstimate((AppendState *) planstate,
264 : e->pcxt);
265 186 : break;
266 0 : case T_CustomScanState:
267 0 : if (planstate->plan->parallel_aware)
268 0 : ExecCustomScanEstimate((CustomScanState *) planstate,
269 : e->pcxt);
270 0 : break;
271 20 : case T_BitmapHeapScanState:
272 20 : if (planstate->plan->parallel_aware)
273 18 : ExecBitmapHeapEstimate((BitmapHeapScanState *) planstate,
274 : e->pcxt);
275 20 : break;
276 192 : case T_HashJoinState:
277 192 : if (planstate->plan->parallel_aware)
278 120 : ExecHashJoinEstimate((HashJoinState *) planstate,
279 : e->pcxt);
280 192 : break;
281 192 : case T_HashState:
282 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
283 192 : ExecHashEstimate((HashState *) planstate, e->pcxt);
284 192 : break;
285 152 : case T_SortState:
286 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
287 152 : ExecSortEstimate((SortState *) planstate, e->pcxt);
288 152 : break;
289 0 : case T_IncrementalSortState:
290 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
291 0 : ExecIncrementalSortEstimate((IncrementalSortState *) planstate, e->pcxt);
292 0 : break;
293 554 : case T_AggState:
294 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
295 554 : ExecAggEstimate((AggState *) planstate, e->pcxt);
296 554 : break;
297 6 : case T_MemoizeState:
298 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
299 6 : ExecMemoizeEstimate((MemoizeState *) planstate, e->pcxt);
300 6 : break;
301 162 : default:
302 162 : break;
303 : }
304 :
305 2954 : return planstate_tree_walker(planstate, ExecParallelEstimate, e);
306 : }
307 :
308 : /*
309 : * Estimate the amount of space required to serialize the indicated parameters.
310 : */
311 : static Size
312 24 : EstimateParamExecSpace(EState *estate, Bitmapset *params)
313 : {
314 : int paramid;
315 24 : Size sz = sizeof(int);
316 :
317 24 : paramid = -1;
318 54 : while ((paramid = bms_next_member(params, paramid)) >= 0)
319 : {
320 : Oid typeOid;
321 : int16 typLen;
322 : bool typByVal;
323 : ParamExecData *prm;
324 :
325 30 : prm = &(estate->es_param_exec_vals[paramid]);
326 30 : typeOid = list_nth_oid(estate->es_plannedstmt->paramExecTypes,
327 : paramid);
328 :
329 30 : sz = add_size(sz, sizeof(int)); /* space for paramid */
330 :
331 : /* space for datum/isnull */
332 30 : if (OidIsValid(typeOid))
333 30 : get_typlenbyval(typeOid, &typLen, &typByVal);
334 : else
335 : {
336 : /* If no type OID, assume by-value, like copyParamList does. */
337 0 : typLen = sizeof(Datum);
338 0 : typByVal = true;
339 : }
340 30 : sz = add_size(sz,
341 30 : datumEstimateSpace(prm->value, prm->isnull,
342 : typByVal, typLen));
343 : }
344 24 : return sz;
345 : }
346 :
347 : /*
348 : * Serialize specified PARAM_EXEC parameters.
349 : *
350 : * We write the number of parameters first, as a 4-byte integer, and then
351 : * write details for each parameter in turn. The details for each parameter
352 : * consist of a 4-byte paramid (location of param in execution time internal
353 : * parameter array) and then the datum as serialized by datumSerialize().
354 : */
355 : static dsa_pointer
356 24 : SerializeParamExecParams(EState *estate, Bitmapset *params, dsa_area *area)
357 : {
358 : Size size;
359 : int nparams;
360 : int paramid;
361 : ParamExecData *prm;
362 : dsa_pointer handle;
363 : char *start_address;
364 :
365 : /* Allocate enough space for the current parameter values. */
366 24 : size = EstimateParamExecSpace(estate, params);
367 24 : handle = dsa_allocate(area, size);
368 24 : start_address = dsa_get_address(area, handle);
369 :
370 : /* First write the number of parameters as a 4-byte integer. */
371 24 : nparams = bms_num_members(params);
372 24 : memcpy(start_address, &nparams, sizeof(int));
373 24 : start_address += sizeof(int);
374 :
375 : /* Write details for each parameter in turn. */
376 24 : paramid = -1;
377 54 : while ((paramid = bms_next_member(params, paramid)) >= 0)
378 : {
379 : Oid typeOid;
380 : int16 typLen;
381 : bool typByVal;
382 :
383 30 : prm = &(estate->es_param_exec_vals[paramid]);
384 30 : typeOid = list_nth_oid(estate->es_plannedstmt->paramExecTypes,
385 : paramid);
386 :
387 : /* Write paramid. */
388 30 : memcpy(start_address, &paramid, sizeof(int));
389 30 : start_address += sizeof(int);
390 :
391 : /* Write datum/isnull */
392 30 : if (OidIsValid(typeOid))
393 30 : get_typlenbyval(typeOid, &typLen, &typByVal);
394 : else
395 : {
396 : /* If no type OID, assume by-value, like copyParamList does. */
397 0 : typLen = sizeof(Datum);
398 0 : typByVal = true;
399 : }
400 30 : datumSerialize(prm->value, prm->isnull, typByVal, typLen,
401 : &start_address);
402 : }
403 :
404 24 : return handle;
405 : }
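/*
 * The resulting DSA layout is, for example with two parameters (an
 * illustrative picture of the format described above):
 *
 *		int32 nparams = 2
 *		int32 paramid0, then datumSerialize() output for its value
 *		int32 paramid1, then datumSerialize() output for its value
 *
 * RestoreParamExecParams below walks exactly this layout, in order.
 */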
406 :
407 : /*
408 : * Restore specified PARAM_EXEC parameters.
409 : */
410 : static void
411 72 : RestoreParamExecParams(char *start_address, EState *estate)
412 : {
413 : int nparams;
414 : int i;
415 : int paramid;
416 :
417 72 : memcpy(&nparams, start_address, sizeof(int));
418 72 : start_address += sizeof(int);
419 :
420 156 : for (i = 0; i < nparams; i++)
421 : {
422 : ParamExecData *prm;
423 :
424 : /* Read paramid */
425 84 : memcpy(&paramid, start_address, sizeof(int));
426 84 : start_address += sizeof(int);
427 84 : prm = &(estate->es_param_exec_vals[paramid]);
428 :
429 : /* Read datum/isnull. */
430 84 : prm->value = datumRestore(&start_address, &prm->isnull);
431 84 : prm->execPlan = NULL;
432 : }
433 72 : }
434 :
435 : /*
436 : * Initialize the dynamic shared memory segment that will be used to control
437 : * parallel execution.
438 : */
439 : static bool
440 2954 : ExecParallelInitializeDSM(PlanState *planstate,
441 : ExecParallelInitializeDSMContext *d)
442 : {
443 2954 : if (planstate == NULL)
444 0 : return false;
445 :
446 : /* If instrumentation is enabled, initialize slot for this node. */
447 2954 : if (d->instrumentation != NULL)
448 1026 : d->instrumentation->plan_node_id[d->nnodes] =
449 1026 : planstate->plan->plan_node_id;
450 :
451 : /* Count this node. */
452 2954 : d->nnodes++;
453 :
454 : /*
455 : * Call initializers for DSM-using plan nodes.
456 : *
457 : * Most plan nodes won't do anything here, but plan nodes that allocated
458 : * DSM may need to initialize shared state in the DSM before parallel
459 : * workers are launched. They can allocate the space they previously
460 : * estimated using shm_toc_allocate, and add the keys they previously
461 : * estimated using shm_toc_insert, in each case targeting pcxt->toc.
462 : */
463 2954 : switch (nodeTag(planstate))
464 : {
465 1138 : case T_SeqScanState:
466 1138 : if (planstate->plan->parallel_aware)
467 900 : ExecSeqScanInitializeDSM((SeqScanState *) planstate,
468 : d->pcxt);
469 1138 : break;
470 294 : case T_IndexScanState:
471 294 : if (planstate->plan->parallel_aware)
472 18 : ExecIndexScanInitializeDSM((IndexScanState *) planstate,
473 : d->pcxt);
474 294 : break;
475 58 : case T_IndexOnlyScanState:
476 58 : if (planstate->plan->parallel_aware)
477 46 : ExecIndexOnlyScanInitializeDSM((IndexOnlyScanState *) planstate,
478 : d->pcxt);
479 58 : break;
480 0 : case T_ForeignScanState:
481 0 : if (planstate->plan->parallel_aware)
482 0 : ExecForeignScanInitializeDSM((ForeignScanState *) planstate,
483 : d->pcxt);
484 0 : break;
485 186 : case T_AppendState:
486 186 : if (planstate->plan->parallel_aware)
487 138 : ExecAppendInitializeDSM((AppendState *) planstate,
488 : d->pcxt);
489 186 : break;
490 0 : case T_CustomScanState:
491 0 : if (planstate->plan->parallel_aware)
492 0 : ExecCustomScanInitializeDSM((CustomScanState *) planstate,
493 : d->pcxt);
494 0 : break;
495 20 : case T_BitmapHeapScanState:
496 20 : if (planstate->plan->parallel_aware)
497 18 : ExecBitmapHeapInitializeDSM((BitmapHeapScanState *) planstate,
498 : d->pcxt);
499 20 : break;
500 192 : case T_HashJoinState:
501 192 : if (planstate->plan->parallel_aware)
502 120 : ExecHashJoinInitializeDSM((HashJoinState *) planstate,
503 : d->pcxt);
504 192 : break;
505 192 : case T_HashState:
506 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
507 192 : ExecHashInitializeDSM((HashState *) planstate, d->pcxt);
508 192 : break;
509 152 : case T_SortState:
510 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
511 152 : ExecSortInitializeDSM((SortState *) planstate, d->pcxt);
512 152 : break;
513 0 : case T_IncrementalSortState:
514 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
515 0 : ExecIncrementalSortInitializeDSM((IncrementalSortState *) planstate, d->pcxt);
516 0 : break;
517 554 : case T_AggState:
518 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
519 554 : ExecAggInitializeDSM((AggState *) planstate, d->pcxt);
520 554 : break;
521 6 : case T_MemoizeState:
522 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
523 6 : ExecMemoizeInitializeDSM((MemoizeState *) planstate, d->pcxt);
524 6 : break;
525 162 : default:
526 162 : break;
527 : }
528 :
529 2954 : return planstate_tree_walker(planstate, ExecParallelInitializeDSM, d);
530 : }
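/*
 * To make the estimate-then-initialize protocol concrete, a minimal
 * parallel-aware node could implement the pair along these lines (a
 * sketch only; FooState and ParallelFooState are hypothetical):
 *
 *		static void
 *		ExecFooEstimate(FooState *node, ParallelContext *pcxt)
 *		{
 *			shm_toc_estimate_chunk(&pcxt->estimator, sizeof(ParallelFooState));
 *			shm_toc_estimate_keys(&pcxt->estimator, 1);
 *		}
 *
 *		static void
 *		ExecFooInitializeDSM(FooState *node, ParallelContext *pcxt)
 *		{
 *			ParallelFooState *pstate;
 *
 *			pstate = shm_toc_allocate(pcxt->toc, sizeof(ParallelFooState));
 *			pstate->next_chunk = 0;
 *			shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);
 *		}
 */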
531 :
532 : /*
533 : * Set up the response queues that parallel workers will use to return
534 : * tuples to the leader backend.
535 : */
536 : static shm_mq_handle **
537 970 : ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
538 : {
539 : shm_mq_handle **responseq;
540 : char *tqueuespace;
541 : int i;
542 :
543 : /* Skip this if no workers. */
544 970 : if (pcxt->nworkers == 0)
545 0 : return NULL;
546 :
547 : /* Allocate memory for shared memory queue handles. */
548 : responseq = (shm_mq_handle **)
549 970 : palloc(pcxt->nworkers * sizeof(shm_mq_handle *));
550 :
551 : /*
552 : * If not reinitializing, allocate space from the DSM for the queues;
553 : * otherwise, find the already allocated space.
554 : */
555 970 : if (!reinitialize)
556 : tqueuespace =
557 712 : shm_toc_allocate(pcxt->toc,
558 : mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
559 712 : pcxt->nworkers));
560 : else
561 258 : tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, false);
562 :
563 : /* Create the queues, and become the receiver for each. */
564 3570 : for (i = 0; i < pcxt->nworkers; ++i)
565 : {
566 : shm_mq *mq;
567 :
568 2600 : mq = shm_mq_create(tqueuespace +
569 2600 : ((Size) i) * PARALLEL_TUPLE_QUEUE_SIZE,
570 : (Size) PARALLEL_TUPLE_QUEUE_SIZE);
571 :
572 2600 : shm_mq_set_receiver(mq, MyProc);
573 2600 : responseq[i] = shm_mq_attach(mq, pcxt->seg, NULL);
574 : }
575 :
576 : /* Add array of queues to shm_toc, so others can find it. */
577 970 : if (!reinitialize)
578 712 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, tqueuespace);
579 :
580 : /* Return array of handles. */
581 970 : return responseq;
582 : }
583 :
584 : /*
585 : * Sets up the required infrastructure for backend workers to perform
586 : * execution and return results to the main backend.
587 : */
588 : ParallelExecutorInfo *
589 712 : ExecInitParallelPlan(PlanState *planstate, EState *estate,
590 : Bitmapset *sendParams, int nworkers,
591 : int64 tuples_needed)
592 : {
593 : ParallelExecutorInfo *pei;
594 : ParallelContext *pcxt;
595 : ExecParallelEstimateContext e;
596 : ExecParallelInitializeDSMContext d;
597 : FixedParallelExecutorState *fpes;
598 : char *pstmt_data;
599 : char *pstmt_space;
600 : char *paramlistinfo_space;
601 : BufferUsage *bufusage_space;
602 : WalUsage *walusage_space;
603 712 : SharedExecutorInstrumentation *instrumentation = NULL;
604 712 : SharedJitInstrumentation *jit_instrumentation = NULL;
605 : int pstmt_len;
606 : int paramlistinfo_len;
607 712 : int instrumentation_len = 0;
608 712 : int jit_instrumentation_len = 0;
609 712 : int instrument_offset = 0;
610 712 : Size dsa_minsize = dsa_minimum_size();
611 : char *query_string;
612 : int query_len;
613 :
614 : /*
615 : * Force any initplan outputs that we're going to pass to workers to be
616 : * evaluated, if they weren't already.
617 : *
618 : * For simplicity, we use the EState's per-output-tuple ExprContext here.
619 : * That risks intra-query memory leakage, since we might pass through here
620 : * many times before that ExprContext gets reset; but ExecSetParamPlan
621 : * doesn't normally leak any memory in the context (see its comments), so
622 : * it doesn't seem worth complicating this function's API to pass it a
623 : * shorter-lived ExprContext. This might need to change someday.
624 : */
625 712 : ExecSetParamPlanMulti(sendParams, GetPerTupleExprContext(estate));
626 :
627 : /* Allocate object for return value. */
628 712 : pei = palloc0(sizeof(ParallelExecutorInfo));
629 712 : pei->finished = false;
630 712 : pei->planstate = planstate;
631 :
632 : /* Fix up and serialize plan to be sent to workers. */
633 712 : pstmt_data = ExecSerializePlan(planstate->plan, estate);
634 :
635 : /* Create a parallel context. */
636 712 : pcxt = CreateParallelContext("postgres", "ParallelQueryMain", nworkers);
637 712 : pei->pcxt = pcxt;
638 :
639 : /*
640 : * Before telling the parallel context to create a dynamic shared memory
641 : * segment, we need to figure out how big it should be. Estimate space
642 : * for the various things we need to store.
643 : */
644 :
645 : /* Estimate space for fixed-size state. */
646 712 : shm_toc_estimate_chunk(&pcxt->estimator,
647 : sizeof(FixedParallelExecutorState));
648 712 : shm_toc_estimate_keys(&pcxt->estimator, 1);
649 :
650 : /* Estimate space for query text. */
651 712 : query_len = strlen(estate->es_sourceText);
652 712 : shm_toc_estimate_chunk(&pcxt->estimator, query_len + 1);
653 712 : shm_toc_estimate_keys(&pcxt->estimator, 1);
654 :
655 : /* Estimate space for serialized PlannedStmt. */
656 712 : pstmt_len = strlen(pstmt_data) + 1;
657 712 : shm_toc_estimate_chunk(&pcxt->estimator, pstmt_len);
658 712 : shm_toc_estimate_keys(&pcxt->estimator, 1);
659 :
660 : /* Estimate space for serialized ParamListInfo. */
661 712 : paramlistinfo_len = EstimateParamListSpace(estate->es_param_list_info);
662 712 : shm_toc_estimate_chunk(&pcxt->estimator, paramlistinfo_len);
663 712 : shm_toc_estimate_keys(&pcxt->estimator, 1);
664 :
665 : /*
666 : * Estimate space for BufferUsage.
667 : *
668 : * If EXPLAIN is not in use and there are no extensions loaded that care,
669 : * we could skip this. But we have no way of knowing whether anyone's
670 : * looking at pgBufferUsage, so do it unconditionally.
671 : */
672 712 : shm_toc_estimate_chunk(&pcxt->estimator,
673 : mul_size(sizeof(BufferUsage), pcxt->nworkers));
674 712 : shm_toc_estimate_keys(&pcxt->estimator, 1);
675 :
676 : /*
677 : * Same thing for WalUsage.
678 : */
679 712 : shm_toc_estimate_chunk(&pcxt->estimator,
680 : mul_size(sizeof(WalUsage), pcxt->nworkers));
681 712 : shm_toc_estimate_keys(&pcxt->estimator, 1);
682 :
683 : /* Estimate space for tuple queues. */
684 712 : shm_toc_estimate_chunk(&pcxt->estimator,
685 : mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
686 712 : shm_toc_estimate_keys(&pcxt->estimator, 1);
687 :
688 : /*
689 : * Give parallel-aware nodes a chance to add to the estimates, and get a
690 : * count of how many PlanState nodes there are.
691 : */
692 712 : e.pcxt = pcxt;
693 712 : e.nnodes = 0;
694 712 : ExecParallelEstimate(planstate, &e);
695 :
696 : /* Estimate space for instrumentation, if required. */
697 712 : if (estate->es_instrument)
698 : {
699 180 : instrumentation_len =
700 : offsetof(SharedExecutorInstrumentation, plan_node_id) +
701 180 : sizeof(int) * e.nnodes;
702 180 : instrumentation_len = MAXALIGN(instrumentation_len);
703 180 : instrument_offset = instrumentation_len;
704 180 : instrumentation_len +=
705 180 : mul_size(sizeof(Instrumentation),
706 180 : mul_size(e.nnodes, nworkers));
707 180 : shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len);
708 180 : shm_toc_estimate_keys(&pcxt->estimator, 1);
709 :
710 : /* Estimate space for JIT instrumentation, if required. */
711 180 : if (estate->es_jit_flags != PGJIT_NONE)
712 : {
713 24 : jit_instrumentation_len =
714 24 : offsetof(SharedJitInstrumentation, jit_instr) +
715 : sizeof(JitInstrumentation) * nworkers;
716 24 : shm_toc_estimate_chunk(&pcxt->estimator, jit_instrumentation_len);
717 24 : shm_toc_estimate_keys(&pcxt->estimator, 1);
718 : }
719 : }
720 :
721 : /* Estimate space for DSA area. */
722 712 : shm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize);
723 712 : shm_toc_estimate_keys(&pcxt->estimator, 1);
724 :
725 : /*
726 : * InitializeParallelDSM() passes the active snapshot to the parallel
727 : * worker, which uses it to set es_snapshot. Make sure we don't set
728 : * es_snapshot differently in the child.
729 : */
730 : Assert(GetActiveSnapshot() == estate->es_snapshot);
731 :
732 : /* Everyone's had a chance to ask for space, so now create the DSM. */
733 712 : InitializeParallelDSM(pcxt);
734 :
735 : /*
736 : * OK, now we have a dynamic shared memory segment, and it should be big
737 : * enough to store all of the data we estimated we would want to put into
738 : * it, plus whatever general stuff (not specifically executor-related) the
739 : * ParallelContext itself needs to store there. None of the space we
740 : * asked for has been allocated or initialized yet, though, so do that.
741 : */
742 :
743 : /* Store fixed-size state. */
744 712 : fpes = shm_toc_allocate(pcxt->toc, sizeof(FixedParallelExecutorState));
745 712 : fpes->tuples_needed = tuples_needed;
746 712 : fpes->param_exec = InvalidDsaPointer;
747 712 : fpes->eflags = estate->es_top_eflags;
748 712 : fpes->jit_flags = estate->es_jit_flags;
749 712 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, fpes);
750 :
751 : /* Store query string */
752 712 : query_string = shm_toc_allocate(pcxt->toc, query_len + 1);
753 712 : memcpy(query_string, estate->es_sourceText, query_len + 1);
754 712 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, query_string);
755 :
756 : /* Store serialized PlannedStmt. */
757 712 : pstmt_space = shm_toc_allocate(pcxt->toc, pstmt_len);
758 712 : memcpy(pstmt_space, pstmt_data, pstmt_len);
759 712 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_PLANNEDSTMT, pstmt_space);
760 :
761 : /* Store serialized ParamListInfo. */
762 712 : paramlistinfo_space = shm_toc_allocate(pcxt->toc, paramlistinfo_len);
763 712 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_PARAMLISTINFO, paramlistinfo_space);
764 712 : SerializeParamList(estate->es_param_list_info, &paramlistinfo_space);
765 :
766 : /* Allocate space for each worker's BufferUsage; no need to initialize. */
767 712 : bufusage_space = shm_toc_allocate(pcxt->toc,
768 712 : mul_size(sizeof(BufferUsage), pcxt->nworkers));
769 712 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space);
770 712 : pei->buffer_usage = bufusage_space;
771 :
772 : /* Same for WalUsage. */
773 712 : walusage_space = shm_toc_allocate(pcxt->toc,
774 712 : mul_size(sizeof(WalUsage), pcxt->nworkers));
775 712 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_WAL_USAGE, walusage_space);
776 712 : pei->wal_usage = walusage_space;
777 :
778 : /* Set up the tuple queues that the workers will write into. */
779 712 : pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
780 :
781 : /* We don't need the TupleQueueReaders yet, though. */
782 712 : pei->reader = NULL;
783 :
784 : /*
785 : * If instrumentation options were supplied, allocate space for the data.
786 : * It only gets partially initialized here; the rest happens during
787 : * ExecParallelInitializeDSM.
788 : */
789 712 : if (estate->es_instrument)
790 : {
791 : Instrumentation *instrument;
792 : int i;
793 :
794 180 : instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
795 180 : instrumentation->instrument_options = estate->es_instrument;
796 180 : instrumentation->instrument_offset = instrument_offset;
797 180 : instrumentation->num_workers = nworkers;
798 180 : instrumentation->num_plan_nodes = e.nnodes;
799 180 : instrument = GetInstrumentationArray(instrumentation);
800 1860 : for (i = 0; i < nworkers * e.nnodes; ++i)
801 1680 : InstrInit(&instrument[i], estate->es_instrument);
802 180 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_INSTRUMENTATION,
803 : instrumentation);
804 180 : pei->instrumentation = instrumentation;
805 :
806 180 : if (estate->es_jit_flags != PGJIT_NONE)
807 : {
808 24 : jit_instrumentation = shm_toc_allocate(pcxt->toc,
809 : jit_instrumentation_len);
810 24 : jit_instrumentation->num_workers = nworkers;
811 24 : memset(jit_instrumentation->jit_instr, 0,
812 : sizeof(JitInstrumentation) * nworkers);
813 24 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_JIT_INSTRUMENTATION,
814 : jit_instrumentation);
815 24 : pei->jit_instrumentation = jit_instrumentation;
816 : }
817 : }
818 :
819 : /*
820 : * Create a DSA area that can be used by the leader and all workers.
821 : * (However, if we failed to create a DSM and are using private memory
822 : * instead, then skip this.)
823 : */
824 712 : if (pcxt->seg != NULL)
825 : {
826 : char *area_space;
827 :
828 712 : area_space = shm_toc_allocate(pcxt->toc, dsa_minsize);
829 712 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_DSA, area_space);
830 712 : pei->area = dsa_create_in_place(area_space, dsa_minsize,
831 : LWTRANCHE_PARALLEL_QUERY_DSA,
832 : pcxt->seg);
833 :
834 : /*
835 : * Serialize parameters, if any, using DSA storage. We don't dare use
836 : * the main parallel query DSM for this because we might relaunch
837 : * workers after the values have changed (and thus the amount of
838 : * storage required has changed).
839 : */
840 712 : if (!bms_is_empty(sendParams))
841 : {
842 24 : pei->param_exec = SerializeParamExecParams(estate, sendParams,
843 : pei->area);
844 24 : fpes->param_exec = pei->param_exec;
845 : }
846 : }
847 :
848 : /*
849 : * Give parallel-aware nodes a chance to initialize their shared data.
850 : * This also fills in the plan_node_id[] entries of the shared
851 : * instrumentation state, if instrumentation is enabled.
852 : */
853 712 : d.pcxt = pcxt;
854 712 : d.instrumentation = instrumentation;
855 712 : d.nnodes = 0;
856 :
857 : /* Install our DSA area while initializing the plan. */
858 712 : estate->es_query_dsa = pei->area;
859 712 : ExecParallelInitializeDSM(planstate, &d);
860 712 : estate->es_query_dsa = NULL;
861 :
862 : /*
863 : * Make sure that the world hasn't shifted under our feet. This could
864 : * probably just be an Assert(), but let's be conservative for now.
865 : */
866 712 : if (e.nnodes != d.nnodes)
867 0 : elog(ERROR, "inconsistent count of PlanState nodes");
868 :
869 : /* OK, we're ready to rock and roll. */
870 712 : return pei;
871 : }
872 :
873 : /*
874 : * Set up tuple queue readers to read the results of a parallel subplan.
875 : *
876 : * This is separate from ExecInitParallelPlan() because we can launch the
877 : * worker processes and let them start doing something before we do this.
878 : */
879 : void
880 952 : ExecParallelCreateReaders(ParallelExecutorInfo *pei)
881 : {
882 952 : int nworkers = pei->pcxt->nworkers_launched;
883 : int i;
884 :
885 : Assert(pei->reader == NULL);
886 :
887 952 : if (nworkers > 0)
888 : {
889 952 : pei->reader = (TupleQueueReader **)
890 952 : palloc(nworkers * sizeof(TupleQueueReader *));
891 :
892 3472 : for (i = 0; i < nworkers; i++)
893 : {
894 2520 : shm_mq_set_handle(pei->tqueue[i],
895 2520 : pei->pcxt->worker[i].bgwhandle);
896 2520 : pei->reader[i] = CreateTupleQueueReader(pei->tqueue[i]);
897 : }
898 : }
899 952 : }
900 :
901 : /*
902 : * Re-initialize the parallel executor shared memory state before launching
903 : * a fresh batch of workers.
904 : */
905 : void
906 258 : ExecParallelReinitialize(PlanState *planstate,
907 : ParallelExecutorInfo *pei,
908 : Bitmapset *sendParams)
909 : {
910 258 : EState *estate = planstate->state;
911 : FixedParallelExecutorState *fpes;
912 :
913 : /* Old workers must already be shut down */
914 : Assert(pei->finished);
915 :
916 : /*
917 : * Force any initplan outputs that we're going to pass to workers to be
918 : * evaluated, if they weren't already (see comments in
919 : * ExecInitParallelPlan).
920 : */
921 258 : ExecSetParamPlanMulti(sendParams, GetPerTupleExprContext(estate));
922 :
923 258 : ReinitializeParallelDSM(pei->pcxt);
924 258 : pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true);
925 258 : pei->reader = NULL;
926 258 : pei->finished = false;
927 :
928 258 : fpes = shm_toc_lookup(pei->pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, false);
929 :
930 : /* Free any serialized parameters from the last round. */
931 258 : if (DsaPointerIsValid(fpes->param_exec))
932 : {
933 0 : dsa_free(pei->area, fpes->param_exec);
934 0 : fpes->param_exec = InvalidDsaPointer;
935 : }
936 :
937 : /* Serialize current parameter values if required. */
938 258 : if (!bms_is_empty(sendParams))
939 : {
940 0 : pei->param_exec = SerializeParamExecParams(estate, sendParams,
941 : pei->area);
942 0 : fpes->param_exec = pei->param_exec;
943 : }
944 :
945 : /* Traverse plan tree and let each child node reset associated state. */
946 258 : estate->es_query_dsa = pei->area;
947 258 : ExecParallelReInitializeDSM(planstate, pei->pcxt);
948 258 : estate->es_query_dsa = NULL;
949 258 : }
950 :
951 : /*
952 : * Traverse plan tree to reinitialize per-node dynamic shared memory state
953 : */
954 : static bool
955 666 : ExecParallelReInitializeDSM(PlanState *planstate,
956 : ParallelContext *pcxt)
957 : {
958 666 : if (planstate == NULL)
959 0 : return false;
960 :
961 : /*
962 : * Call reinitializers for DSM-using plan nodes.
963 : */
964 666 : switch (nodeTag(planstate))
965 : {
966 276 : case T_SeqScanState:
967 276 : if (planstate->plan->parallel_aware)
968 228 : ExecSeqScanReInitializeDSM((SeqScanState *) planstate,
969 : pcxt);
970 276 : break;
971 12 : case T_IndexScanState:
972 12 : if (planstate->plan->parallel_aware)
973 12 : ExecIndexScanReInitializeDSM((IndexScanState *) planstate,
974 : pcxt);
975 12 : break;
976 12 : case T_IndexOnlyScanState:
977 12 : if (planstate->plan->parallel_aware)
978 12 : ExecIndexOnlyScanReInitializeDSM((IndexOnlyScanState *) planstate,
979 : pcxt);
980 12 : break;
981 0 : case T_ForeignScanState:
982 0 : if (planstate->plan->parallel_aware)
983 0 : ExecForeignScanReInitializeDSM((ForeignScanState *) planstate,
984 : pcxt);
985 0 : break;
986 0 : case T_AppendState:
987 0 : if (planstate->plan->parallel_aware)
988 0 : ExecAppendReInitializeDSM((AppendState *) planstate, pcxt);
989 0 : break;
990 0 : case T_CustomScanState:
991 0 : if (planstate->plan->parallel_aware)
992 0 : ExecCustomScanReInitializeDSM((CustomScanState *) planstate,
993 : pcxt);
994 0 : break;
995 54 : case T_BitmapHeapScanState:
996 54 : if (planstate->plan->parallel_aware)
997 54 : ExecBitmapHeapReInitializeDSM((BitmapHeapScanState *) planstate,
998 : pcxt);
999 54 : break;
1000 96 : case T_HashJoinState:
1001 96 : if (planstate->plan->parallel_aware)
1002 48 : ExecHashJoinReInitializeDSM((HashJoinState *) planstate,
1003 : pcxt);
1004 96 : break;
1005 126 : case T_HashState:
1006 : case T_SortState:
1007 : case T_IncrementalSortState:
1008 : case T_MemoizeState:
1009 : /* these nodes have DSM state, but no reinitialization is required */
1010 126 : break;
1011 :
1012 90 : default:
1013 90 : break;
1014 : }
1015 :
1016 666 : return planstate_tree_walker(planstate, ExecParallelReInitializeDSM, pcxt);
1017 : }
1018 :
1019 : /*
1020 : * Copy instrumentation information about this node and its descendants from
1021 : * dynamic shared memory.
1022 : */
1023 : static bool
1024 1026 : ExecParallelRetrieveInstrumentation(PlanState *planstate,
1025 : SharedExecutorInstrumentation *instrumentation)
1026 : {
1027 : Instrumentation *instrument;
1028 : int i;
1029 : int n;
1030 : int ibytes;
1031 1026 : int plan_node_id = planstate->plan->plan_node_id;
1032 : MemoryContext oldcontext;
1033 :
1034 : /* Find the instrumentation for this node. */
1035 4638 : for (i = 0; i < instrumentation->num_plan_nodes; ++i)
1036 4638 : if (instrumentation->plan_node_id[i] == plan_node_id)
1037 1026 : break;
1038 1026 : if (i >= instrumentation->num_plan_nodes)
1039 0 : elog(ERROR, "plan node %d not found", plan_node_id);
1040 :
1041 : /* Accumulate the statistics from all workers. */
1042 1026 : instrument = GetInstrumentationArray(instrumentation);
1043 1026 : instrument += i * instrumentation->num_workers;
1044 2706 : for (n = 0; n < instrumentation->num_workers; ++n)
1045 1680 : InstrAggNode(planstate->instrument, &instrument[n]);
1046 :
1047 : /*
1048 : * Also store the per-worker detail.
1049 : *
1050 : * Worker instrumentation should be allocated in the same context as the
1051 : * regular instrumentation information, which is the per-query context.
1052 : * Switch into per-query memory context.
1053 : */
1054 1026 : oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
1055 1026 : ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
1056 1026 : planstate->worker_instrument =
1057 1026 : palloc(ibytes + offsetof(WorkerInstrumentation, instrument));
1058 1026 : MemoryContextSwitchTo(oldcontext);
1059 :
1060 1026 : planstate->worker_instrument->num_workers = instrumentation->num_workers;
1061 1026 : memcpy(&planstate->worker_instrument->instrument, instrument, ibytes);
1062 :
1063 : /* Perform any node-type-specific work that needs to be done. */
1064 1026 : switch (nodeTag(planstate))
1065 : {
1066 12 : case T_SortState:
1067 12 : ExecSortRetrieveInstrumentation((SortState *) planstate);
1068 12 : break;
1069 0 : case T_IncrementalSortState:
1070 0 : ExecIncrementalSortRetrieveInstrumentation((IncrementalSortState *) planstate);
1071 0 : break;
1072 84 : case T_HashState:
1073 84 : ExecHashRetrieveInstrumentation((HashState *) planstate);
1074 84 : break;
1075 102 : case T_AggState:
1076 102 : ExecAggRetrieveInstrumentation((AggState *) planstate);
1077 102 : break;
1078 0 : case T_MemoizeState:
1079 0 : ExecMemoizeRetrieveInstrumentation((MemoizeState *) planstate);
1080 0 : break;
1081 0 : case T_BitmapHeapScanState:
1082 0 : ExecBitmapHeapRetrieveInstrumentation((BitmapHeapScanState *) planstate);
1083 0 : break;
1084 828 : default:
1085 828 : break;
1086 : }
1087 :
1088 1026 : return planstate_tree_walker(planstate, ExecParallelRetrieveInstrumentation,
1089 : instrumentation);
1090 : }
1091 :
1092 : /*
1093 : * Add up the workers' JIT instrumentation from dynamic shared memory.
1094 : */
1095 : static void
1096 24 : ExecParallelRetrieveJitInstrumentation(PlanState *planstate,
1097 : SharedJitInstrumentation *shared_jit)
1098 : {
1099 : JitInstrumentation *combined;
1100 : int ibytes;
1101 :
1102 : int n;
1103 :
1104 : /*
1105 : * Accumulate worker JIT instrumentation into the combined JIT
1106 : * instrumentation, allocating it if required.
1107 : */
1108 24 : if (!planstate->state->es_jit_worker_instr)
1109 24 : planstate->state->es_jit_worker_instr =
1110 24 : MemoryContextAllocZero(planstate->state->es_query_cxt, sizeof(JitInstrumentation));
1111 24 : combined = planstate->state->es_jit_worker_instr;
1112 :
1113 : /* Accumulate all the workers' instrumentations. */
1114 72 : for (n = 0; n < shared_jit->num_workers; ++n)
1115 48 : InstrJitAgg(combined, &shared_jit->jit_instr[n]);
1116 :
1117 : /*
1118 : * Store the per-worker detail.
1119 : *
1120 : * Similar to ExecParallelRetrieveInstrumentation(), allocate the
1121 : * instrumentation in per-query context.
1122 : */
1123 24 : ibytes = offsetof(SharedJitInstrumentation, jit_instr)
1124 24 : + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
1125 24 : planstate->worker_jit_instrument =
1126 24 : MemoryContextAlloc(planstate->state->es_query_cxt, ibytes);
1127 :
1128 24 : memcpy(planstate->worker_jit_instrument, shared_jit, ibytes);
1129 24 : }
1130 :
1131 : /*
1132 : * Finish parallel execution. We wait for parallel workers to finish, and
1133 : * accumulate their buffer/WAL usage.
1134 : */
1135 : void
1136 1748 : ExecParallelFinish(ParallelExecutorInfo *pei)
1137 : {
1138 1748 : int nworkers = pei->pcxt->nworkers_launched;
1139 : int i;
1140 :
1141 : /* Make this be a no-op if called twice in a row. */
1142 1748 : if (pei->finished)
1143 790 : return;
1144 :
1145 : /*
1146 : * Detach from tuple queues ASAP, so that any still-active workers will
1147 : * notice that no further results are wanted.
1148 : */
1149 958 : if (pei->tqueue != NULL)
1150 : {
1151 3466 : for (i = 0; i < nworkers; i++)
1152 2508 : shm_mq_detach(pei->tqueue[i]);
1153 958 : pfree(pei->tqueue);
1154 958 : pei->tqueue = NULL;
1155 : }
1156 :
1157 : /*
1158 : * While we're waiting for the workers to finish, let's get rid of the
1159 : * tuple queue readers. (Any other local cleanup could be done here too.)
1160 : */
1161 958 : if (pei->reader != NULL)
1162 : {
1163 3448 : for (i = 0; i < nworkers; i++)
1164 2508 : DestroyTupleQueueReader(pei->reader[i]);
1165 940 : pfree(pei->reader);
1166 940 : pei->reader = NULL;
1167 : }
1168 :
1169 : /* Now wait for the workers to finish. */
1170 958 : WaitForParallelWorkersToFinish(pei->pcxt);
1171 :
1172 : /*
1173 : * Next, accumulate buffer/WAL usage. (This must wait for the workers to
1174 : * finish, or we might get incomplete data.)
1175 : */
1176 3466 : for (i = 0; i < nworkers; i++)
1177 2508 : InstrAccumParallelQuery(&pei->buffer_usage[i], &pei->wal_usage[i]);
1178 :
1179 958 : pei->finished = true;
1180 : }
1181 :
1182 : /*
1183 : * Accumulate instrumentation, and then clean up whatever ParallelExecutorInfo
1184 : * resources still exist after ExecParallelFinish. We separate these
1185 : * routines because someone might want to examine the contents of the DSM
1186 : * after ExecParallelFinish and before calling this routine.
1187 : */
1188 : void
1189 700 : ExecParallelCleanup(ParallelExecutorInfo *pei)
1190 : {
1191 : /* Accumulate instrumentation, if any. */
1192 700 : if (pei->instrumentation)
1193 180 : ExecParallelRetrieveInstrumentation(pei->planstate,
1194 : pei->instrumentation);
1195 :
1196 : /* Accumulate JIT instrumentation, if any. */
1197 700 : if (pei->jit_instrumentation)
1198 24 : ExecParallelRetrieveJitInstrumentation(pei->planstate,
1199 24 : pei->jit_instrumentation);
1200 :
1201 : /* Free any serialized parameters. */
1202 700 : if (DsaPointerIsValid(pei->param_exec))
1203 : {
1204 24 : dsa_free(pei->area, pei->param_exec);
1205 24 : pei->param_exec = InvalidDsaPointer;
1206 : }
1207 700 : if (pei->area != NULL)
1208 : {
1209 700 : dsa_detach(pei->area);
1210 700 : pei->area = NULL;
1211 : }
1212 700 : if (pei->pcxt != NULL)
1213 : {
1214 700 : DestroyParallelContext(pei->pcxt);
1215 700 : pei->pcxt = NULL;
1216 : }
1217 700 : pfree(pei);
1218 700 : }
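/*
 * Putting the leader-side pieces together: a caller such as Gather drives
 * these routines roughly as follows (a sketch of the expected call
 * sequence, not code from this file):
 *
 *		pei = ExecInitParallelPlan(outerPlanState(node), estate,
 *								   sendParams, nworkers, tuples_needed);
 *		LaunchParallelWorkers(pei->pcxt);
 *		ExecParallelCreateReaders(pei);
 *		... fetch tuples from pei->reader[] ...
 *		ExecParallelFinish(pei);
 *		ExecParallelCleanup(pei);
 *
 * On rescan, ExecParallelReinitialize() replaces ExecInitParallelPlan()
 * before workers are launched again.
 */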
1219 :
1220 : /*
1221 : * Create a DestReceiver to write tuples we produce to the shm_mq designated
1222 : * for that purpose.
1223 : */
1224 : static DestReceiver *
1225 2520 : ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc)
1226 : {
1227 : char *mqspace;
1228 : shm_mq *mq;
1229 :
1230 2520 : mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE, false);
1231 2520 : mqspace += ParallelWorkerNumber * PARALLEL_TUPLE_QUEUE_SIZE;
1232 2520 : mq = (shm_mq *) mqspace;
1233 2520 : shm_mq_set_sender(mq, MyProc);
1234 2520 : return CreateTupleQueueDestReceiver(shm_mq_attach(mq, seg, NULL));
1235 : }
1236 :
1237 : /*
1238 : * Create a QueryDesc for the PlannedStmt we are to execute, and return it.
1239 : */
1240 : static QueryDesc *
1241 2520 : ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
1242 : int instrument_options)
1243 : {
1244 : char *pstmtspace;
1245 : char *paramspace;
1246 : PlannedStmt *pstmt;
1247 : ParamListInfo paramLI;
1248 : char *queryString;
1249 :
1250 : /* Get the query string from shared memory */
1251 2520 : queryString = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT, false);
1252 :
1253 : /* Reconstruct leader-supplied PlannedStmt. */
1254 2520 : pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT, false);
1255 2520 : pstmt = (PlannedStmt *) stringToNode(pstmtspace);
1256 :
1257 : /* Reconstruct ParamListInfo. */
1258 2520 : paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false);
1259 2520 : paramLI = RestoreParamList(&paramspace);
1260 :
1261 : /*
1262 : * Create a QueryDesc for the query. We pass NULL for cachedplan, because
1263 : * we don't have a pointer to the CachedPlan in the leader's process. It's
1264 : * fine because the only reason the executor needs to see it is to decide
1265 : * if it should take locks on certain relations, but parallel workers
1266 : * always take locks anyway.
1267 : */
1268 2520 : return CreateQueryDesc(pstmt,
1269 : NULL,
1270 : queryString,
1271 : GetActiveSnapshot(), InvalidSnapshot,
1272 : receiver, paramLI, NULL, instrument_options);
1273 : }
1274 :
1275 : /*
1276 : * Copy instrumentation information from this node and its descendants into
1277 : * dynamic shared memory, so that the parallel leader can retrieve it.
1278 : */
1279 : static bool
1280 2376 : ExecParallelReportInstrumentation(PlanState *planstate,
1281 : SharedExecutorInstrumentation *instrumentation)
1282 : {
1283 : int i;
1284 2376 : int plan_node_id = planstate->plan->plan_node_id;
1285 : Instrumentation *instrument;
1286 :
1287 2376 : InstrEndLoop(planstate->instrument);
1288 :
1289 : /*
1290 : * If we kept the shared plan_node_id[] array in sorted order, we could
1291 : * use binary search here. This might matter someday if
1292 : * we're pushing down sufficiently large plan trees. For now, do it the
1293 : * slow, dumb way.
1294 : */
1295 7812 : for (i = 0; i < instrumentation->num_plan_nodes; ++i)
1296 7812 : if (instrumentation->plan_node_id[i] == plan_node_id)
1297 2376 : break;
1298 2376 : if (i >= instrumentation->num_plan_nodes)
1299 0 : elog(ERROR, "plan node %d not found", plan_node_id);
1300 :
1301 : /*
1302 : * Add our statistics to the per-node, per-worker totals. It's possible
1303 : * that this could happen more than once if we relaunched workers.
1304 : */
1305 2376 : instrument = GetInstrumentationArray(instrumentation);
1306 2376 : instrument += i * instrumentation->num_workers;
1307 : Assert(IsParallelWorker());
1308 : Assert(ParallelWorkerNumber < instrumentation->num_workers);
1309 2376 : InstrAggNode(&instrument[ParallelWorkerNumber], planstate->instrument);
1310 :
1311 2376 : return planstate_tree_walker(planstate, ExecParallelReportInstrumentation,
1312 : instrumentation);
1313 : }
1314 :
1315 : /*
1316 : * Initialize the PlanState and its descendants with the information
1317 : * retrieved from shared memory. This has to be done once the PlanState
1318 : * is allocated and initialized by the executor; that is, after ExecutorStart().
1319 : */
1320 : static bool
1321 8160 : ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt)
1322 : {
1323 8160 : if (planstate == NULL)
1324 0 : return false;
1325 :
1326 8160 : switch (nodeTag(planstate))
1327 : {
1328 3310 : case T_SeqScanState:
1329 3310 : if (planstate->plan->parallel_aware)
1330 2682 : ExecSeqScanInitializeWorker((SeqScanState *) planstate, pwcxt);
1331 3310 : break;
1332 396 : case T_IndexScanState:
1333 396 : if (planstate->plan->parallel_aware)
1334 120 : ExecIndexScanInitializeWorker((IndexScanState *) planstate,
1335 : pwcxt);
1336 396 : break;
1337 242 : case T_IndexOnlyScanState:
1338 242 : if (planstate->plan->parallel_aware)
1339 206 : ExecIndexOnlyScanInitializeWorker((IndexOnlyScanState *) planstate,
1340 : pwcxt);
1341 242 : break;
1342 0 : case T_ForeignScanState:
1343 0 : if (planstate->plan->parallel_aware)
1344 0 : ExecForeignScanInitializeWorker((ForeignScanState *) planstate,
1345 : pwcxt);
1346 0 : break;
1347 378 : case T_AppendState:
1348 378 : if (planstate->plan->parallel_aware)
1349 318 : ExecAppendInitializeWorker((AppendState *) planstate, pwcxt);
1350 378 : break;
1351 0 : case T_CustomScanState:
1352 0 : if (planstate->plan->parallel_aware)
1353 0 : ExecCustomScanInitializeWorker((CustomScanState *) planstate,
1354 : pwcxt);
1355 0 : break;
1356 272 : case T_BitmapHeapScanState:
1357 272 : if (planstate->plan->parallel_aware)
1358 270 : ExecBitmapHeapInitializeWorker((BitmapHeapScanState *) planstate,
1359 : pwcxt);
1360 272 : break;
1361 548 : case T_HashJoinState:
1362 548 : if (planstate->plan->parallel_aware)
1363 308 : ExecHashJoinInitializeWorker((HashJoinState *) planstate,
1364 : pwcxt);
1365 548 : break;
1366 548 : case T_HashState:
1367 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1368 548 : ExecHashInitializeWorker((HashState *) planstate, pwcxt);
1369 548 : break;
1370 452 : case T_SortState:
1371 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1372 452 : ExecSortInitializeWorker((SortState *) planstate, pwcxt);
1373 452 : break;
1374 0 : case T_IncrementalSortState:
1375 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1376 0 : ExecIncrementalSortInitializeWorker((IncrementalSortState *) planstate,
1377 : pwcxt);
1378 0 : break;
1379 1548 : case T_AggState:
1380 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1381 1548 : ExecAggInitializeWorker((AggState *) planstate, pwcxt);
1382 1548 : break;
1383 12 : case T_MemoizeState:
1384 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1385 12 : ExecMemoizeInitializeWorker((MemoizeState *) planstate, pwcxt);
1386 12 : break;
1387 454 : default:
1388 454 : break;
1389 : }
1390 :
1391 8160 : return planstate_tree_walker(planstate, ExecParallelInitializeWorker,
1392 : pwcxt);
1393 : }
1394 :
1395 : /*
1396 : * Main entrypoint for parallel query worker processes.
1397 : *
1398 : * We reach this function from ParallelWorkerMain, so the setup necessary to
1399 : * create a sensible parallel environment has already been done;
1400 : * ParallelWorkerMain worries about stuff like the transaction state, combo
1401 : * CID mappings, and GUC values, so we don't need to deal with any of that
1402 : * here.
1403 : *
1404 : * Our job is to deal with concerns specific to the executor. The parallel
1405 : * group leader will have stored a serialized PlannedStmt, and it's our job
1406 : * to execute that plan and write the resulting tuples to the appropriate
1407 : * tuple queue. Various bits of supporting information that we need in order
1408 : * to do this are also stored in the dsm_segment and can be accessed through
1409 : * the shm_toc.
1410 : */
1411 : void
1412 2520 : ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
1413 : {
1414 : FixedParallelExecutorState *fpes;
1415 : BufferUsage *buffer_usage;
1416 : WalUsage *wal_usage;
1417 : DestReceiver *receiver;
1418 : QueryDesc *queryDesc;
1419 : SharedExecutorInstrumentation *instrumentation;
1420 : SharedJitInstrumentation *jit_instrumentation;
1421 2520 : int instrument_options = 0;
1422 : void *area_space;
1423 : dsa_area *area;
1424 : ParallelWorkerContext pwcxt;
1425 :
1426 : /* Get fixed-size state. */
1427 2520 : fpes = shm_toc_lookup(toc, PARALLEL_KEY_EXECUTOR_FIXED, false);
1428 :
1429 : /* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
1430 2520 : receiver = ExecParallelGetReceiver(seg, toc);
1431 2520 : instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
1432 2520 : if (instrumentation != NULL)
1433 726 : instrument_options = instrumentation->instrument_options;
1434 2520 : jit_instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_JIT_INSTRUMENTATION,
1435 : true);
1436 2520 : queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);
1437 :
1438 : /* Set debug_query_string for this worker */
1439 2520 : debug_query_string = queryDesc->sourceText;
1440 :
1441 : /* Report the worker's query for monitoring purposes */
1442 2520 : pgstat_report_activity(STATE_RUNNING, debug_query_string);
1443 :
1444 : /* Attach to the dynamic shared memory area. */
1445 2520 : area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false);
1446 2520 : area = dsa_attach_in_place(area_space, seg);
1447 :
1448 : /* Start up the executor */
1449 2520 : queryDesc->plannedstmt->jitFlags = fpes->jit_flags;
1450 2520 : if (!ExecutorStart(queryDesc, fpes->eflags))
1451 0 : elog(ERROR, "ExecutorStart() failed unexpectedly");
1452 :
1453 : /* Special executor initialization steps for parallel workers */
1454 2520 : queryDesc->planstate->state->es_query_dsa = area;
1455 2520 : if (DsaPointerIsValid(fpes->param_exec))
1456 : {
1457 : char *paramexec_space;
1458 :
1459 72 : paramexec_space = dsa_get_address(area, fpes->param_exec);
1460 72 : RestoreParamExecParams(paramexec_space, queryDesc->estate);
1461 : }
1462 2520 : pwcxt.toc = toc;
1463 2520 : pwcxt.seg = seg;
1464 2520 : ExecParallelInitializeWorker(queryDesc->planstate, &pwcxt);
1465 :
1466 : /* Pass down any tuple bound */
1467 2520 : ExecSetTupleBound(fpes->tuples_needed, queryDesc->planstate);
1468 :
1469 : /*
1470 : * Prepare to track buffer/WAL usage during query execution.
1471 : *
1472 : * We do this after starting up the executor to match what happens in the
1473 : * leader, which also doesn't count buffer accesses and WAL activity that
1474 : * occur during executor startup.
1475 : */
1476 2520 : InstrStartParallelQuery();
1477 :
1478 : /*
1479 : * Run the plan. If we specified a tuple bound, be careful not to demand
1480 : * more tuples than that.
1481 : */
1482 2520 : ExecutorRun(queryDesc,
1483 : ForwardScanDirection,
1484 2520 : fpes->tuples_needed < 0 ? (int64) 0 : fpes->tuples_needed);
1485 :
1486 : /* Shut down the executor */
1487 2508 : ExecutorFinish(queryDesc);
1488 :
1489 : /* Report buffer/WAL usage during parallel execution. */
1490 2508 : buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE, false);
1491 2508 : wal_usage = shm_toc_lookup(toc, PARALLEL_KEY_WAL_USAGE, false);
1492 2508 : InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber],
1493 2508 : &wal_usage[ParallelWorkerNumber]);
1494 :
1495 : /* Report instrumentation data if any instrumentation options are set. */
1496 2508 : if (instrumentation != NULL)
1497 726 : ExecParallelReportInstrumentation(queryDesc->planstate,
1498 : instrumentation);
1499 :
1500 : /* Report JIT instrumentation data if any */
1501 2508 : if (queryDesc->estate->es_jit && jit_instrumentation != NULL)
1502 : {
1503 : Assert(ParallelWorkerNumber < jit_instrumentation->num_workers);
1504 144 : jit_instrumentation->jit_instr[ParallelWorkerNumber] =
1505 144 : queryDesc->estate->es_jit->instr;
1506 : }
1507 :
1508 : /* Must do this after capturing instrumentation. */
1509 2508 : ExecutorEnd(queryDesc);
1510 :
1511 : /* Cleanup. */
1512 2508 : dsa_detach(area);
1513 2508 : FreeQueryDesc(queryDesc);
1514 2508 : receiver->rDestroy(receiver);
1515 2508 : }