Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * execParallel.c
4 : * Support routines for parallel execution.
5 : *
6 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : * This file contains routines that are intended to support setting up,
10 : * using, and tearing down a ParallelContext from within the PostgreSQL
11 : * executor. The ParallelContext machinery will handle starting the
12 : * workers and ensuring that their state generally matches that of the
13 : * leader; see src/backend/access/transam/README.parallel for details.
14 : * However, we must save and restore relevant executor state, such as
15 : * any ParamListInfo associated with the query, buffer/WAL usage info, and
16 : * the actual plan to be passed down to the worker.
17 : *
18 : * IDENTIFICATION
19 : * src/backend/executor/execParallel.c
20 : *
21 : *-------------------------------------------------------------------------
22 : */
23 :
24 : #include "postgres.h"
25 :
26 : #include "executor/execParallel.h"
27 : #include "executor/executor.h"
28 : #include "executor/nodeAgg.h"
29 : #include "executor/nodeAppend.h"
30 : #include "executor/nodeBitmapHeapscan.h"
31 : #include "executor/nodeCustom.h"
32 : #include "executor/nodeForeignscan.h"
33 : #include "executor/nodeHash.h"
34 : #include "executor/nodeHashjoin.h"
35 : #include "executor/nodeIncrementalSort.h"
36 : #include "executor/nodeIndexonlyscan.h"
37 : #include "executor/nodeIndexscan.h"
38 : #include "executor/nodeMemoize.h"
39 : #include "executor/nodeSeqscan.h"
40 : #include "executor/nodeSort.h"
41 : #include "executor/nodeSubplan.h"
42 : #include "executor/tqueue.h"
43 : #include "jit/jit.h"
44 : #include "nodes/nodeFuncs.h"
45 : #include "pgstat.h"
46 : #include "tcop/tcopprot.h"
47 : #include "utils/datum.h"
48 : #include "utils/dsa.h"
49 : #include "utils/lsyscache.h"
50 : #include "utils/snapmgr.h"
51 :
52 : /*
53 : * Magic numbers for parallel executor communication. We use constants
54 : * greater than any 32-bit integer here so that values < 2^32 can be used
55 : * by individual parallel nodes to store their own state.
56 : */
57 : #define PARALLEL_KEY_EXECUTOR_FIXED UINT64CONST(0xE000000000000001)
58 : #define PARALLEL_KEY_PLANNEDSTMT UINT64CONST(0xE000000000000002)
59 : #define PARALLEL_KEY_PARAMLISTINFO UINT64CONST(0xE000000000000003)
60 : #define PARALLEL_KEY_BUFFER_USAGE UINT64CONST(0xE000000000000004)
61 : #define PARALLEL_KEY_TUPLE_QUEUE UINT64CONST(0xE000000000000005)
62 : #define PARALLEL_KEY_INSTRUMENTATION UINT64CONST(0xE000000000000006)
63 : #define PARALLEL_KEY_DSA UINT64CONST(0xE000000000000007)
64 : #define PARALLEL_KEY_QUERY_TEXT UINT64CONST(0xE000000000000008)
65 : #define PARALLEL_KEY_JIT_INSTRUMENTATION UINT64CONST(0xE000000000000009)
66 : #define PARALLEL_KEY_WAL_USAGE UINT64CONST(0xE00000000000000A)
67 :
68 : #define PARALLEL_TUPLE_QUEUE_SIZE 65536
69 :
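/*
 * Editorial sketch (not part of the original file): per the comment above,
 * a parallel node that needs private shared state typically keys its TOC
 * entry by its plan_node_id, which is always well below 2^32 and therefore
 * can never collide with the PARALLEL_KEY_* constants.  For example (names
 * hypothetical):
 *
 *     shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, shared_state);
 */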
70 : /*
71 : * Fixed-size state that we need to pass to parallel workers.
72 : */
73 : typedef struct FixedParallelExecutorState
74 : {
75 : int64 tuples_needed; /* tuple bound, see ExecSetTupleBound */
76 : dsa_pointer param_exec;
77 : int eflags;
78 : int jit_flags;
79 : } FixedParallelExecutorState;
80 :
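/*
 * Editorial note (not in the original file): tuples_needed mirrors the
 * argument of ExecSetTupleBound(); a negative value means "no bound", which
 * ParallelQueryMain() below translates into an unbounded ExecutorRun() call.
 */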
81 : /*
82 : * DSM structure for accumulating per-PlanState instrumentation.
83 : *
84 : * instrument_options: Same meaning here as in instrument.c.
85 : *
86 : * instrument_offset: Offset, relative to the start of this structure,
87 : * of the first Instrumentation object. This will depend on the length of
88 : * the plan_node_id array.
89 : *
90 : * num_workers: Number of workers.
91 : *
92 : * num_plan_nodes: Number of plan nodes.
93 : *
94 : * plan_node_id: Array of plan node IDs for which we are gathering instrumentation
95 : * from parallel workers. The length of this array is given by num_plan_nodes.
96 : */
97 : struct SharedExecutorInstrumentation
98 : {
99 : int instrument_options;
100 : int instrument_offset;
101 : int num_workers;
102 : int num_plan_nodes;
103 : int plan_node_id[FLEXIBLE_ARRAY_MEMBER];
104 : /* array of num_plan_nodes * num_workers Instrumentation objects follows */
105 : };
106 : #define GetInstrumentationArray(sei) \
107 : (AssertVariableIsOfTypeMacro(sei, SharedExecutorInstrumentation *), \
108 : (Instrumentation *) (((char *) sei) + sei->instrument_offset))
109 :
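/*
 * Editorial note (not in the original file): the DSM layout implied above is
 * the fixed header, then the plan_node_id[] array, then num_plan_nodes *
 * num_workers Instrumentation structs.  The slot for plan-node index i and
 * worker w is therefore
 *
 *     Instrumentation *array = GetInstrumentationArray(sei);
 *     Instrumentation *slot = &array[i * sei->num_workers + w];
 *
 * which matches the indexing used in ExecParallelRetrieveInstrumentation()
 * and ExecParallelReportInstrumentation() below.
 */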
110 : /* Context object for ExecParallelEstimate. */
111 : typedef struct ExecParallelEstimateContext
112 : {
113 : ParallelContext *pcxt;
114 : int nnodes;
115 : } ExecParallelEstimateContext;
116 :
117 : /* Context object for ExecParallelInitializeDSM. */
118 : typedef struct ExecParallelInitializeDSMContext
119 : {
120 : ParallelContext *pcxt;
121 : SharedExecutorInstrumentation *instrumentation;
122 : int nnodes;
123 : } ExecParallelInitializeDSMContext;
124 :
125 : /* Helper functions that run in the parallel leader. */
126 : static char *ExecSerializePlan(Plan *plan, EState *estate);
127 : static bool ExecParallelEstimate(PlanState *planstate,
128 : ExecParallelEstimateContext *e);
129 : static bool ExecParallelInitializeDSM(PlanState *planstate,
130 : ExecParallelInitializeDSMContext *d);
131 : static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
132 : bool reinitialize);
133 : static bool ExecParallelReInitializeDSM(PlanState *planstate,
134 : ParallelContext *pcxt);
135 : static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
136 : SharedExecutorInstrumentation *instrumentation);
137 :
138 : /* Helper function that runs in the parallel worker. */
139 : static DestReceiver *ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc);
140 :
141 : /*
142 : * Create a serialized representation of the plan to be sent to each worker.
143 : */
144 : static char *
145 742 : ExecSerializePlan(Plan *plan, EState *estate)
146 : {
147 : PlannedStmt *pstmt;
148 : ListCell *lc;
149 :
150 : /* We can't scribble on the original plan, so make a copy. */
151 742 : plan = copyObject(plan);
152 :
153 : /*
154 : * The worker will start its own copy of the executor, and that copy will
155 : * insert a junk filter if the toplevel node has any resjunk entries. We
156 : * don't want that to happen, because while resjunk columns shouldn't be
157 : * sent back to the user, here the tuples are coming back to another
158 : * backend which may very well need them. So mutate the target list
159 : * accordingly. This is sort of a hack; there might be better ways to do
160 : * this...
161 : */
162 2014 : foreach(lc, plan->targetlist)
163 : {
164 1272 : TargetEntry *tle = lfirst_node(TargetEntry, lc);
165 :
166 1272 : tle->resjunk = false;
167 : }
168 :
169 : /*
170 : * Create a dummy PlannedStmt. Most of the fields don't need to be valid
171 : * for our purposes, but the worker will need at least a minimal
172 : * PlannedStmt to start the executor.
173 : */
174 742 : pstmt = makeNode(PlannedStmt);
175 742 : pstmt->commandType = CMD_SELECT;
176 742 : pstmt->queryId = pgstat_get_my_query_id();
177 742 : pstmt->hasReturning = false;
178 742 : pstmt->hasModifyingCTE = false;
179 742 : pstmt->canSetTag = true;
180 742 : pstmt->transientPlan = false;
181 742 : pstmt->dependsOnRole = false;
182 742 : pstmt->parallelModeNeeded = false;
183 742 : pstmt->planTree = plan;
184 742 : pstmt->rtable = estate->es_range_table;
185 742 : pstmt->permInfos = estate->es_rteperminfos;
186 742 : pstmt->resultRelations = NIL;
187 742 : pstmt->appendRelations = NIL;
188 :
189 : /*
190 : * Transfer only parallel-safe subplans, leaving a NULL "hole" in the list
191 : * for unsafe ones (so that the list indexes of the safe ones are
192 : * preserved). This positively ensures that the worker won't try to run,
193 : * or even do ExecInitNode on, an unsafe subplan. That's important for
194 : * protecting, e.g., non-parallel-aware FDWs from getting into trouble.
195 : */
196 742 : pstmt->subplans = NIL;
197 796 : foreach(lc, estate->es_plannedstmt->subplans)
198 : {
199 54 : Plan *subplan = (Plan *) lfirst(lc);
200 :
201 54 : if (subplan && !subplan->parallel_safe)
202 12 : subplan = NULL;
203 54 : pstmt->subplans = lappend(pstmt->subplans, subplan);
204 : }
205 :
206 742 : pstmt->rewindPlanIDs = NULL;
207 742 : pstmt->rowMarks = NIL;
208 742 : pstmt->relationOids = NIL;
209 742 : pstmt->invalItems = NIL; /* workers can't replan anyway... */
210 742 : pstmt->paramExecTypes = estate->es_plannedstmt->paramExecTypes;
211 742 : pstmt->utilityStmt = NULL;
212 742 : pstmt->stmt_location = -1;
213 742 : pstmt->stmt_len = -1;
214 :
215 : /* Return serialized copy of our dummy PlannedStmt. */
216 742 : return nodeToString(pstmt);
217 : }
218 :
219 : /*
220 : * Parallel-aware plan nodes (and occasionally others) may need some state
221 : * which is shared across all parallel workers. Before we size the DSM, give
222 : * them a chance to call shm_toc_estimate_chunk or shm_toc_estimate_keys on
223 : * &pcxt->estimator.
224 : *
225 : * While we're at it, count the number of PlanState nodes in the tree, so
226 : * we know how many Instrumentation structures we need.
227 : */
228 : static bool
229 2978 : ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
230 : {
231 2978 : if (planstate == NULL)
232 0 : return false;
233 :
234 : /* Count this node. */
235 2978 : e->nnodes++;
236 :
237 2978 : switch (nodeTag(planstate))
238 : {
239 1138 : case T_SeqScanState:
240 1138 : if (planstate->plan->parallel_aware)
241 900 : ExecSeqScanEstimate((SeqScanState *) planstate,
242 : e->pcxt);
243 1138 : break;
244 294 : case T_IndexScanState:
245 294 : if (planstate->plan->parallel_aware)
246 18 : ExecIndexScanEstimate((IndexScanState *) planstate,
247 : e->pcxt);
248 294 : break;
249 52 : case T_IndexOnlyScanState:
250 52 : if (planstate->plan->parallel_aware)
251 40 : ExecIndexOnlyScanEstimate((IndexOnlyScanState *) planstate,
252 : e->pcxt);
253 52 : break;
254 0 : case T_ForeignScanState:
255 0 : if (planstate->plan->parallel_aware)
256 0 : ExecForeignScanEstimate((ForeignScanState *) planstate,
257 : e->pcxt);
258 0 : break;
259 186 : case T_AppendState:
260 186 : if (planstate->plan->parallel_aware)
261 138 : ExecAppendEstimate((AppendState *) planstate,
262 : e->pcxt);
263 186 : break;
264 0 : case T_CustomScanState:
265 0 : if (planstate->plan->parallel_aware)
266 0 : ExecCustomScanEstimate((CustomScanState *) planstate,
267 : e->pcxt);
268 0 : break;
269 20 : case T_BitmapHeapScanState:
270 20 : if (planstate->plan->parallel_aware)
271 18 : ExecBitmapHeapEstimate((BitmapHeapScanState *) planstate,
272 : e->pcxt);
273 20 : break;
274 192 : case T_HashJoinState:
275 192 : if (planstate->plan->parallel_aware)
276 120 : ExecHashJoinEstimate((HashJoinState *) planstate,
277 : e->pcxt);
278 192 : break;
279 192 : case T_HashState:
280 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
281 192 : ExecHashEstimate((HashState *) planstate, e->pcxt);
282 192 : break;
283 152 : case T_SortState:
284 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
285 152 : ExecSortEstimate((SortState *) planstate, e->pcxt);
286 152 : break;
287 0 : case T_IncrementalSortState:
288 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
289 0 : ExecIncrementalSortEstimate((IncrementalSortState *) planstate, e->pcxt);
290 0 : break;
291 554 : case T_AggState:
292 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
293 554 : ExecAggEstimate((AggState *) planstate, e->pcxt);
294 554 : break;
295 6 : case T_MemoizeState:
296 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
297 6 : ExecMemoizeEstimate((MemoizeState *) planstate, e->pcxt);
298 6 : break;
299 192 : default:
300 192 : break;
301 : }
302 :
303 2978 : return planstate_tree_walker(planstate, ExecParallelEstimate, e);
304 : }
305 :
306 : /*
307 : * Estimate the amount of space required to serialize the indicated parameters.
308 : */
309 : static Size
310 24 : EstimateParamExecSpace(EState *estate, Bitmapset *params)
311 : {
312 : int paramid;
313 24 : Size sz = sizeof(int);
314 :
315 24 : paramid = -1;
316 54 : while ((paramid = bms_next_member(params, paramid)) >= 0)
317 : {
318 : Oid typeOid;
319 : int16 typLen;
320 : bool typByVal;
321 : ParamExecData *prm;
322 :
323 30 : prm = &(estate->es_param_exec_vals[paramid]);
324 30 : typeOid = list_nth_oid(estate->es_plannedstmt->paramExecTypes,
325 : paramid);
326 :
327 30 : sz = add_size(sz, sizeof(int)); /* space for paramid */
328 :
329 : /* space for datum/isnull */
330 30 : if (OidIsValid(typeOid))
331 30 : get_typlenbyval(typeOid, &typLen, &typByVal);
332 : else
333 : {
334 : /* If no type OID, assume by-value, like copyParamList does. */
335 0 : typLen = sizeof(Datum);
336 0 : typByVal = true;
337 : }
338 30 : sz = add_size(sz,
339 30 : datumEstimateSpace(prm->value, prm->isnull,
340 : typByVal, typLen));
341 : }
342 24 : return sz;
343 : }
344 :
345 : /*
346 : * Serialize specified PARAM_EXEC parameters.
347 : *
348 : * We write the number of parameters first, as a 4-byte integer, and then
349 : * write details for each parameter in turn. The details for each parameter
350 : * consist of a 4-byte paramid (location of param in execution time internal
351 : * parameter array) and then the datum as serialized by datumSerialize().
352 : */
353 : static dsa_pointer
354 24 : SerializeParamExecParams(EState *estate, Bitmapset *params, dsa_area *area)
355 : {
356 : Size size;
357 : int nparams;
358 : int paramid;
359 : ParamExecData *prm;
360 : dsa_pointer handle;
361 : char *start_address;
362 :
363 : /* Allocate enough space for the current parameter values. */
364 24 : size = EstimateParamExecSpace(estate, params);
365 24 : handle = dsa_allocate(area, size);
366 24 : start_address = dsa_get_address(area, handle);
367 :
368 : /* First write the number of parameters as a 4-byte integer. */
369 24 : nparams = bms_num_members(params);
370 24 : memcpy(start_address, &nparams, sizeof(int));
371 24 : start_address += sizeof(int);
372 :
373 : /* Write details for each parameter in turn. */
374 24 : paramid = -1;
375 54 : while ((paramid = bms_next_member(params, paramid)) >= 0)
376 : {
377 : Oid typeOid;
378 : int16 typLen;
379 : bool typByVal;
380 :
381 30 : prm = &(estate->es_param_exec_vals[paramid]);
382 30 : typeOid = list_nth_oid(estate->es_plannedstmt->paramExecTypes,
383 : paramid);
384 :
385 : /* Write paramid. */
386 30 : memcpy(start_address, &paramid, sizeof(int));
387 30 : start_address += sizeof(int);
388 :
389 : /* Write datum/isnull */
390 30 : if (OidIsValid(typeOid))
391 30 : get_typlenbyval(typeOid, &typLen, &typByVal);
392 : else
393 : {
394 : /* If no type OID, assume by-value, like copyParamList does. */
395 0 : typLen = sizeof(Datum);
396 0 : typByVal = true;
397 : }
398 30 : datumSerialize(prm->value, prm->isnull, typByVal, typLen,
399 : &start_address);
400 : }
401 :
402 24 : return handle;
403 : }
404 :
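/*
 * Editorial note (not in the original file): the DSA chunk produced above is
 * laid out as
 *
 *     int            nparams
 *     nparams times: int paramid, followed by datumSerialize() output
 *
 * which is exactly the format that RestoreParamExecParams() below consumes.
 */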
405 : /*
406 : * Restore specified PARAM_EXEC parameters.
407 : */
408 : static void
409 72 : RestoreParamExecParams(char *start_address, EState *estate)
410 : {
411 : int nparams;
412 : int i;
413 : int paramid;
414 :
415 72 : memcpy(&nparams, start_address, sizeof(int));
416 72 : start_address += sizeof(int);
417 :
418 156 : for (i = 0; i < nparams; i++)
419 : {
420 : ParamExecData *prm;
421 :
422 : /* Read paramid */
423 84 : memcpy(&paramid, start_address, sizeof(int));
424 84 : start_address += sizeof(int);
425 84 : prm = &(estate->es_param_exec_vals[paramid]);
426 :
427 : /* Read datum/isnull. */
428 84 : prm->value = datumRestore(&start_address, &prm->isnull);
429 84 : prm->execPlan = NULL;
430 : }
431 72 : }
432 :
433 : /*
434 : * Initialize the dynamic shared memory segment that will be used to control
435 : * parallel execution.
436 : */
437 : static bool
438 2978 : ExecParallelInitializeDSM(PlanState *planstate,
439 : ExecParallelInitializeDSMContext *d)
440 : {
441 2978 : if (planstate == NULL)
442 0 : return false;
443 :
444 : /* If instrumentation is enabled, initialize slot for this node. */
445 2978 : if (d->instrumentation != NULL)
446 1026 : d->instrumentation->plan_node_id[d->nnodes] =
447 1026 : planstate->plan->plan_node_id;
448 :
449 : /* Count this node. */
450 2978 : d->nnodes++;
451 :
452 : /*
453 : * Call initializers for DSM-using plan nodes.
454 : *
455 : * Most plan nodes won't do anything here, but plan nodes that allocated
456 : * DSM may need to initialize shared state in the DSM before parallel
457 : * workers are launched. They can allocate the space they previously
458 : * estimated using shm_toc_allocate, and add the keys they previously
459 : * estimated using shm_toc_insert, in each case targeting pcxt->toc.
460 : */
461 2978 : switch (nodeTag(planstate))
462 : {
463 1138 : case T_SeqScanState:
464 1138 : if (planstate->plan->parallel_aware)
465 900 : ExecSeqScanInitializeDSM((SeqScanState *) planstate,
466 : d->pcxt);
467 1138 : break;
468 294 : case T_IndexScanState:
469 294 : if (planstate->plan->parallel_aware)
470 18 : ExecIndexScanInitializeDSM((IndexScanState *) planstate,
471 : d->pcxt);
472 294 : break;
473 52 : case T_IndexOnlyScanState:
474 52 : if (planstate->plan->parallel_aware)
475 40 : ExecIndexOnlyScanInitializeDSM((IndexOnlyScanState *) planstate,
476 : d->pcxt);
477 52 : break;
478 0 : case T_ForeignScanState:
479 0 : if (planstate->plan->parallel_aware)
480 0 : ExecForeignScanInitializeDSM((ForeignScanState *) planstate,
481 : d->pcxt);
482 0 : break;
483 186 : case T_AppendState:
484 186 : if (planstate->plan->parallel_aware)
485 138 : ExecAppendInitializeDSM((AppendState *) planstate,
486 : d->pcxt);
487 186 : break;
488 0 : case T_CustomScanState:
489 0 : if (planstate->plan->parallel_aware)
490 0 : ExecCustomScanInitializeDSM((CustomScanState *) planstate,
491 : d->pcxt);
492 0 : break;
493 20 : case T_BitmapHeapScanState:
494 20 : if (planstate->plan->parallel_aware)
495 18 : ExecBitmapHeapInitializeDSM((BitmapHeapScanState *) planstate,
496 : d->pcxt);
497 20 : break;
498 192 : case T_HashJoinState:
499 192 : if (planstate->plan->parallel_aware)
500 120 : ExecHashJoinInitializeDSM((HashJoinState *) planstate,
501 : d->pcxt);
502 192 : break;
503 192 : case T_HashState:
504 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
505 192 : ExecHashInitializeDSM((HashState *) planstate, d->pcxt);
506 192 : break;
507 152 : case T_SortState:
508 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
509 152 : ExecSortInitializeDSM((SortState *) planstate, d->pcxt);
510 152 : break;
511 0 : case T_IncrementalSortState:
512 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
513 0 : ExecIncrementalSortInitializeDSM((IncrementalSortState *) planstate, d->pcxt);
514 0 : break;
515 554 : case T_AggState:
516 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
517 554 : ExecAggInitializeDSM((AggState *) planstate, d->pcxt);
518 554 : break;
519 6 : case T_MemoizeState:
520 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
521 6 : ExecMemoizeInitializeDSM((MemoizeState *) planstate, d->pcxt);
522 6 : break;
523 192 : default:
524 192 : break;
525 : }
526 :
527 2978 : return planstate_tree_walker(planstate, ExecParallelInitializeDSM, d);
528 : }
529 :
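/*
 * Editorial sketch (not part of the original file): the per-node callbacks
 * dispatched above usually come in matching pairs.  A hypothetical
 * parallel-aware "Foo" scan might implement them like this (FooScanState
 * and ParallelFooScanData are illustrative names only):
 *
 *     static void
 *     ExecFooScanEstimate(FooScanState *node, ParallelContext *pcxt)
 *     {
 *         node->pscan_len = sizeof(ParallelFooScanData);
 *         shm_toc_estimate_chunk(&pcxt->estimator, node->pscan_len);
 *         shm_toc_estimate_keys(&pcxt->estimator, 1);
 *     }
 *
 *     static void
 *     ExecFooScanInitializeDSM(FooScanState *node, ParallelContext *pcxt)
 *     {
 *         ParallelFooScanData *pscan;
 *
 *         pscan = shm_toc_allocate(pcxt->toc, node->pscan_len);
 *         ... fill in shared scan state ...
 *         shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pscan);
 *         node->pscan = pscan;
 *     }
 */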
530 : /*
531 : * Set up the response queues that parallel workers will use to return
532 : * tuples to the leader backend, and become the receiver for each queue.
533 : */
534 : static shm_mq_handle **
535 1000 : ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
536 : {
537 : shm_mq_handle **responseq;
538 : char *tqueuespace;
539 : int i;
540 :
541 : /* Skip this if no workers. */
542 1000 : if (pcxt->nworkers == 0)
543 36 : return NULL;
544 :
545 : /* Allocate memory for shared memory queue handles. */
546 : responseq = (shm_mq_handle **)
547 964 : palloc(pcxt->nworkers * sizeof(shm_mq_handle *));
548 :
549 : /*
550 : * If not reinitializing, allocate space from the DSM for the queues;
551 : * otherwise, find the already allocated space.
552 : */
553 964 : if (!reinitialize)
554 : tqueuespace =
555 706 : shm_toc_allocate(pcxt->toc,
556 : mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
557 706 : pcxt->nworkers));
558 : else
559 258 : tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, false);
560 :
561 : /* Create the queues, and become the receiver for each. */
562 3558 : for (i = 0; i < pcxt->nworkers; ++i)
563 : {
564 : shm_mq *mq;
565 :
566 2594 : mq = shm_mq_create(tqueuespace +
567 2594 : ((Size) i) * PARALLEL_TUPLE_QUEUE_SIZE,
568 : (Size) PARALLEL_TUPLE_QUEUE_SIZE);
569 :
570 2594 : shm_mq_set_receiver(mq, MyProc);
571 2594 : responseq[i] = shm_mq_attach(mq, pcxt->seg, NULL);
572 : }
573 :
574 : /* Add array of queues to shm_toc, so others can find it. */
575 964 : if (!reinitialize)
576 706 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, tqueuespace);
577 :
578 : /* Return array of handles. */
579 964 : return responseq;
580 : }
581 :
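/*
 * Editorial note (not in the original file): worker i's queue lives at a
 * fixed offset, so the leader (above) and the worker (in
 * ExecParallelGetReceiver() below) independently compute the same address:
 *
 *     mq = (shm_mq *) (tqueuespace + (Size) i * PARALLEL_TUPLE_QUEUE_SIZE);
 *
 * with i == ParallelWorkerNumber on the worker side.
 */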
582 : /*
583 : * Sets up the required infrastructure for backend workers to perform
584 : * execution and return results to the main backend.
585 : */
586 : ParallelExecutorInfo *
587 742 : ExecInitParallelPlan(PlanState *planstate, EState *estate,
588 : Bitmapset *sendParams, int nworkers,
589 : int64 tuples_needed)
590 : {
591 : ParallelExecutorInfo *pei;
592 : ParallelContext *pcxt;
593 : ExecParallelEstimateContext e;
594 : ExecParallelInitializeDSMContext d;
595 : FixedParallelExecutorState *fpes;
596 : char *pstmt_data;
597 : char *pstmt_space;
598 : char *paramlistinfo_space;
599 : BufferUsage *bufusage_space;
600 : WalUsage *walusage_space;
601 742 : SharedExecutorInstrumentation *instrumentation = NULL;
602 742 : SharedJitInstrumentation *jit_instrumentation = NULL;
603 : int pstmt_len;
604 : int paramlistinfo_len;
605 742 : int instrumentation_len = 0;
606 742 : int jit_instrumentation_len = 0;
607 742 : int instrument_offset = 0;
608 742 : Size dsa_minsize = dsa_minimum_size();
609 : char *query_string;
610 : int query_len;
611 :
612 : /*
613 : * Force any initplan outputs that we're going to pass to workers to be
614 : * evaluated, if they weren't already.
615 : *
616 : * For simplicity, we use the EState's per-output-tuple ExprContext here.
617 : * That risks intra-query memory leakage, since we might pass through here
618 : * many times before that ExprContext gets reset; but ExecSetParamPlan
619 : * doesn't normally leak any memory in the context (see its comments), so
620 : * it doesn't seem worth complicating this function's API to pass it a
621 : * shorter-lived ExprContext. This might need to change someday.
622 : */
623 742 : ExecSetParamPlanMulti(sendParams, GetPerTupleExprContext(estate));
624 :
625 : /* Allocate object for return value. */
626 742 : pei = palloc0(sizeof(ParallelExecutorInfo));
627 742 : pei->finished = false;
628 742 : pei->planstate = planstate;
629 :
630 : /* Fix up and serialize plan to be sent to workers. */
631 742 : pstmt_data = ExecSerializePlan(planstate->plan, estate);
632 :
633 : /* Create a parallel context. */
634 742 : pcxt = CreateParallelContext("postgres", "ParallelQueryMain", nworkers);
635 742 : pei->pcxt = pcxt;
636 :
637 : /*
638 : * Before telling the parallel context to create a dynamic shared memory
639 : * segment, we need to figure out how big it should be. Estimate space
640 : * for the various things we need to store.
641 : */
642 :
643 : /* Estimate space for fixed-size state. */
644 742 : shm_toc_estimate_chunk(&pcxt->estimator,
645 : sizeof(FixedParallelExecutorState));
646 742 : shm_toc_estimate_keys(&pcxt->estimator, 1);
647 :
648 : /* Estimate space for query text. */
649 742 : query_len = strlen(estate->es_sourceText);
650 742 : shm_toc_estimate_chunk(&pcxt->estimator, query_len + 1);
651 742 : shm_toc_estimate_keys(&pcxt->estimator, 1);
652 :
653 : /* Estimate space for serialized PlannedStmt. */
654 742 : pstmt_len = strlen(pstmt_data) + 1;
655 742 : shm_toc_estimate_chunk(&pcxt->estimator, pstmt_len);
656 742 : shm_toc_estimate_keys(&pcxt->estimator, 1);
657 :
658 : /* Estimate space for serialized ParamListInfo. */
659 742 : paramlistinfo_len = EstimateParamListSpace(estate->es_param_list_info);
660 742 : shm_toc_estimate_chunk(&pcxt->estimator, paramlistinfo_len);
661 742 : shm_toc_estimate_keys(&pcxt->estimator, 1);
662 :
663 : /*
664 : * Estimate space for BufferUsage.
665 : *
666 : * If EXPLAIN is not in use and there are no extensions loaded that care,
667 : * we could skip this. But we have no way of knowing whether anyone's
668 : * looking at pgBufferUsage, so do it unconditionally.
669 : */
670 742 : shm_toc_estimate_chunk(&pcxt->estimator,
671 : mul_size(sizeof(BufferUsage), pcxt->nworkers));
672 742 : shm_toc_estimate_keys(&pcxt->estimator, 1);
673 :
674 : /*
675 : * Same thing for WalUsage.
676 : */
677 742 : shm_toc_estimate_chunk(&pcxt->estimator,
678 : mul_size(sizeof(WalUsage), pcxt->nworkers));
679 742 : shm_toc_estimate_keys(&pcxt->estimator, 1);
680 :
681 : /* Estimate space for tuple queues. */
682 742 : shm_toc_estimate_chunk(&pcxt->estimator,
683 : mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
684 742 : shm_toc_estimate_keys(&pcxt->estimator, 1);
685 :
686 : /*
687 : * Give parallel-aware nodes a chance to add to the estimates, and get a
688 : * count of how many PlanState nodes there are.
689 : */
690 742 : e.pcxt = pcxt;
691 742 : e.nnodes = 0;
692 742 : ExecParallelEstimate(planstate, &e);
693 :
694 : /* Estimate space for instrumentation, if required. */
695 742 : if (estate->es_instrument)
696 : {
697 180 : instrumentation_len =
698 : offsetof(SharedExecutorInstrumentation, plan_node_id) +
699 180 : sizeof(int) * e.nnodes;
700 180 : instrumentation_len = MAXALIGN(instrumentation_len);
701 180 : instrument_offset = instrumentation_len;
702 180 : instrumentation_len +=
703 180 : mul_size(sizeof(Instrumentation),
704 180 : mul_size(e.nnodes, nworkers));
705 180 : shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len);
706 180 : shm_toc_estimate_keys(&pcxt->estimator, 1);
707 :
708 : /* Estimate space for JIT instrumentation, if required. */
709 180 : if (estate->es_jit_flags != PGJIT_NONE)
710 : {
711 24 : jit_instrumentation_len =
712 24 : offsetof(SharedJitInstrumentation, jit_instr) +
713 : sizeof(JitInstrumentation) * nworkers;
714 24 : shm_toc_estimate_chunk(&pcxt->estimator, jit_instrumentation_len);
715 24 : shm_toc_estimate_keys(&pcxt->estimator, 1);
716 : }
717 : }
718 :
719 : /* Estimate space for DSA area. */
720 742 : shm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize);
721 742 : shm_toc_estimate_keys(&pcxt->estimator, 1);
722 :
723 : /*
724 : * InitializeParallelDSM() passes the active snapshot to the parallel
725 : * worker, which uses it to set es_snapshot. Make sure we don't set
726 : * es_snapshot differently in the child.
727 : */
728 : Assert(GetActiveSnapshot() == estate->es_snapshot);
729 :
730 : /* Everyone's had a chance to ask for space, so now create the DSM. */
731 742 : InitializeParallelDSM(pcxt);
732 :
733 : /*
734 : * OK, now we have a dynamic shared memory segment, and it should be big
735 : * enough to store all of the data we estimated we would want to put into
736 : * it, plus whatever general stuff (not specifically executor-related) the
737 : * ParallelContext itself needs to store there. None of the space we
738 : * asked for has been allocated or initialized yet, though, so do that.
739 : */
740 :
741 : /* Store fixed-size state. */
742 742 : fpes = shm_toc_allocate(pcxt->toc, sizeof(FixedParallelExecutorState));
743 742 : fpes->tuples_needed = tuples_needed;
744 742 : fpes->param_exec = InvalidDsaPointer;
745 742 : fpes->eflags = estate->es_top_eflags;
746 742 : fpes->jit_flags = estate->es_jit_flags;
747 742 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, fpes);
748 :
749 : /* Store query string */
750 742 : query_string = shm_toc_allocate(pcxt->toc, query_len + 1);
751 742 : memcpy(query_string, estate->es_sourceText, query_len + 1);
752 742 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, query_string);
753 :
754 : /* Store serialized PlannedStmt. */
755 742 : pstmt_space = shm_toc_allocate(pcxt->toc, pstmt_len);
756 742 : memcpy(pstmt_space, pstmt_data, pstmt_len);
757 742 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_PLANNEDSTMT, pstmt_space);
758 :
759 : /* Store serialized ParamListInfo. */
760 742 : paramlistinfo_space = shm_toc_allocate(pcxt->toc, paramlistinfo_len);
761 742 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_PARAMLISTINFO, paramlistinfo_space);
762 742 : SerializeParamList(estate->es_param_list_info, &paramlistinfo_space);
763 :
764 : /* Allocate space for each worker's BufferUsage; no need to initialize. */
765 742 : bufusage_space = shm_toc_allocate(pcxt->toc,
766 742 : mul_size(sizeof(BufferUsage), pcxt->nworkers));
767 742 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space);
768 742 : pei->buffer_usage = bufusage_space;
769 :
770 : /* Same for WalUsage. */
771 742 : walusage_space = shm_toc_allocate(pcxt->toc,
772 742 : mul_size(sizeof(WalUsage), pcxt->nworkers));
773 742 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_WAL_USAGE, walusage_space);
774 742 : pei->wal_usage = walusage_space;
775 :
776 : /* Set up the tuple queues that the workers will write into. */
777 742 : pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
778 :
779 : /* We don't need the TupleQueueReaders yet, though. */
780 742 : pei->reader = NULL;
781 :
782 : /*
783 : * If instrumentation options were supplied, allocate space for the data.
784 : * It only gets partially initialized here; the rest happens during
785 : * ExecParallelInitializeDSM.
786 : */
787 742 : if (estate->es_instrument)
788 : {
789 : Instrumentation *instrument;
790 : int i;
791 :
792 180 : instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
793 180 : instrumentation->instrument_options = estate->es_instrument;
794 180 : instrumentation->instrument_offset = instrument_offset;
795 180 : instrumentation->num_workers = nworkers;
796 180 : instrumentation->num_plan_nodes = e.nnodes;
797 180 : instrument = GetInstrumentationArray(instrumentation);
798 1860 : for (i = 0; i < nworkers * e.nnodes; ++i)
799 1680 : InstrInit(&instrument[i], estate->es_instrument);
800 180 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_INSTRUMENTATION,
801 : instrumentation);
802 180 : pei->instrumentation = instrumentation;
803 :
804 180 : if (estate->es_jit_flags != PGJIT_NONE)
805 : {
806 24 : jit_instrumentation = shm_toc_allocate(pcxt->toc,
807 : jit_instrumentation_len);
808 24 : jit_instrumentation->num_workers = nworkers;
809 24 : memset(jit_instrumentation->jit_instr, 0,
810 : sizeof(JitInstrumentation) * nworkers);
811 24 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_JIT_INSTRUMENTATION,
812 : jit_instrumentation);
813 24 : pei->jit_instrumentation = jit_instrumentation;
814 : }
815 : }
816 :
817 : /*
818 : * Create a DSA area that can be used by the leader and all workers.
819 : * (However, if we failed to create a DSM and are using private memory
820 : * instead, then skip this.)
821 : */
822 742 : if (pcxt->seg != NULL)
823 : {
824 : char *area_space;
825 :
826 706 : area_space = shm_toc_allocate(pcxt->toc, dsa_minsize);
827 706 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_DSA, area_space);
828 706 : pei->area = dsa_create_in_place(area_space, dsa_minsize,
829 : LWTRANCHE_PARALLEL_QUERY_DSA,
830 : pcxt->seg);
831 :
832 : /*
833 : * Serialize parameters, if any, using DSA storage. We don't dare use
834 : * the main parallel query DSM for this because we might relaunch
835 : * workers after the values have changed (and thus the amount of
836 : * storage required has changed).
837 : */
838 706 : if (!bms_is_empty(sendParams))
839 : {
840 24 : pei->param_exec = SerializeParamExecParams(estate, sendParams,
841 : pei->area);
842 24 : fpes->param_exec = pei->param_exec;
843 : }
844 : }
845 :
846 : /*
847 : * Give parallel-aware nodes a chance to initialize their shared data.
848 : * This also fills in the plan_node_id array in *instrumentation, if we
849 : * are collecting instrumentation.
850 : */
851 742 : d.pcxt = pcxt;
852 742 : d.instrumentation = instrumentation;
853 742 : d.nnodes = 0;
854 :
855 : /* Install our DSA area while initializing the plan. */
856 742 : estate->es_query_dsa = pei->area;
857 742 : ExecParallelInitializeDSM(planstate, &d);
858 742 : estate->es_query_dsa = NULL;
859 :
860 : /*
861 : * Make sure that the world hasn't shifted under our feet. This could
862 : * probably just be an Assert(), but let's be conservative for now.
863 : */
864 742 : if (e.nnodes != d.nnodes)
865 0 : elog(ERROR, "inconsistent count of PlanState nodes");
866 :
867 : /* OK, we're ready to rock and roll. */
868 742 : return pei;
869 : }
870 :
871 : /*
872 : * Set up tuple queue readers to read the results of a parallel subplan.
873 : *
874 : * This is separate from ExecInitParallelPlan() because we can launch the
875 : * worker processes and let them start doing something before we do this.
876 : */
877 : void
878 946 : ExecParallelCreateReaders(ParallelExecutorInfo *pei)
879 : {
880 946 : int nworkers = pei->pcxt->nworkers_launched;
881 : int i;
882 :
883 : Assert(pei->reader == NULL);
884 :
885 946 : if (nworkers > 0)
886 : {
887 946 : pei->reader = (TupleQueueReader **)
888 946 : palloc(nworkers * sizeof(TupleQueueReader *));
889 :
890 3460 : for (i = 0; i < nworkers; i++)
891 : {
892 2514 : shm_mq_set_handle(pei->tqueue[i],
893 2514 : pei->pcxt->worker[i].bgwhandle);
894 2514 : pei->reader[i] = CreateTupleQueueReader(pei->tqueue[i]);
895 : }
896 : }
897 946 : }
898 :
899 : /*
900 : * Re-initialize the parallel executor shared memory state before launching
901 : * a fresh batch of workers.
902 : */
903 : void
904 258 : ExecParallelReinitialize(PlanState *planstate,
905 : ParallelExecutorInfo *pei,
906 : Bitmapset *sendParams)
907 : {
908 258 : EState *estate = planstate->state;
909 : FixedParallelExecutorState *fpes;
910 :
911 : /* Old workers must already be shut down */
912 : Assert(pei->finished);
913 :
914 : /*
915 : * Force any initplan outputs that we're going to pass to workers to be
916 : * evaluated, if they weren't already (see comments in
917 : * ExecInitParallelPlan).
918 : */
919 258 : ExecSetParamPlanMulti(sendParams, GetPerTupleExprContext(estate));
920 :
921 258 : ReinitializeParallelDSM(pei->pcxt);
922 258 : pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true);
923 258 : pei->reader = NULL;
924 258 : pei->finished = false;
925 :
926 258 : fpes = shm_toc_lookup(pei->pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, false);
927 :
928 : /* Free any serialized parameters from the last round. */
929 258 : if (DsaPointerIsValid(fpes->param_exec))
930 : {
931 0 : dsa_free(pei->area, fpes->param_exec);
932 0 : fpes->param_exec = InvalidDsaPointer;
933 : }
934 :
935 : /* Serialize current parameter values if required. */
936 258 : if (!bms_is_empty(sendParams))
937 : {
938 0 : pei->param_exec = SerializeParamExecParams(estate, sendParams,
939 : pei->area);
940 0 : fpes->param_exec = pei->param_exec;
941 : }
942 :
943 : /* Traverse plan tree and let each child node reset associated state. */
944 258 : estate->es_query_dsa = pei->area;
945 258 : ExecParallelReInitializeDSM(planstate, pei->pcxt);
946 258 : estate->es_query_dsa = NULL;
947 258 : }
948 :
949 : /*
950 : * Traverse plan tree to reinitialize per-node dynamic shared memory state
951 : */
952 : static bool
953 666 : ExecParallelReInitializeDSM(PlanState *planstate,
954 : ParallelContext *pcxt)
955 : {
956 666 : if (planstate == NULL)
957 0 : return false;
958 :
959 : /*
960 : * Call reinitializers for DSM-using plan nodes.
961 : */
962 666 : switch (nodeTag(planstate))
963 : {
964 276 : case T_SeqScanState:
965 276 : if (planstate->plan->parallel_aware)
966 228 : ExecSeqScanReInitializeDSM((SeqScanState *) planstate,
967 : pcxt);
968 276 : break;
969 12 : case T_IndexScanState:
970 12 : if (planstate->plan->parallel_aware)
971 12 : ExecIndexScanReInitializeDSM((IndexScanState *) planstate,
972 : pcxt);
973 12 : break;
974 12 : case T_IndexOnlyScanState:
975 12 : if (planstate->plan->parallel_aware)
976 12 : ExecIndexOnlyScanReInitializeDSM((IndexOnlyScanState *) planstate,
977 : pcxt);
978 12 : break;
979 0 : case T_ForeignScanState:
980 0 : if (planstate->plan->parallel_aware)
981 0 : ExecForeignScanReInitializeDSM((ForeignScanState *) planstate,
982 : pcxt);
983 0 : break;
984 0 : case T_AppendState:
985 0 : if (planstate->plan->parallel_aware)
986 0 : ExecAppendReInitializeDSM((AppendState *) planstate, pcxt);
987 0 : break;
988 0 : case T_CustomScanState:
989 0 : if (planstate->plan->parallel_aware)
990 0 : ExecCustomScanReInitializeDSM((CustomScanState *) planstate,
991 : pcxt);
992 0 : break;
993 54 : case T_BitmapHeapScanState:
994 54 : if (planstate->plan->parallel_aware)
995 54 : ExecBitmapHeapReInitializeDSM((BitmapHeapScanState *) planstate,
996 : pcxt);
997 54 : break;
998 96 : case T_HashJoinState:
999 96 : if (planstate->plan->parallel_aware)
1000 48 : ExecHashJoinReInitializeDSM((HashJoinState *) planstate,
1001 : pcxt);
1002 96 : break;
1003 126 : case T_HashState:
1004 : case T_SortState:
1005 : case T_IncrementalSortState:
1006 : case T_MemoizeState:
1007 : /* these nodes have DSM state, but no reinitialization is required */
1008 126 : break;
1009 :
1010 90 : default:
1011 90 : break;
1012 : }
1013 :
1014 666 : return planstate_tree_walker(planstate, ExecParallelReInitializeDSM, pcxt);
1015 : }
1016 :
1017 : /*
1018 : * Copy instrumentation information about this node and its descendants from
1019 : * dynamic shared memory.
1020 : */
1021 : static bool
1022 1026 : ExecParallelRetrieveInstrumentation(PlanState *planstate,
1023 : SharedExecutorInstrumentation *instrumentation)
1024 : {
1025 : Instrumentation *instrument;
1026 : int i;
1027 : int n;
1028 : int ibytes;
1029 1026 : int plan_node_id = planstate->plan->plan_node_id;
1030 : MemoryContext oldcontext;
1031 :
1032 : /* Find the instrumentation for this node. */
1033 4638 : for (i = 0; i < instrumentation->num_plan_nodes; ++i)
1034 4638 : if (instrumentation->plan_node_id[i] == plan_node_id)
1035 1026 : break;
1036 1026 : if (i >= instrumentation->num_plan_nodes)
1037 0 : elog(ERROR, "plan node %d not found", plan_node_id);
1038 :
1039 : /* Accumulate the statistics from all workers. */
1040 1026 : instrument = GetInstrumentationArray(instrumentation);
1041 1026 : instrument += i * instrumentation->num_workers;
1042 2706 : for (n = 0; n < instrumentation->num_workers; ++n)
1043 1680 : InstrAggNode(planstate->instrument, &instrument[n]);
1044 :
1045 : /*
1046 : * Also store the per-worker detail.
1047 : *
1048 : * Worker instrumentation should be allocated in the same context as the
1049 : * regular instrumentation information, which is the per-query context.
1050 : * Switch into per-query memory context.
1051 : */
1052 1026 : oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
1053 1026 : ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
1054 1026 : planstate->worker_instrument =
1055 1026 : palloc(ibytes + offsetof(WorkerInstrumentation, instrument));
1056 1026 : MemoryContextSwitchTo(oldcontext);
1057 :
1058 1026 : planstate->worker_instrument->num_workers = instrumentation->num_workers;
1059 1026 : memcpy(&planstate->worker_instrument->instrument, instrument, ibytes);
1060 :
1061 : /* Perform any node-type-specific work that needs to be done. */
1062 1026 : switch (nodeTag(planstate))
1063 : {
1064 12 : case T_SortState:
1065 12 : ExecSortRetrieveInstrumentation((SortState *) planstate);
1066 12 : break;
1067 0 : case T_IncrementalSortState:
1068 0 : ExecIncrementalSortRetrieveInstrumentation((IncrementalSortState *) planstate);
1069 0 : break;
1070 84 : case T_HashState:
1071 84 : ExecHashRetrieveInstrumentation((HashState *) planstate);
1072 84 : break;
1073 102 : case T_AggState:
1074 102 : ExecAggRetrieveInstrumentation((AggState *) planstate);
1075 102 : break;
1076 0 : case T_MemoizeState:
1077 0 : ExecMemoizeRetrieveInstrumentation((MemoizeState *) planstate);
1078 0 : break;
1079 0 : case T_BitmapHeapScanState:
1080 0 : ExecBitmapHeapRetrieveInstrumentation((BitmapHeapScanState *) planstate);
1081 0 : break;
1082 828 : default:
1083 828 : break;
1084 : }
1085 :
1086 1026 : return planstate_tree_walker(planstate, ExecParallelRetrieveInstrumentation,
1087 : instrumentation);
1088 : }
1089 :
1090 : /*
1091 : * Add up the workers' JIT instrumentation from dynamic shared memory.
1092 : */
1093 : static void
1094 24 : ExecParallelRetrieveJitInstrumentation(PlanState *planstate,
1095 : SharedJitInstrumentation *shared_jit)
1096 : {
1097 : JitInstrumentation *combined;
1098 : int ibytes;
1099 :
1100 : int n;
1101 :
1102 : /*
1103 : * Accumulate worker JIT instrumentation into the combined JIT
1104 : * instrumentation, allocating it if required.
1105 : */
1106 24 : if (!planstate->state->es_jit_worker_instr)
1107 24 : planstate->state->es_jit_worker_instr =
1108 24 : MemoryContextAllocZero(planstate->state->es_query_cxt, sizeof(JitInstrumentation));
1109 24 : combined = planstate->state->es_jit_worker_instr;
1110 :
1111 : /* Accumulate all the workers' instrumentations. */
1112 72 : for (n = 0; n < shared_jit->num_workers; ++n)
1113 48 : InstrJitAgg(combined, &shared_jit->jit_instr[n]);
1114 :
1115 : /*
1116 : * Store the per-worker detail.
1117 : *
1118 : * Similar to ExecParallelRetrieveInstrumentation(), allocate the
1119 : * instrumentation in per-query context.
1120 : */
1121 24 : ibytes = offsetof(SharedJitInstrumentation, jit_instr)
1122 24 : + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
1123 24 : planstate->worker_jit_instrument =
1124 24 : MemoryContextAlloc(planstate->state->es_query_cxt, ibytes);
1125 :
1126 24 : memcpy(planstate->worker_jit_instrument, shared_jit, ibytes);
1127 24 : }
1128 :
1129 : /*
1130 : * Finish parallel execution. We wait for parallel workers to finish, and
1131 : * accumulate their buffer/WAL usage.
1132 : */
1133 : void
1134 1778 : ExecParallelFinish(ParallelExecutorInfo *pei)
1135 : {
1136 1778 : int nworkers = pei->pcxt->nworkers_launched;
1137 : int i;
1138 :
1139 : /* Make this be a no-op if called twice in a row. */
1140 1778 : if (pei->finished)
1141 790 : return;
1142 :
1143 : /*
1144 : * Detach from tuple queues ASAP, so that any still-active workers will
1145 : * notice that no further results are wanted.
1146 : */
1147 988 : if (pei->tqueue != NULL)
1148 : {
1149 3454 : for (i = 0; i < nworkers; i++)
1150 2502 : shm_mq_detach(pei->tqueue[i]);
1151 952 : pfree(pei->tqueue);
1152 952 : pei->tqueue = NULL;
1153 : }
1154 :
1155 : /*
1156 : * While we're waiting for the workers to finish, let's get rid of the
1157 : * tuple queue readers. (Any other local cleanup could be done here too.)
1158 : */
1159 988 : if (pei->reader != NULL)
1160 : {
1161 3436 : for (i = 0; i < nworkers; i++)
1162 2502 : DestroyTupleQueueReader(pei->reader[i]);
1163 934 : pfree(pei->reader);
1164 934 : pei->reader = NULL;
1165 : }
1166 :
1167 : /* Now wait for the workers to finish. */
1168 988 : WaitForParallelWorkersToFinish(pei->pcxt);
1169 :
1170 : /*
1171 : * Next, accumulate buffer/WAL usage. (This must wait for the workers to
1172 : * finish, or we might get incomplete data.)
1173 : */
1174 3490 : for (i = 0; i < nworkers; i++)
1175 2502 : InstrAccumParallelQuery(&pei->buffer_usage[i], &pei->wal_usage[i]);
1176 :
1177 988 : pei->finished = true;
1178 : }
1179 :
1180 : /*
1181 : * Accumulate instrumentation, and then clean up whatever ParallelExecutorInfo
1182 : * resources still exist after ExecParallelFinish. We separate these
1183 : * routines because someone might want to examine the contents of the DSM
1184 : * after ExecParallelFinish and before calling this routine.
1185 : */
1186 : void
1187 730 : ExecParallelCleanup(ParallelExecutorInfo *pei)
1188 : {
1189 : /* Accumulate instrumentation, if any. */
1190 730 : if (pei->instrumentation)
1191 180 : ExecParallelRetrieveInstrumentation(pei->planstate,
1192 : pei->instrumentation);
1193 :
1194 : /* Accumulate JIT instrumentation, if any. */
1195 730 : if (pei->jit_instrumentation)
1196 24 : ExecParallelRetrieveJitInstrumentation(pei->planstate,
1197 24 : pei->jit_instrumentation);
1198 :
1199 : /* Free any serialized parameters. */
1200 730 : if (DsaPointerIsValid(pei->param_exec))
1201 : {
1202 24 : dsa_free(pei->area, pei->param_exec);
1203 24 : pei->param_exec = InvalidDsaPointer;
1204 : }
1205 730 : if (pei->area != NULL)
1206 : {
1207 694 : dsa_detach(pei->area);
1208 694 : pei->area = NULL;
1209 : }
1210 730 : if (pei->pcxt != NULL)
1211 : {
1212 730 : DestroyParallelContext(pei->pcxt);
1213 730 : pei->pcxt = NULL;
1214 : }
1215 730 : pfree(pei);
1216 730 : }
1217 :
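/*
 * Editorial sketch (not part of the original file): a Gather-like leader
 * node drives the functions in this file roughly in this order (simplified,
 * error handling omitted):
 *
 *     pei = ExecInitParallelPlan(outerPlanState(node), estate,
 *                                sendParams, nworkers, tuples_needed);
 *     LaunchParallelWorkers(pei->pcxt);
 *     ExecParallelCreateReaders(pei);
 *     ... fetch tuples from pei->reader[] until done ...
 *     ExecParallelFinish(pei);      -- wait for workers, collect buffer/WAL usage
 *     ExecParallelCleanup(pei);     -- gather instrumentation, release DSA/DSM
 *
 * On a rescan, ExecParallelReinitialize() is called on the same pei before
 * workers are launched again.
 */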
1218 : /*
1219 : * Create a DestReceiver to write tuples we produce to the shm_mq designated
1220 : * for that purpose.
1221 : */
1222 : static DestReceiver *
1223 2514 : ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc)
1224 : {
1225 : char *mqspace;
1226 : shm_mq *mq;
1227 :
1228 2514 : mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE, false);
1229 2514 : mqspace += ParallelWorkerNumber * PARALLEL_TUPLE_QUEUE_SIZE;
1230 2514 : mq = (shm_mq *) mqspace;
1231 2514 : shm_mq_set_sender(mq, MyProc);
1232 2514 : return CreateTupleQueueDestReceiver(shm_mq_attach(mq, seg, NULL));
1233 : }
1234 :
1235 : /*
1236 : * Create a QueryDesc for the PlannedStmt we are to execute, and return it.
1237 : */
1238 : static QueryDesc *
1239 2514 : ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
1240 : int instrument_options)
1241 : {
1242 : char *pstmtspace;
1243 : char *paramspace;
1244 : PlannedStmt *pstmt;
1245 : ParamListInfo paramLI;
1246 : char *queryString;
1247 :
1248 : /* Get the query string from shared memory */
1249 2514 : queryString = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT, false);
1250 :
1251 : /* Reconstruct leader-supplied PlannedStmt. */
1252 2514 : pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT, false);
1253 2514 : pstmt = (PlannedStmt *) stringToNode(pstmtspace);
1254 :
1255 : /* Reconstruct ParamListInfo. */
1256 2514 : paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false);
1257 2514 : paramLI = RestoreParamList(&paramspace);
1258 :
1259 : /* Create a QueryDesc for the query. */
1260 2514 : return CreateQueryDesc(pstmt,
1261 : queryString,
1262 : GetActiveSnapshot(), InvalidSnapshot,
1263 : receiver, paramLI, NULL, instrument_options);
1264 : }
1265 :
1266 : /*
1267 : * Copy instrumentation information from this node and its descendants into
1268 : * dynamic shared memory, so that the parallel leader can retrieve it.
1269 : */
1270 : static bool
1271 2376 : ExecParallelReportInstrumentation(PlanState *planstate,
1272 : SharedExecutorInstrumentation *instrumentation)
1273 : {
1274 : int i;
1275 2376 : int plan_node_id = planstate->plan->plan_node_id;
1276 : Instrumentation *instrument;
1277 :
1278 2376 : InstrEndLoop(planstate->instrument);
1279 :
1280 : /*
1281 : * If we kept the shared plan_node_id array in sorted order, we could use
1282 : * binary search here. This might matter someday if
1283 : * we're pushing down sufficiently large plan trees. For now, do it the
1284 : * slow, dumb way.
1285 : */
1286 7812 : for (i = 0; i < instrumentation->num_plan_nodes; ++i)
1287 7812 : if (instrumentation->plan_node_id[i] == plan_node_id)
1288 2376 : break;
1289 2376 : if (i >= instrumentation->num_plan_nodes)
1290 0 : elog(ERROR, "plan node %d not found", plan_node_id);
1291 :
1292 : /*
1293 : * Add our statistics to the per-node, per-worker totals. It's possible
1294 : * that this could happen more than once if we relaunched workers.
1295 : */
1296 2376 : instrument = GetInstrumentationArray(instrumentation);
1297 2376 : instrument += i * instrumentation->num_workers;
1298 : Assert(IsParallelWorker());
1299 : Assert(ParallelWorkerNumber < instrumentation->num_workers);
1300 2376 : InstrAggNode(&instrument[ParallelWorkerNumber], planstate->instrument);
1301 :
1302 2376 : return planstate_tree_walker(planstate, ExecParallelReportInstrumentation,
1303 : instrumentation);
1304 : }
1305 :
1306 : /*
1307 : * Initialize the PlanState and its descendants with the information
1308 : * retrieved from shared memory. This has to be done once the PlanState
1309 : * is allocated and initialized by the executor; that is, after ExecutorStart().
1310 : */
1311 : static bool
1312 8148 : ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt)
1313 : {
1314 8148 : if (planstate == NULL)
1315 0 : return false;
1316 :
1317 8148 : switch (nodeTag(planstate))
1318 : {
1319 3310 : case T_SeqScanState:
1320 3310 : if (planstate->plan->parallel_aware)
1321 2682 : ExecSeqScanInitializeWorker((SeqScanState *) planstate, pwcxt);
1322 3310 : break;
1323 396 : case T_IndexScanState:
1324 396 : if (planstate->plan->parallel_aware)
1325 120 : ExecIndexScanInitializeWorker((IndexScanState *) planstate,
1326 : pwcxt);
1327 396 : break;
1328 236 : case T_IndexOnlyScanState:
1329 236 : if (planstate->plan->parallel_aware)
1330 200 : ExecIndexOnlyScanInitializeWorker((IndexOnlyScanState *) planstate,
1331 : pwcxt);
1332 236 : break;
1333 0 : case T_ForeignScanState:
1334 0 : if (planstate->plan->parallel_aware)
1335 0 : ExecForeignScanInitializeWorker((ForeignScanState *) planstate,
1336 : pwcxt);
1337 0 : break;
1338 378 : case T_AppendState:
1339 378 : if (planstate->plan->parallel_aware)
1340 318 : ExecAppendInitializeWorker((AppendState *) planstate, pwcxt);
1341 378 : break;
1342 0 : case T_CustomScanState:
1343 0 : if (planstate->plan->parallel_aware)
1344 0 : ExecCustomScanInitializeWorker((CustomScanState *) planstate,
1345 : pwcxt);
1346 0 : break;
1347 272 : case T_BitmapHeapScanState:
1348 272 : if (planstate->plan->parallel_aware)
1349 270 : ExecBitmapHeapInitializeWorker((BitmapHeapScanState *) planstate,
1350 : pwcxt);
1351 272 : break;
1352 548 : case T_HashJoinState:
1353 548 : if (planstate->plan->parallel_aware)
1354 308 : ExecHashJoinInitializeWorker((HashJoinState *) planstate,
1355 : pwcxt);
1356 548 : break;
1357 548 : case T_HashState:
1358 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1359 548 : ExecHashInitializeWorker((HashState *) planstate, pwcxt);
1360 548 : break;
1361 452 : case T_SortState:
1362 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1363 452 : ExecSortInitializeWorker((SortState *) planstate, pwcxt);
1364 452 : break;
1365 0 : case T_IncrementalSortState:
1366 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1367 0 : ExecIncrementalSortInitializeWorker((IncrementalSortState *) planstate,
1368 : pwcxt);
1369 0 : break;
1370 1548 : case T_AggState:
1371 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1372 1548 : ExecAggInitializeWorker((AggState *) planstate, pwcxt);
1373 1548 : break;
1374 12 : case T_MemoizeState:
1375 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1376 12 : ExecMemoizeInitializeWorker((MemoizeState *) planstate, pwcxt);
1377 12 : break;
1378 448 : default:
1379 448 : break;
1380 : }
1381 :
1382 8148 : return planstate_tree_walker(planstate, ExecParallelInitializeWorker,
1383 : pwcxt);
1384 : }
1385 :
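/*
 * Editorial sketch (not part of the original file): the worker-side
 * counterpart of the hypothetical node shown after
 * ExecParallelInitializeDSM() would simply look its shared state up again
 * by plan_node_id:
 *
 *     static void
 *     ExecFooScanInitializeWorker(FooScanState *node, ParallelWorkerContext *pwcxt)
 *     {
 *         node->pscan = shm_toc_lookup(pwcxt->toc,
 *                                      node->ss.ps.plan->plan_node_id, false);
 *     }
 */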
1386 : /*
1387 : * Main entrypoint for parallel query worker processes.
1388 : *
1389 : * We reach this function from ParallelWorkerMain, so the setup necessary to
1390 : * create a sensible parallel environment has already been done;
1391 : * ParallelWorkerMain worries about stuff like the transaction state, combo
1392 : * CID mappings, and GUC values, so we don't need to deal with any of that
1393 : * here.
1394 : *
1395 : * Our job is to deal with concerns specific to the executor. The parallel
1396 : * group leader will have stored a serialized PlannedStmt, and it's our job
1397 : * to execute that plan and write the resulting tuples to the appropriate
1398 : * tuple queue. Various bits of supporting information that we need in order
1399 : * to do this are also stored in the dsm_segment and can be accessed through
1400 : * the shm_toc.
1401 : */
1402 : void
1403 2514 : ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
1404 : {
1405 : FixedParallelExecutorState *fpes;
1406 : BufferUsage *buffer_usage;
1407 : WalUsage *wal_usage;
1408 : DestReceiver *receiver;
1409 : QueryDesc *queryDesc;
1410 : SharedExecutorInstrumentation *instrumentation;
1411 : SharedJitInstrumentation *jit_instrumentation;
1412 2514 : int instrument_options = 0;
1413 : void *area_space;
1414 : dsa_area *area;
1415 : ParallelWorkerContext pwcxt;
1416 :
1417 : /* Get fixed-size state. */
1418 2514 : fpes = shm_toc_lookup(toc, PARALLEL_KEY_EXECUTOR_FIXED, false);
1419 :
1420 : /* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
1421 2514 : receiver = ExecParallelGetReceiver(seg, toc);
1422 2514 : instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
1423 2514 : if (instrumentation != NULL)
1424 726 : instrument_options = instrumentation->instrument_options;
1425 2514 : jit_instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_JIT_INSTRUMENTATION,
1426 : true);
1427 2514 : queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);
1428 :
1429 : /* Set debug_query_string for this worker */
1430 2514 : debug_query_string = queryDesc->sourceText;
1431 :
1432 : /* Report workers' query for monitoring purposes */
1433 2514 : pgstat_report_activity(STATE_RUNNING, debug_query_string);
1434 :
1435 : /* Attach to the dynamic shared memory area. */
1436 2514 : area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false);
1437 2514 : area = dsa_attach_in_place(area_space, seg);
1438 :
1439 : /* Start up the executor */
1440 2514 : queryDesc->plannedstmt->jitFlags = fpes->jit_flags;
1441 2514 : ExecutorStart(queryDesc, fpes->eflags);
1442 :
1443 : /* Special executor initialization steps for parallel workers */
1444 2514 : queryDesc->planstate->state->es_query_dsa = area;
1445 2514 : if (DsaPointerIsValid(fpes->param_exec))
1446 : {
1447 : char *paramexec_space;
1448 :
1449 72 : paramexec_space = dsa_get_address(area, fpes->param_exec);
1450 72 : RestoreParamExecParams(paramexec_space, queryDesc->estate);
1451 : }
1452 2514 : pwcxt.toc = toc;
1453 2514 : pwcxt.seg = seg;
1454 2514 : ExecParallelInitializeWorker(queryDesc->planstate, &pwcxt);
1455 :
1456 : /* Pass down any tuple bound */
1457 2514 : ExecSetTupleBound(fpes->tuples_needed, queryDesc->planstate);
1458 :
1459 : /*
1460 : * Prepare to track buffer/WAL usage during query execution.
1461 : *
1462 : * We do this after starting up the executor to match what happens in the
1463 : * leader, which also doesn't count buffer accesses and WAL activity that
1464 : * occur during executor startup.
1465 : */
1466 2514 : InstrStartParallelQuery();
1467 :
1468 : /*
1469 : * Run the plan. If we specified a tuple bound, be careful not to demand
1470 : * more tuples than that.
1471 : */
1472 2514 : ExecutorRun(queryDesc,
1473 : ForwardScanDirection,
1474 2514 : fpes->tuples_needed < 0 ? (int64) 0 : fpes->tuples_needed,
1475 : true);
1476 :
1477 : /* Shut down the executor */
1478 2502 : ExecutorFinish(queryDesc);
1479 :
1480 : /* Report buffer/WAL usage during parallel execution. */
1481 2502 : buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE, false);
1482 2502 : wal_usage = shm_toc_lookup(toc, PARALLEL_KEY_WAL_USAGE, false);
1483 2502 : InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber],
1484 2502 : &wal_usage[ParallelWorkerNumber]);
1485 :
1486 : /* Report instrumentation data if any instrumentation options are set. */
1487 2502 : if (instrumentation != NULL)
1488 726 : ExecParallelReportInstrumentation(queryDesc->planstate,
1489 : instrumentation);
1490 :
1491 : /* Report JIT instrumentation data if any */
1492 2502 : if (queryDesc->estate->es_jit && jit_instrumentation != NULL)
1493 : {
1494 : Assert(ParallelWorkerNumber < jit_instrumentation->num_workers);
1495 144 : jit_instrumentation->jit_instr[ParallelWorkerNumber] =
1496 144 : queryDesc->estate->es_jit->instr;
1497 : }
1498 :
1499 : /* Must do this after capturing instrumentation. */
1500 2502 : ExecutorEnd(queryDesc);
1501 :
1502 : /* Cleanup. */
1503 2502 : dsa_detach(area);
1504 2502 : FreeQueryDesc(queryDesc);
1505 2502 : receiver->rDestroy(receiver);
1506 2502 : }