Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * execParallel.c
4 : * Support routines for parallel execution.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : * This file contains routines that are intended to support setting up,
10 : * using, and tearing down a ParallelContext from within the PostgreSQL
11 : * executor. The ParallelContext machinery will handle starting the
12 : * workers and ensuring that their state generally matches that of the
13 : * leader; see src/backend/access/transam/README.parallel for details.
14 : * However, we must save and restore relevant executor state, such as
15 : * any ParamListInfo associated with the query, buffer/WAL usage info, and
16 : * the actual plan to be passed down to the worker.
17 : *
18 : * IDENTIFICATION
19 : * src/backend/executor/execParallel.c
20 : *
21 : *-------------------------------------------------------------------------
22 : */
23 :
24 : #include "postgres.h"
25 :
26 : #include "executor/execParallel.h"
27 : #include "executor/executor.h"
28 : #include "executor/nodeAgg.h"
29 : #include "executor/nodeAppend.h"
30 : #include "executor/nodeBitmapHeapscan.h"
31 : #include "executor/nodeBitmapIndexscan.h"
32 : #include "executor/nodeCustom.h"
33 : #include "executor/nodeForeignscan.h"
34 : #include "executor/nodeHash.h"
35 : #include "executor/nodeHashjoin.h"
36 : #include "executor/nodeIncrementalSort.h"
37 : #include "executor/nodeIndexonlyscan.h"
38 : #include "executor/nodeIndexscan.h"
39 : #include "executor/nodeMemoize.h"
40 : #include "executor/nodeSeqscan.h"
41 : #include "executor/nodeSort.h"
42 : #include "executor/nodeSubplan.h"
43 : #include "executor/tqueue.h"
44 : #include "jit/jit.h"
45 : #include "nodes/nodeFuncs.h"
46 : #include "pgstat.h"
47 : #include "tcop/tcopprot.h"
48 : #include "utils/datum.h"
49 : #include "utils/dsa.h"
50 : #include "utils/lsyscache.h"
51 : #include "utils/snapmgr.h"
52 :
53 : /*
54 : * Magic numbers for parallel executor communication. We use constants
55 : * greater than any 32-bit integer here so that values < 2^32 can be used
56 : * by individual parallel nodes to store their own state.
57 : */
58 : #define PARALLEL_KEY_EXECUTOR_FIXED UINT64CONST(0xE000000000000001)
59 : #define PARALLEL_KEY_PLANNEDSTMT UINT64CONST(0xE000000000000002)
60 : #define PARALLEL_KEY_PARAMLISTINFO UINT64CONST(0xE000000000000003)
61 : #define PARALLEL_KEY_BUFFER_USAGE UINT64CONST(0xE000000000000004)
62 : #define PARALLEL_KEY_TUPLE_QUEUE UINT64CONST(0xE000000000000005)
63 : #define PARALLEL_KEY_INSTRUMENTATION UINT64CONST(0xE000000000000006)
64 : #define PARALLEL_KEY_DSA UINT64CONST(0xE000000000000007)
65 : #define PARALLEL_KEY_QUERY_TEXT UINT64CONST(0xE000000000000008)
66 : #define PARALLEL_KEY_JIT_INSTRUMENTATION UINT64CONST(0xE000000000000009)
67 : #define PARALLEL_KEY_WAL_USAGE UINT64CONST(0xE00000000000000A)
68 :
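/*
 * Editor's illustrative sketch (not part of the original file): the toc key
 * space is therefore split between the executor-wide PARALLEL_KEY_* values
 * above (all >= 2^32) and per-node entries, for which parallel-aware nodes
 * conventionally use their plan_node_id (a small int, hence always < 2^32).
 * The function below is hypothetical and only contrasts the two lookups.
 */
static void
example_toc_lookups(shm_toc *toc, PlanState *planstate)
{
	void	   *executor_entry;
	void	   *node_entry;

	/* executor-wide entry, keyed by one of the constants above */
	executor_entry = shm_toc_lookup(toc, PARALLEL_KEY_EXECUTOR_FIXED, false);

	/* per-node entry, keyed by the node's plan_node_id */
	node_entry = shm_toc_lookup(toc, planstate->plan->plan_node_id, false);

	(void) executor_entry;
	(void) node_entry;
}
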
69 : #define PARALLEL_TUPLE_QUEUE_SIZE 65536
70 :
71 : /*
72 : * Fixed-size state that we need to pass to parallel workers.
73 : */
74 : typedef struct FixedParallelExecutorState
75 : {
76 : int64 tuples_needed; /* tuple bound, see ExecSetTupleBound */
77 : dsa_pointer param_exec;
78 : int eflags;
79 : int jit_flags;
80 : } FixedParallelExecutorState;
81 :
82 : /*
83 : * DSM structure for accumulating per-PlanState instrumentation.
84 : *
85 : * instrument_options: Same meaning here as in instrument.c.
86 : *
87 : * instrument_offset: Offset, relative to the start of this structure,
88 : * of the first Instrumentation object. This will depend on the length of
89 : * the plan_node_id array.
90 : *
91 : * num_workers: Number of workers.
92 : *
93 : * num_plan_nodes: Number of plan nodes.
94 : *
95 : * plan_node_id: Array of plan nodes for which we are gathering instrumentation
96 : * from parallel workers. The length of this array is given by num_plan_nodes.
97 : */
98 : struct SharedExecutorInstrumentation
99 : {
100 : int instrument_options;
101 : int instrument_offset;
102 : int num_workers;
103 : int num_plan_nodes;
104 : int plan_node_id[FLEXIBLE_ARRAY_MEMBER];
105 : /* array of num_plan_nodes * num_workers Instrumentation objects follows */
106 : };
107 : #define GetInstrumentationArray(sei) \
108 : (AssertVariableIsOfTypeMacro(sei, SharedExecutorInstrumentation *), \
109 : (Instrumentation *) (((char *) sei) + sei->instrument_offset))
110 :
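/*
 * Editor's note (sketch, not part of the original file): the trailing
 * Instrumentation array is laid out with all workers' slots for plan-node
 * index 0 first, then all slots for index 1, and so on; that is how
 * ExecParallelRetrieveInstrumentation and ExecParallelReportInstrumentation
 * below index into it.  The helper name is hypothetical.
 */
static inline Instrumentation *
example_worker_instrument_slot(SharedExecutorInstrumentation *sei,
							   int plan_node_index, int worker_number)
{
	Instrumentation *array = GetInstrumentationArray(sei);

	return &array[plan_node_index * sei->num_workers + worker_number];
}
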
111 : /* Context object for ExecParallelEstimate. */
112 : typedef struct ExecParallelEstimateContext
113 : {
114 : ParallelContext *pcxt;
115 : int nnodes;
116 : } ExecParallelEstimateContext;
117 :
118 : /* Context object for ExecParallelInitializeDSM. */
119 : typedef struct ExecParallelInitializeDSMContext
120 : {
121 : ParallelContext *pcxt;
122 : SharedExecutorInstrumentation *instrumentation;
123 : int nnodes;
124 : } ExecParallelInitializeDSMContext;
125 :
126 : /* Helper functions that run in the parallel leader. */
127 : static char *ExecSerializePlan(Plan *plan, EState *estate);
128 : static bool ExecParallelEstimate(PlanState *planstate,
129 : ExecParallelEstimateContext *e);
130 : static bool ExecParallelInitializeDSM(PlanState *planstate,
131 : ExecParallelInitializeDSMContext *d);
132 : static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
133 : bool reinitialize);
134 : static bool ExecParallelReInitializeDSM(PlanState *planstate,
135 : ParallelContext *pcxt);
136 : static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
137 : SharedExecutorInstrumentation *instrumentation);
138 :
139 : /* Helper function that runs in the parallel worker. */
140 : static DestReceiver *ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc);
141 :
142 : /*
143 : * Create a serialized representation of the plan to be sent to each worker.
144 : */
145 : static char *
146 718 : ExecSerializePlan(Plan *plan, EState *estate)
147 : {
148 : PlannedStmt *pstmt;
149 : ListCell *lc;
150 :
151 : /* We can't scribble on the original plan, so make a copy. */
152 718 : plan = copyObject(plan);
153 :
154 : /*
155 : * The worker will start its own copy of the executor, and that copy will
156 : * insert a junk filter if the toplevel node has any resjunk entries. We
157 : * don't want that to happen, because while resjunk columns shouldn't be
158 : * sent back to the user, here the tuples are coming back to another
159 : * backend which may very well need them. So mutate the target list
160 : * accordingly. This is sort of a hack; there might be better ways to do
161 : * this...
162 : */
163 1978 : foreach(lc, plan->targetlist)
164 : {
165 1260 : TargetEntry *tle = lfirst_node(TargetEntry, lc);
166 :
167 1260 : tle->resjunk = false;
168 : }
169 :
170 : /*
171 : * Create a dummy PlannedStmt. Most of the fields don't need to be valid
172 : * for our purposes, but the worker will need at least a minimal
173 : * PlannedStmt to start the executor.
174 : */
175 718 : pstmt = makeNode(PlannedStmt);
176 718 : pstmt->commandType = CMD_SELECT;
177 718 : pstmt->queryId = pgstat_get_my_query_id();
178 718 : pstmt->planId = pgstat_get_my_plan_id();
179 718 : pstmt->hasReturning = false;
180 718 : pstmt->hasModifyingCTE = false;
181 718 : pstmt->canSetTag = true;
182 718 : pstmt->transientPlan = false;
183 718 : pstmt->dependsOnRole = false;
184 718 : pstmt->parallelModeNeeded = false;
185 718 : pstmt->planTree = plan;
186 718 : pstmt->partPruneInfos = estate->es_part_prune_infos;
187 718 : pstmt->rtable = estate->es_range_table;
188 718 : pstmt->unprunableRelids = estate->es_unpruned_relids;
189 718 : pstmt->permInfos = estate->es_rteperminfos;
190 718 : pstmt->resultRelations = NIL;
191 718 : pstmt->appendRelations = NIL;
192 718 : pstmt->cached_plan_type = PLAN_CACHE_NONE;
193 :
194 : /*
195 : * Transfer only parallel-safe subplans, leaving a NULL "hole" in the list
196 : * for unsafe ones (so that the list indexes of the safe ones are
197 : * preserved). This positively ensures that the worker won't try to run,
198 : * or even do ExecInitNode on, an unsafe subplan. That's important to
199 : * protect, e.g., non-parallel-aware FDWs from getting into trouble.
200 : */
201 718 : pstmt->subplans = NIL;
202 772 : foreach(lc, estate->es_plannedstmt->subplans)
203 : {
204 54 : Plan *subplan = (Plan *) lfirst(lc);
205 :
206 54 : if (subplan && !subplan->parallel_safe)
207 12 : subplan = NULL;
208 54 : pstmt->subplans = lappend(pstmt->subplans, subplan);
209 : }
210 :
211 718 : pstmt->rewindPlanIDs = NULL;
212 718 : pstmt->rowMarks = NIL;
213 718 : pstmt->relationOids = NIL;
214 718 : pstmt->invalItems = NIL; /* workers can't replan anyway... */
215 718 : pstmt->paramExecTypes = estate->es_plannedstmt->paramExecTypes;
216 718 : pstmt->utilityStmt = NULL;
217 718 : pstmt->stmt_location = -1;
218 718 : pstmt->stmt_len = -1;
219 :
220 : /* Return serialized copy of our dummy PlannedStmt. */
221 718 : return nodeToString(pstmt);
222 : }
223 :
224 : /*
225 : * Parallel-aware plan nodes (and occasionally others) may need some state
226 : * which is shared across all parallel workers. Before we size the DSM, give
227 : * them a chance to call shm_toc_estimate_chunk or shm_toc_estimate_keys on
228 : * &pcxt->estimator.
229 : *
230 : * While we're at it, count the number of PlanState nodes in the tree, so
231 : * we know how many Instrumentation structures we need.
232 : */
233 : static bool
234 2960 : ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
235 : {
236 2960 : if (planstate == NULL)
237 0 : return false;
238 :
239 : /* Count this node. */
240 2960 : e->nnodes++;
241 :
242 2960 : switch (nodeTag(planstate))
243 : {
244 1144 : case T_SeqScanState:
245 1144 : if (planstate->plan->parallel_aware)
246 906 : ExecSeqScanEstimate((SeqScanState *) planstate,
247 : e->pcxt);
248 1144 : break;
249 294 : case T_IndexScanState:
250 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
251 294 : ExecIndexScanEstimate((IndexScanState *) planstate,
252 : e->pcxt);
253 294 : break;
254 58 : case T_IndexOnlyScanState:
255 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
256 58 : ExecIndexOnlyScanEstimate((IndexOnlyScanState *) planstate,
257 : e->pcxt);
258 58 : break;
259 20 : case T_BitmapIndexScanState:
260 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
261 20 : ExecBitmapIndexScanEstimate((BitmapIndexScanState *) planstate,
262 : e->pcxt);
263 20 : break;
264 0 : case T_ForeignScanState:
265 0 : if (planstate->plan->parallel_aware)
266 0 : ExecForeignScanEstimate((ForeignScanState *) planstate,
267 : e->pcxt);
268 0 : break;
269 186 : case T_AppendState:
270 186 : if (planstate->plan->parallel_aware)
271 138 : ExecAppendEstimate((AppendState *) planstate,
272 : e->pcxt);
273 186 : break;
274 0 : case T_CustomScanState:
275 0 : if (planstate->plan->parallel_aware)
276 0 : ExecCustomScanEstimate((CustomScanState *) planstate,
277 : e->pcxt);
278 0 : break;
279 20 : case T_BitmapHeapScanState:
280 20 : if (planstate->plan->parallel_aware)
281 18 : ExecBitmapHeapEstimate((BitmapHeapScanState *) planstate,
282 : e->pcxt);
283 20 : break;
284 192 : case T_HashJoinState:
285 192 : if (planstate->plan->parallel_aware)
286 120 : ExecHashJoinEstimate((HashJoinState *) planstate,
287 : e->pcxt);
288 192 : break;
289 192 : case T_HashState:
290 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
291 192 : ExecHashEstimate((HashState *) planstate, e->pcxt);
292 192 : break;
293 152 : case T_SortState:
294 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
295 152 : ExecSortEstimate((SortState *) planstate, e->pcxt);
296 152 : break;
297 0 : case T_IncrementalSortState:
298 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
299 0 : ExecIncrementalSortEstimate((IncrementalSortState *) planstate, e->pcxt);
300 0 : break;
301 554 : case T_AggState:
302 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
303 554 : ExecAggEstimate((AggState *) planstate, e->pcxt);
304 554 : break;
305 6 : case T_MemoizeState:
306 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
307 6 : ExecMemoizeEstimate((MemoizeState *) planstate, e->pcxt);
308 6 : break;
309 142 : default:
310 142 : break;
311 : }
312 :
313 2960 : return planstate_tree_walker(planstate, ExecParallelEstimate, e);
314 : }
315 :
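/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * per-node Estimate callback, as dispatched from the switch above, just
 * reserves space for its shared state plus one toc key.  The function name
 * and size are hypothetical.
 */
static void
ExampleNodeEstimate(ParallelContext *pcxt, Size shared_size)
{
	/* reserve a chunk for this node's shared struct ... */
	shm_toc_estimate_chunk(&pcxt->estimator, shared_size);
	/* ... and one key, conventionally the node's plan_node_id */
	shm_toc_estimate_keys(&pcxt->estimator, 1);
}
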
316 : /*
317 : * Estimate the amount of space required to serialize the indicated parameters.
318 : */
319 : static Size
320 24 : EstimateParamExecSpace(EState *estate, Bitmapset *params)
321 : {
322 : int paramid;
323 24 : Size sz = sizeof(int);
324 :
325 24 : paramid = -1;
326 54 : while ((paramid = bms_next_member(params, paramid)) >= 0)
327 : {
328 : Oid typeOid;
329 : int16 typLen;
330 : bool typByVal;
331 : ParamExecData *prm;
332 :
333 30 : prm = &(estate->es_param_exec_vals[paramid]);
334 30 : typeOid = list_nth_oid(estate->es_plannedstmt->paramExecTypes,
335 : paramid);
336 :
337 30 : sz = add_size(sz, sizeof(int)); /* space for paramid */
338 :
339 : /* space for datum/isnull */
340 30 : if (OidIsValid(typeOid))
341 30 : get_typlenbyval(typeOid, &typLen, &typByVal);
342 : else
343 : {
344 : /* If no type OID, assume by-value, like copyParamList does. */
345 0 : typLen = sizeof(Datum);
346 0 : typByVal = true;
347 : }
348 30 : sz = add_size(sz,
349 30 : datumEstimateSpace(prm->value, prm->isnull,
350 : typByVal, typLen));
351 : }
352 24 : return sz;
353 : }
354 :
355 : /*
356 : * Serialize specified PARAM_EXEC parameters.
357 : *
358 : * We write the number of parameters first, as a 4-byte integer, and then
359 : * write details for each parameter in turn. The details for each parameter
360 : * consist of a 4-byte paramid (location of param in execution time internal
361 : * parameter array) and then the datum as serialized by datumSerialize().
362 : */
363 : static dsa_pointer
364 24 : SerializeParamExecParams(EState *estate, Bitmapset *params, dsa_area *area)
365 : {
366 : Size size;
367 : int nparams;
368 : int paramid;
369 : ParamExecData *prm;
370 : dsa_pointer handle;
371 : char *start_address;
372 :
373 : /* Allocate enough space for the current parameter values. */
374 24 : size = EstimateParamExecSpace(estate, params);
375 24 : handle = dsa_allocate(area, size);
376 24 : start_address = dsa_get_address(area, handle);
377 :
378 : /* First write the number of parameters as a 4-byte integer. */
379 24 : nparams = bms_num_members(params);
380 24 : memcpy(start_address, &nparams, sizeof(int));
381 24 : start_address += sizeof(int);
382 :
383 : /* Write details for each parameter in turn. */
384 24 : paramid = -1;
385 54 : while ((paramid = bms_next_member(params, paramid)) >= 0)
386 : {
387 : Oid typeOid;
388 : int16 typLen;
389 : bool typByVal;
390 :
391 30 : prm = &(estate->es_param_exec_vals[paramid]);
392 30 : typeOid = list_nth_oid(estate->es_plannedstmt->paramExecTypes,
393 : paramid);
394 :
395 : /* Write paramid. */
396 30 : memcpy(start_address, ¶mid, sizeof(int));
397 30 : start_address += sizeof(int);
398 :
399 : /* Write datum/isnull */
400 30 : if (OidIsValid(typeOid))
401 30 : get_typlenbyval(typeOid, &typLen, &typByVal);
402 : else
403 : {
404 : /* If no type OID, assume by-value, like copyParamList does. */
405 0 : typLen = sizeof(Datum);
406 0 : typByVal = true;
407 : }
408 30 : datumSerialize(prm->value, prm->isnull, typByVal, typLen,
409 : &start_address);
410 : }
411 :
412 24 : return handle;
413 : }
414 :
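/*
 * Editor's note: the DSA chunk written by SerializeParamExecParams above and
 * consumed by RestoreParamExecParams below therefore has this layout:
 *
 *		int		nparams
 *		repeated nparams times:
 *			int		paramid
 *			datum image as written by datumSerialize()
 */
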
415 : /*
416 : * Restore specified PARAM_EXEC parameters.
417 : */
418 : static void
419 70 : RestoreParamExecParams(char *start_address, EState *estate)
420 : {
421 : int nparams;
422 : int i;
423 : int paramid;
424 :
425 70 : memcpy(&nparams, start_address, sizeof(int));
426 70 : start_address += sizeof(int);
427 :
428 150 : for (i = 0; i < nparams; i++)
429 : {
430 : ParamExecData *prm;
431 :
432 : /* Read paramid */
433 80 : memcpy(¶mid, start_address, sizeof(int));
434 80 : start_address += sizeof(int);
435 80 : prm = &(estate->es_param_exec_vals[paramid]);
436 :
437 : /* Read datum/isnull. */
438 80 : prm->value = datumRestore(&start_address, &prm->isnull);
439 80 : prm->execPlan = NULL;
440 : }
441 70 : }
442 :
443 : /*
444 : * Initialize the dynamic shared memory segment that will be used to control
445 : * parallel execution.
446 : */
447 : static bool
448 2960 : ExecParallelInitializeDSM(PlanState *planstate,
449 : ExecParallelInitializeDSMContext *d)
450 : {
451 2960 : if (planstate == NULL)
452 0 : return false;
453 :
454 : /* If instrumentation is enabled, initialize slot for this node. */
455 2960 : if (d->instrumentation != NULL)
456 1026 : d->instrumentation->plan_node_id[d->nnodes] =
457 1026 : planstate->plan->plan_node_id;
458 :
459 : /* Count this node. */
460 2960 : d->nnodes++;
461 :
462 : /*
463 : * Call initializers for DSM-using plan nodes.
464 : *
465 : * Most plan nodes won't do anything here, but plan nodes that allocated
466 : * DSM may need to initialize shared state in the DSM before parallel
467 : * workers are launched. They can allocate the space they previously
468 : * estimated using shm_toc_allocate, and add the keys they previously
469 : * estimated using shm_toc_insert, in each case targeting pcxt->toc.
470 : */
471 2960 : switch (nodeTag(planstate))
472 : {
473 1144 : case T_SeqScanState:
474 1144 : if (planstate->plan->parallel_aware)
475 906 : ExecSeqScanInitializeDSM((SeqScanState *) planstate,
476 : d->pcxt);
477 1144 : break;
478 294 : case T_IndexScanState:
479 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
480 294 : ExecIndexScanInitializeDSM((IndexScanState *) planstate, d->pcxt);
481 294 : break;
482 58 : case T_IndexOnlyScanState:
483 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
484 58 : ExecIndexOnlyScanInitializeDSM((IndexOnlyScanState *) planstate,
485 : d->pcxt);
486 58 : break;
487 20 : case T_BitmapIndexScanState:
488 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
489 20 : ExecBitmapIndexScanInitializeDSM((BitmapIndexScanState *) planstate, d->pcxt);
490 20 : break;
491 0 : case T_ForeignScanState:
492 0 : if (planstate->plan->parallel_aware)
493 0 : ExecForeignScanInitializeDSM((ForeignScanState *) planstate,
494 : d->pcxt);
495 0 : break;
496 186 : case T_AppendState:
497 186 : if (planstate->plan->parallel_aware)
498 138 : ExecAppendInitializeDSM((AppendState *) planstate,
499 : d->pcxt);
500 186 : break;
501 0 : case T_CustomScanState:
502 0 : if (planstate->plan->parallel_aware)
503 0 : ExecCustomScanInitializeDSM((CustomScanState *) planstate,
504 : d->pcxt);
505 0 : break;
506 20 : case T_BitmapHeapScanState:
507 20 : if (planstate->plan->parallel_aware)
508 18 : ExecBitmapHeapInitializeDSM((BitmapHeapScanState *) planstate,
509 : d->pcxt);
510 20 : break;
511 192 : case T_HashJoinState:
512 192 : if (planstate->plan->parallel_aware)
513 120 : ExecHashJoinInitializeDSM((HashJoinState *) planstate,
514 : d->pcxt);
515 192 : break;
516 192 : case T_HashState:
517 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
518 192 : ExecHashInitializeDSM((HashState *) planstate, d->pcxt);
519 192 : break;
520 152 : case T_SortState:
521 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
522 152 : ExecSortInitializeDSM((SortState *) planstate, d->pcxt);
523 152 : break;
524 0 : case T_IncrementalSortState:
525 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
526 0 : ExecIncrementalSortInitializeDSM((IncrementalSortState *) planstate, d->pcxt);
527 0 : break;
528 554 : case T_AggState:
529 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
530 554 : ExecAggInitializeDSM((AggState *) planstate, d->pcxt);
531 554 : break;
532 6 : case T_MemoizeState:
533 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
534 6 : ExecMemoizeInitializeDSM((MemoizeState *) planstate, d->pcxt);
535 6 : break;
536 142 : default:
537 142 : break;
538 : }
539 :
540 2960 : return planstate_tree_walker(planstate, ExecParallelInitializeDSM, d);
541 : }
542 :
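/*
 * Editor's illustrative sketch (not part of the original file): the matching
 * per-node InitializeDSM callback allocates the space it estimated earlier
 * and publishes it under its plan_node_id.  The shared payload here (one
 * counter per worker) is hypothetical.
 */
static void
ExampleNodeInitializeDSM(PlanState *planstate, ParallelContext *pcxt)
{
	int		   *shared;

	shared = shm_toc_allocate(pcxt->toc, sizeof(int) * pcxt->nworkers);
	memset(shared, 0, sizeof(int) * pcxt->nworkers);
	shm_toc_insert(pcxt->toc, planstate->plan->plan_node_id, shared);
}
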
543 : /*
544 : * Set up the response queues through which the parallel workers will
545 : * return tuples to the leader backend.
546 : */
547 : static shm_mq_handle **
548 976 : ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
549 : {
550 : shm_mq_handle **responseq;
551 : char *tqueuespace;
552 : int i;
553 :
554 : /* Skip this if no workers. */
555 976 : if (pcxt->nworkers == 0)
556 0 : return NULL;
557 :
558 : /* Allocate memory for shared memory queue handles. */
559 : responseq = (shm_mq_handle **)
560 976 : palloc(pcxt->nworkers * sizeof(shm_mq_handle *));
561 :
562 : /*
563 : * If not reinitializing, allocate space from the DSM for the queues;
564 : * otherwise, find the already allocated space.
565 : */
566 976 : if (!reinitialize)
567 : tqueuespace =
568 718 : shm_toc_allocate(pcxt->toc,
569 : mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
570 718 : pcxt->nworkers));
571 : else
572 258 : tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, false);
573 :
574 : /* Create the queues, and become the receiver for each. */
575 3586 : for (i = 0; i < pcxt->nworkers; ++i)
576 : {
577 : shm_mq *mq;
578 :
579 2610 : mq = shm_mq_create(tqueuespace +
580 2610 : ((Size) i) * PARALLEL_TUPLE_QUEUE_SIZE,
581 : (Size) PARALLEL_TUPLE_QUEUE_SIZE);
582 :
583 2610 : shm_mq_set_receiver(mq, MyProc);
584 2610 : responseq[i] = shm_mq_attach(mq, pcxt->seg, NULL);
585 : }
586 :
587 : /* Add array of queues to shm_toc, so others can find it. */
588 976 : if (!reinitialize)
589 718 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, tqueuespace);
590 :
591 : /* Return array of handles. */
592 976 : return responseq;
593 : }
594 :
595 : /*
596 : * Sets up the required infrastructure for backend workers to perform
597 : * execution and return results to the main backend.
598 : */
599 : ParallelExecutorInfo *
600 718 : ExecInitParallelPlan(PlanState *planstate, EState *estate,
601 : Bitmapset *sendParams, int nworkers,
602 : int64 tuples_needed)
603 : {
604 : ParallelExecutorInfo *pei;
605 : ParallelContext *pcxt;
606 : ExecParallelEstimateContext e;
607 : ExecParallelInitializeDSMContext d;
608 : FixedParallelExecutorState *fpes;
609 : char *pstmt_data;
610 : char *pstmt_space;
611 : char *paramlistinfo_space;
612 : BufferUsage *bufusage_space;
613 : WalUsage *walusage_space;
614 718 : SharedExecutorInstrumentation *instrumentation = NULL;
615 718 : SharedJitInstrumentation *jit_instrumentation = NULL;
616 : int pstmt_len;
617 : int paramlistinfo_len;
618 718 : int instrumentation_len = 0;
619 718 : int jit_instrumentation_len = 0;
620 718 : int instrument_offset = 0;
621 718 : Size dsa_minsize = dsa_minimum_size();
622 : char *query_string;
623 : int query_len;
624 :
625 : /*
626 : * Force any initplan outputs that we're going to pass to workers to be
627 : * evaluated, if they weren't already.
628 : *
629 : * For simplicity, we use the EState's per-output-tuple ExprContext here.
630 : * That risks intra-query memory leakage, since we might pass through here
631 : * many times before that ExprContext gets reset; but ExecSetParamPlan
632 : * doesn't normally leak any memory in the context (see its comments), so
633 : * it doesn't seem worth complicating this function's API to pass it a
634 : * shorter-lived ExprContext. This might need to change someday.
635 : */
636 718 : ExecSetParamPlanMulti(sendParams, GetPerTupleExprContext(estate));
637 :
638 : /* Allocate object for return value. */
639 718 : pei = palloc0(sizeof(ParallelExecutorInfo));
640 718 : pei->finished = false;
641 718 : pei->planstate = planstate;
642 :
643 : /* Fix up and serialize plan to be sent to workers. */
644 718 : pstmt_data = ExecSerializePlan(planstate->plan, estate);
645 :
646 : /* Create a parallel context. */
647 718 : pcxt = CreateParallelContext("postgres", "ParallelQueryMain", nworkers);
648 718 : pei->pcxt = pcxt;
649 :
650 : /*
651 : * Before telling the parallel context to create a dynamic shared memory
652 : * segment, we need to figure out how big it should be. Estimate space
653 : * for the various things we need to store.
654 : */
655 :
656 : /* Estimate space for fixed-size state. */
657 718 : shm_toc_estimate_chunk(&pcxt->estimator,
658 : sizeof(FixedParallelExecutorState));
659 718 : shm_toc_estimate_keys(&pcxt->estimator, 1);
660 :
661 : /* Estimate space for query text. */
662 718 : query_len = strlen(estate->es_sourceText);
663 718 : shm_toc_estimate_chunk(&pcxt->estimator, query_len + 1);
664 718 : shm_toc_estimate_keys(&pcxt->estimator, 1);
665 :
666 : /* Estimate space for serialized PlannedStmt. */
667 718 : pstmt_len = strlen(pstmt_data) + 1;
668 718 : shm_toc_estimate_chunk(&pcxt->estimator, pstmt_len);
669 718 : shm_toc_estimate_keys(&pcxt->estimator, 1);
670 :
671 : /* Estimate space for serialized ParamListInfo. */
672 718 : paramlistinfo_len = EstimateParamListSpace(estate->es_param_list_info);
673 718 : shm_toc_estimate_chunk(&pcxt->estimator, paramlistinfo_len);
674 718 : shm_toc_estimate_keys(&pcxt->estimator, 1);
675 :
676 : /*
677 : * Estimate space for BufferUsage.
678 : *
679 : * If EXPLAIN is not in use and there are no extensions loaded that care,
680 : * we could skip this. But we have no way of knowing whether anyone's
681 : * looking at pgBufferUsage, so do it unconditionally.
682 : */
683 718 : shm_toc_estimate_chunk(&pcxt->estimator,
684 : mul_size(sizeof(BufferUsage), pcxt->nworkers));
685 718 : shm_toc_estimate_keys(&pcxt->estimator, 1);
686 :
687 : /*
688 : * Same thing for WalUsage.
689 : */
690 718 : shm_toc_estimate_chunk(&pcxt->estimator,
691 : mul_size(sizeof(WalUsage), pcxt->nworkers));
692 718 : shm_toc_estimate_keys(&pcxt->estimator, 1);
693 :
694 : /* Estimate space for tuple queues. */
695 718 : shm_toc_estimate_chunk(&pcxt->estimator,
696 : mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
697 718 : shm_toc_estimate_keys(&pcxt->estimator, 1);
698 :
699 : /*
700 : * Give parallel-aware nodes a chance to add to the estimates, and get a
701 : * count of how many PlanState nodes there are.
702 : */
703 718 : e.pcxt = pcxt;
704 718 : e.nnodes = 0;
705 718 : ExecParallelEstimate(planstate, &e);
706 :
707 : /* Estimate space for instrumentation, if required. */
708 718 : if (estate->es_instrument)
709 : {
710 180 : instrumentation_len =
711 : offsetof(SharedExecutorInstrumentation, plan_node_id) +
712 180 : sizeof(int) * e.nnodes;
713 180 : instrumentation_len = MAXALIGN(instrumentation_len);
714 180 : instrument_offset = instrumentation_len;
715 180 : instrumentation_len +=
716 180 : mul_size(sizeof(Instrumentation),
717 180 : mul_size(e.nnodes, nworkers));
718 180 : shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len);
719 180 : shm_toc_estimate_keys(&pcxt->estimator, 1);
720 :
721 : /* Estimate space for JIT instrumentation, if required. */
722 180 : if (estate->es_jit_flags != PGJIT_NONE)
723 : {
724 24 : jit_instrumentation_len =
725 24 : offsetof(SharedJitInstrumentation, jit_instr) +
726 : sizeof(JitInstrumentation) * nworkers;
727 24 : shm_toc_estimate_chunk(&pcxt->estimator, jit_instrumentation_len);
728 24 : shm_toc_estimate_keys(&pcxt->estimator, 1);
729 : }
730 : }
731 :
732 : /* Estimate space for DSA area. */
733 718 : shm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize);
734 718 : shm_toc_estimate_keys(&pcxt->estimator, 1);
735 :
736 : /*
737 : * InitializeParallelDSM() passes the active snapshot to the parallel
738 : * worker, which uses it to set es_snapshot. Make sure we don't set
739 : * es_snapshot differently in the child.
740 : */
741 : Assert(GetActiveSnapshot() == estate->es_snapshot);
742 :
743 : /* Everyone's had a chance to ask for space, so now create the DSM. */
744 718 : InitializeParallelDSM(pcxt);
745 :
746 : /*
747 : * OK, now we have a dynamic shared memory segment, and it should be big
748 : * enough to store all of the data we estimated we would want to put into
749 : * it, plus whatever general stuff (not specifically executor-related) the
750 : * ParallelContext itself needs to store there. None of the space we
751 : * asked for has been allocated or initialized yet, though, so do that.
752 : */
753 :
754 : /* Store fixed-size state. */
755 718 : fpes = shm_toc_allocate(pcxt->toc, sizeof(FixedParallelExecutorState));
756 718 : fpes->tuples_needed = tuples_needed;
757 718 : fpes->param_exec = InvalidDsaPointer;
758 718 : fpes->eflags = estate->es_top_eflags;
759 718 : fpes->jit_flags = estate->es_jit_flags;
760 718 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, fpes);
761 :
762 : /* Store query string */
763 718 : query_string = shm_toc_allocate(pcxt->toc, query_len + 1);
764 718 : memcpy(query_string, estate->es_sourceText, query_len + 1);
765 718 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, query_string);
766 :
767 : /* Store serialized PlannedStmt. */
768 718 : pstmt_space = shm_toc_allocate(pcxt->toc, pstmt_len);
769 718 : memcpy(pstmt_space, pstmt_data, pstmt_len);
770 718 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_PLANNEDSTMT, pstmt_space);
771 :
772 : /* Store serialized ParamListInfo. */
773 718 : paramlistinfo_space = shm_toc_allocate(pcxt->toc, paramlistinfo_len);
774 718 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_PARAMLISTINFO, paramlistinfo_space);
775 718 : SerializeParamList(estate->es_param_list_info, &paramlistinfo_space);
776 :
777 : /* Allocate space for each worker's BufferUsage; no need to initialize. */
778 718 : bufusage_space = shm_toc_allocate(pcxt->toc,
779 718 : mul_size(sizeof(BufferUsage), pcxt->nworkers));
780 718 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space);
781 718 : pei->buffer_usage = bufusage_space;
782 :
783 : /* Same for WalUsage. */
784 718 : walusage_space = shm_toc_allocate(pcxt->toc,
785 718 : mul_size(sizeof(WalUsage), pcxt->nworkers));
786 718 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_WAL_USAGE, walusage_space);
787 718 : pei->wal_usage = walusage_space;
788 :
789 : /* Set up the tuple queues that the workers will write into. */
790 718 : pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
791 :
792 : /* We don't need the TupleQueueReaders yet, though. */
793 718 : pei->reader = NULL;
794 :
795 : /*
796 : * If instrumentation options were supplied, allocate space for the data.
797 : * It only gets partially initialized here; the rest happens during
798 : * ExecParallelInitializeDSM.
799 : */
800 718 : if (estate->es_instrument)
801 : {
802 : Instrumentation *instrument;
803 : int i;
804 :
805 180 : instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
806 180 : instrumentation->instrument_options = estate->es_instrument;
807 180 : instrumentation->instrument_offset = instrument_offset;
808 180 : instrumentation->num_workers = nworkers;
809 180 : instrumentation->num_plan_nodes = e.nnodes;
810 180 : instrument = GetInstrumentationArray(instrumentation);
811 1860 : for (i = 0; i < nworkers * e.nnodes; ++i)
812 1680 : InstrInit(&instrument[i], estate->es_instrument);
813 180 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_INSTRUMENTATION,
814 : instrumentation);
815 180 : pei->instrumentation = instrumentation;
816 :
817 180 : if (estate->es_jit_flags != PGJIT_NONE)
818 : {
819 24 : jit_instrumentation = shm_toc_allocate(pcxt->toc,
820 : jit_instrumentation_len);
821 24 : jit_instrumentation->num_workers = nworkers;
822 24 : memset(jit_instrumentation->jit_instr, 0,
823 : sizeof(JitInstrumentation) * nworkers);
824 24 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_JIT_INSTRUMENTATION,
825 : jit_instrumentation);
826 24 : pei->jit_instrumentation = jit_instrumentation;
827 : }
828 : }
829 :
830 : /*
831 : * Create a DSA area that can be used by the leader and all workers.
832 : * (However, if we failed to create a DSM and are using private memory
833 : * instead, then skip this.)
834 : */
835 718 : if (pcxt->seg != NULL)
836 : {
837 : char *area_space;
838 :
839 718 : area_space = shm_toc_allocate(pcxt->toc, dsa_minsize);
840 718 : shm_toc_insert(pcxt->toc, PARALLEL_KEY_DSA, area_space);
841 718 : pei->area = dsa_create_in_place(area_space, dsa_minsize,
842 : LWTRANCHE_PARALLEL_QUERY_DSA,
843 : pcxt->seg);
844 :
845 : /*
846 : * Serialize parameters, if any, using DSA storage. We don't dare use
847 : * the main parallel query DSM for this because we might relaunch
848 : * workers after the values have changed (and thus the amount of
849 : * storage required has changed).
850 : */
851 718 : if (!bms_is_empty(sendParams))
852 : {
853 24 : pei->param_exec = SerializeParamExecParams(estate, sendParams,
854 : pei->area);
855 24 : fpes->param_exec = pei->param_exec;
856 : }
857 : }
858 :
859 : /*
860 : * Give parallel-aware nodes a chance to initialize their shared data.
861 : * This also fills in the plan_node_id array of *instrumentation, if that
862 : * structure exists.
863 : */
864 718 : d.pcxt = pcxt;
865 718 : d.instrumentation = instrumentation;
866 718 : d.nnodes = 0;
867 :
868 : /* Install our DSA area while initializing the plan. */
869 718 : estate->es_query_dsa = pei->area;
870 718 : ExecParallelInitializeDSM(planstate, &d);
871 718 : estate->es_query_dsa = NULL;
872 :
873 : /*
874 : * Make sure that the world hasn't shifted under our feet. This could
875 : * probably just be an Assert(), but let's be conservative for now.
876 : */
877 718 : if (e.nnodes != d.nnodes)
878 0 : elog(ERROR, "inconsistent count of PlanState nodes");
879 :
880 : /* OK, we're ready to rock and roll. */
881 718 : return pei;
882 : }
883 :
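/*
 * Editor's illustrative sketch (not part of the original file): the leader
 * side roughly follows this call sequence (as a Gather-like node would);
 * error handling, rescans, and the no-workers fallback are omitted, so this
 * is only a sketch of the intended API flow.
 */
static ParallelExecutorInfo *
example_launch_parallel_plan(PlanState *outer_planstate, EState *estate,
							 Bitmapset *initParam, int nworkers,
							 int64 tuples_needed)
{
	ParallelExecutorInfo *pei;

	/* create the DSM; serialize plan, params, and queues into it */
	pei = ExecInitParallelPlan(outer_planstate, estate, initParam,
							   nworkers, tuples_needed);

	/* actually start the background workers */
	LaunchParallelWorkers(pei->pcxt);

	/* attach tuple-queue readers for however many workers launched */
	if (pei->pcxt->nworkers_launched > 0)
		ExecParallelCreateReaders(pei);

	return pei;
}
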
884 : /*
885 : * Set up tuple queue readers to read the results of a parallel subplan.
886 : *
887 : * This is separate from ExecInitParallelPlan() because we can launch the
888 : * worker processes and let them start doing something before we do this.
889 : */
890 : void
891 958 : ExecParallelCreateReaders(ParallelExecutorInfo *pei)
892 : {
893 958 : int nworkers = pei->pcxt->nworkers_launched;
894 : int i;
895 :
896 : Assert(pei->reader == NULL);
897 :
898 958 : if (nworkers > 0)
899 : {
900 958 : pei->reader = (TupleQueueReader **)
901 958 : palloc(nworkers * sizeof(TupleQueueReader *));
902 :
903 3484 : for (i = 0; i < nworkers; i++)
904 : {
905 2526 : shm_mq_set_handle(pei->tqueue[i],
906 2526 : pei->pcxt->worker[i].bgwhandle);
907 2526 : pei->reader[i] = CreateTupleQueueReader(pei->tqueue[i]);
908 : }
909 : }
910 958 : }
911 :
912 : /*
913 : * Re-initialize the parallel executor shared memory state before launching
914 : * a fresh batch of workers.
915 : */
916 : void
917 258 : ExecParallelReinitialize(PlanState *planstate,
918 : ParallelExecutorInfo *pei,
919 : Bitmapset *sendParams)
920 : {
921 258 : EState *estate = planstate->state;
922 : FixedParallelExecutorState *fpes;
923 :
924 : /* Old workers must already be shut down */
925 : Assert(pei->finished);
926 :
927 : /*
928 : * Force any initplan outputs that we're going to pass to workers to be
929 : * evaluated, if they weren't already (see comments in
930 : * ExecInitParallelPlan).
931 : */
932 258 : ExecSetParamPlanMulti(sendParams, GetPerTupleExprContext(estate));
933 :
934 258 : ReinitializeParallelDSM(pei->pcxt);
935 258 : pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true);
936 258 : pei->reader = NULL;
937 258 : pei->finished = false;
938 :
939 258 : fpes = shm_toc_lookup(pei->pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, false);
940 :
941 : /* Free any serialized parameters from the last round. */
942 258 : if (DsaPointerIsValid(fpes->param_exec))
943 : {
944 0 : dsa_free(pei->area, fpes->param_exec);
945 0 : fpes->param_exec = InvalidDsaPointer;
946 : }
947 :
948 : /* Serialize current parameter values if required. */
949 258 : if (!bms_is_empty(sendParams))
950 : {
951 0 : pei->param_exec = SerializeParamExecParams(estate, sendParams,
952 : pei->area);
953 0 : fpes->param_exec = pei->param_exec;
954 : }
955 :
956 : /* Traverse plan tree and let each child node reset associated state. */
957 258 : estate->es_query_dsa = pei->area;
958 258 : ExecParallelReInitializeDSM(planstate, pei->pcxt);
959 258 : estate->es_query_dsa = NULL;
960 258 : }
961 :
962 : /*
963 : * Traverse plan tree to reinitialize per-node dynamic shared memory state
964 : */
965 : static bool
966 666 : ExecParallelReInitializeDSM(PlanState *planstate,
967 : ParallelContext *pcxt)
968 : {
969 666 : if (planstate == NULL)
970 0 : return false;
971 :
972 : /*
973 : * Call reinitializers for DSM-using plan nodes.
974 : */
975 666 : switch (nodeTag(planstate))
976 : {
977 276 : case T_SeqScanState:
978 276 : if (planstate->plan->parallel_aware)
979 228 : ExecSeqScanReInitializeDSM((SeqScanState *) planstate,
980 : pcxt);
981 276 : break;
982 12 : case T_IndexScanState:
983 12 : if (planstate->plan->parallel_aware)
984 12 : ExecIndexScanReInitializeDSM((IndexScanState *) planstate,
985 : pcxt);
986 12 : break;
987 12 : case T_IndexOnlyScanState:
988 12 : if (planstate->plan->parallel_aware)
989 12 : ExecIndexOnlyScanReInitializeDSM((IndexOnlyScanState *) planstate,
990 : pcxt);
991 12 : break;
992 0 : case T_ForeignScanState:
993 0 : if (planstate->plan->parallel_aware)
994 0 : ExecForeignScanReInitializeDSM((ForeignScanState *) planstate,
995 : pcxt);
996 0 : break;
997 0 : case T_AppendState:
998 0 : if (planstate->plan->parallel_aware)
999 0 : ExecAppendReInitializeDSM((AppendState *) planstate, pcxt);
1000 0 : break;
1001 0 : case T_CustomScanState:
1002 0 : if (planstate->plan->parallel_aware)
1003 0 : ExecCustomScanReInitializeDSM((CustomScanState *) planstate,
1004 : pcxt);
1005 0 : break;
1006 54 : case T_BitmapHeapScanState:
1007 54 : if (planstate->plan->parallel_aware)
1008 54 : ExecBitmapHeapReInitializeDSM((BitmapHeapScanState *) planstate,
1009 : pcxt);
1010 54 : break;
1011 96 : case T_HashJoinState:
1012 96 : if (planstate->plan->parallel_aware)
1013 48 : ExecHashJoinReInitializeDSM((HashJoinState *) planstate,
1014 : pcxt);
1015 96 : break;
1016 180 : case T_BitmapIndexScanState:
1017 : case T_HashState:
1018 : case T_SortState:
1019 : case T_IncrementalSortState:
1020 : case T_MemoizeState:
1021 : /* these nodes have DSM state, but no reinitialization is required */
1022 180 : break;
1023 :
1024 36 : default:
1025 36 : break;
1026 : }
1027 :
1028 666 : return planstate_tree_walker(planstate, ExecParallelReInitializeDSM, pcxt);
1029 : }
1030 :
1031 : /*
1032 : * Copy instrumentation information about this node and its descendants from
1033 : * dynamic shared memory.
1034 : */
1035 : static bool
1036 1026 : ExecParallelRetrieveInstrumentation(PlanState *planstate,
1037 : SharedExecutorInstrumentation *instrumentation)
1038 : {
1039 : Instrumentation *instrument;
1040 : int i;
1041 : int n;
1042 : int ibytes;
1043 1026 : int plan_node_id = planstate->plan->plan_node_id;
1044 : MemoryContext oldcontext;
1045 :
1046 : /* Find the instrumentation for this node. */
1047 4638 : for (i = 0; i < instrumentation->num_plan_nodes; ++i)
1048 4638 : if (instrumentation->plan_node_id[i] == plan_node_id)
1049 1026 : break;
1050 1026 : if (i >= instrumentation->num_plan_nodes)
1051 0 : elog(ERROR, "plan node %d not found", plan_node_id);
1052 :
1053 : /* Accumulate the statistics from all workers. */
1054 1026 : instrument = GetInstrumentationArray(instrumentation);
1055 1026 : instrument += i * instrumentation->num_workers;
1056 2706 : for (n = 0; n < instrumentation->num_workers; ++n)
1057 1680 : InstrAggNode(planstate->instrument, &instrument[n]);
1058 :
1059 : /*
1060 : * Also store the per-worker detail.
1061 : *
1062 : * Worker instrumentation should be allocated in the same context as the
1063 : * regular instrumentation information, which is the per-query context.
1064 : * Switch into per-query memory context.
1065 : */
1066 1026 : oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
1067 1026 : ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
1068 1026 : planstate->worker_instrument =
1069 1026 : palloc(ibytes + offsetof(WorkerInstrumentation, instrument));
1070 1026 : MemoryContextSwitchTo(oldcontext);
1071 :
1072 1026 : planstate->worker_instrument->num_workers = instrumentation->num_workers;
1073 1026 : memcpy(&planstate->worker_instrument->instrument, instrument, ibytes);
1074 :
1075 : /* Perform any node-type-specific work that needs to be done. */
1076 1026 : switch (nodeTag(planstate))
1077 : {
1078 270 : case T_IndexScanState:
1079 270 : ExecIndexScanRetrieveInstrumentation((IndexScanState *) planstate);
1080 270 : break;
1081 0 : case T_IndexOnlyScanState:
1082 0 : ExecIndexOnlyScanRetrieveInstrumentation((IndexOnlyScanState *) planstate);
1083 0 : break;
1084 0 : case T_BitmapIndexScanState:
1085 0 : ExecBitmapIndexScanRetrieveInstrumentation((BitmapIndexScanState *) planstate);
1086 0 : break;
1087 12 : case T_SortState:
1088 12 : ExecSortRetrieveInstrumentation((SortState *) planstate);
1089 12 : break;
1090 0 : case T_IncrementalSortState:
1091 0 : ExecIncrementalSortRetrieveInstrumentation((IncrementalSortState *) planstate);
1092 0 : break;
1093 84 : case T_HashState:
1094 84 : ExecHashRetrieveInstrumentation((HashState *) planstate);
1095 84 : break;
1096 102 : case T_AggState:
1097 102 : ExecAggRetrieveInstrumentation((AggState *) planstate);
1098 102 : break;
1099 0 : case T_MemoizeState:
1100 0 : ExecMemoizeRetrieveInstrumentation((MemoizeState *) planstate);
1101 0 : break;
1102 0 : case T_BitmapHeapScanState:
1103 0 : ExecBitmapHeapRetrieveInstrumentation((BitmapHeapScanState *) planstate);
1104 0 : break;
1105 558 : default:
1106 558 : break;
1107 : }
1108 :
1109 1026 : return planstate_tree_walker(planstate, ExecParallelRetrieveInstrumentation,
1110 : instrumentation);
1111 : }
1112 :
1113 : /*
1114 : * Add up the workers' JIT instrumentation from dynamic shared memory.
1115 : */
1116 : static void
1117 24 : ExecParallelRetrieveJitInstrumentation(PlanState *planstate,
1118 : SharedJitInstrumentation *shared_jit)
1119 : {
1120 : JitInstrumentation *combined;
1121 : int ibytes;
1122 :
1123 : int n;
1124 :
1125 : /*
1126 : * Accumulate worker JIT instrumentation into the combined JIT
1127 : * instrumentation, allocating it if required.
1128 : */
1129 24 : if (!planstate->state->es_jit_worker_instr)
1130 24 : planstate->state->es_jit_worker_instr =
1131 24 : MemoryContextAllocZero(planstate->state->es_query_cxt, sizeof(JitInstrumentation));
1132 24 : combined = planstate->state->es_jit_worker_instr;
1133 :
1134 : /* Accumulate all the workers' instrumentations. */
1135 72 : for (n = 0; n < shared_jit->num_workers; ++n)
1136 48 : InstrJitAgg(combined, &shared_jit->jit_instr[n]);
1137 :
1138 : /*
1139 : * Store the per-worker detail.
1140 : *
1141 : * Similar to ExecParallelRetrieveInstrumentation(), allocate the
1142 : * instrumentation in per-query context.
1143 : */
1144 24 : ibytes = offsetof(SharedJitInstrumentation, jit_instr)
1145 24 : + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
1146 24 : planstate->worker_jit_instrument =
1147 24 : MemoryContextAlloc(planstate->state->es_query_cxt, ibytes);
1148 :
1149 24 : memcpy(planstate->worker_jit_instrument, shared_jit, ibytes);
1150 24 : }
1151 :
1152 : /*
1153 : * Finish parallel execution. We wait for parallel workers to finish, and
1154 : * accumulate their buffer/WAL usage.
1155 : */
1156 : void
1157 1760 : ExecParallelFinish(ParallelExecutorInfo *pei)
1158 : {
1159 1760 : int nworkers = pei->pcxt->nworkers_launched;
1160 : int i;
1161 :
1162 : /* Make this be a no-op if called twice in a row. */
1163 1760 : if (pei->finished)
1164 796 : return;
1165 :
1166 : /*
1167 : * Detach from tuple queues ASAP, so that any still-active workers will
1168 : * notice that no further results are wanted.
1169 : */
1170 964 : if (pei->tqueue != NULL)
1171 : {
1172 3478 : for (i = 0; i < nworkers; i++)
1173 2514 : shm_mq_detach(pei->tqueue[i]);
1174 964 : pfree(pei->tqueue);
1175 964 : pei->tqueue = NULL;
1176 : }
1177 :
1178 : /*
1179 : * While we're waiting for the workers to finish, let's get rid of the
1180 : * tuple queue readers. (Any other local cleanup could be done here too.)
1181 : */
1182 964 : if (pei->reader != NULL)
1183 : {
1184 3460 : for (i = 0; i < nworkers; i++)
1185 2514 : DestroyTupleQueueReader(pei->reader[i]);
1186 946 : pfree(pei->reader);
1187 946 : pei->reader = NULL;
1188 : }
1189 :
1190 : /* Now wait for the workers to finish. */
1191 964 : WaitForParallelWorkersToFinish(pei->pcxt);
1192 :
1193 : /*
1194 : * Next, accumulate buffer/WAL usage. (This must wait for the workers to
1195 : * finish, or we might get incomplete data.)
1196 : */
1197 3478 : for (i = 0; i < nworkers; i++)
1198 2514 : InstrAccumParallelQuery(&pei->buffer_usage[i], &pei->wal_usage[i]);
1199 :
1200 964 : pei->finished = true;
1201 : }
1202 :
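/*
 * Editor's illustrative sketch (not part of the original file): leader-side
 * shutdown pairs ExecParallelFinish() above with ExecParallelCleanup() below.
 * Finish can be called as soon as no more tuples are wanted; Cleanup comes
 * last, once any DSM contents of interest have been examined.
 */
static void
example_shutdown_parallel_plan(ParallelExecutorInfo *pei)
{
	/* wait for workers and accumulate their buffer/WAL usage */
	ExecParallelFinish(pei);

	/* retrieve instrumentation, then tear down the DSA and ParallelContext */
	ExecParallelCleanup(pei);
}
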
1203 : /*
1204 : * Accumulate instrumentation, and then clean up whatever ParallelExecutorInfo
1205 : * resources still exist after ExecParallelFinish. We separate these
1206 : * routines because someone might want to examine the contents of the DSM
1207 : * after ExecParallelFinish and before calling this routine.
1208 : */
1209 : void
1210 706 : ExecParallelCleanup(ParallelExecutorInfo *pei)
1211 : {
1212 : /* Accumulate instrumentation, if any. */
1213 706 : if (pei->instrumentation)
1214 180 : ExecParallelRetrieveInstrumentation(pei->planstate,
1215 : pei->instrumentation);
1216 :
1217 : /* Accumulate JIT instrumentation, if any. */
1218 706 : if (pei->jit_instrumentation)
1219 24 : ExecParallelRetrieveJitInstrumentation(pei->planstate,
1220 24 : pei->jit_instrumentation);
1221 :
1222 : /* Free any serialized parameters. */
1223 706 : if (DsaPointerIsValid(pei->param_exec))
1224 : {
1225 24 : dsa_free(pei->area, pei->param_exec);
1226 24 : pei->param_exec = InvalidDsaPointer;
1227 : }
1228 706 : if (pei->area != NULL)
1229 : {
1230 706 : dsa_detach(pei->area);
1231 706 : pei->area = NULL;
1232 : }
1233 706 : if (pei->pcxt != NULL)
1234 : {
1235 706 : DestroyParallelContext(pei->pcxt);
1236 706 : pei->pcxt = NULL;
1237 : }
1238 706 : pfree(pei);
1239 706 : }
1240 :
1241 : /*
1242 : * Create a DestReceiver to write tuples we produce to the shm_mq designated
1243 : * for that purpose.
1244 : */
1245 : static DestReceiver *
1246 2526 : ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc)
1247 : {
1248 : char *mqspace;
1249 : shm_mq *mq;
1250 :
1251 2526 : mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE, false);
1252 2526 : mqspace += ParallelWorkerNumber * PARALLEL_TUPLE_QUEUE_SIZE;
1253 2526 : mq = (shm_mq *) mqspace;
1254 2526 : shm_mq_set_sender(mq, MyProc);
1255 2526 : return CreateTupleQueueDestReceiver(shm_mq_attach(mq, seg, NULL));
1256 : }
1257 :
1258 : /*
1259 : * Create a QueryDesc for the PlannedStmt we are to execute, and return it.
1260 : */
1261 : static QueryDesc *
1262 2526 : ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
1263 : int instrument_options)
1264 : {
1265 : char *pstmtspace;
1266 : char *paramspace;
1267 : PlannedStmt *pstmt;
1268 : ParamListInfo paramLI;
1269 : char *queryString;
1270 :
1271 : /* Get the query string from shared memory */
1272 2526 : queryString = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT, false);
1273 :
1274 : /* Reconstruct leader-supplied PlannedStmt. */
1275 2526 : pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT, false);
1276 2526 : pstmt = (PlannedStmt *) stringToNode(pstmtspace);
1277 :
1278 : /* Reconstruct ParamListInfo. */
1279 2526 : paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false);
1280 2526 : paramLI = RestoreParamList(&paramspace);
1281 :
1282 : /* Create a QueryDesc for the query. */
1283 2526 : return CreateQueryDesc(pstmt,
1284 : queryString,
1285 : GetActiveSnapshot(), InvalidSnapshot,
1286 : receiver, paramLI, NULL, instrument_options);
1287 : }
1288 :
1289 : /*
1290 : * Copy instrumentation information from this node and its descendants into
1291 : * dynamic shared memory, so that the parallel leader can retrieve it.
1292 : */
1293 : static bool
1294 2368 : ExecParallelReportInstrumentation(PlanState *planstate,
1295 : SharedExecutorInstrumentation *instrumentation)
1296 : {
1297 : int i;
1298 2368 : int plan_node_id = planstate->plan->plan_node_id;
1299 : Instrumentation *instrument;
1300 :
1301 2368 : InstrEndLoop(planstate->instrument);
1302 :
1303 : /*
1304 : * If we shuffled the plan_node_id array in *instrumentation into sorted
1305 : * order, we could use binary search here. This might matter someday if
1306 : * we're pushing down sufficiently large plan trees. For now, do it the
1307 : * slow, dumb way.
1308 : */
1309 7792 : for (i = 0; i < instrumentation->num_plan_nodes; ++i)
1310 7792 : if (instrumentation->plan_node_id[i] == plan_node_id)
1311 2368 : break;
1312 2368 : if (i >= instrumentation->num_plan_nodes)
1313 0 : elog(ERROR, "plan node %d not found", plan_node_id);
1314 :
1315 : /*
1316 : * Add our statistics to the per-node, per-worker totals. It's possible
1317 : * that this could happen more than once if we relaunched workers.
1318 : */
1319 2368 : instrument = GetInstrumentationArray(instrumentation);
1320 2368 : instrument += i * instrumentation->num_workers;
1321 : Assert(IsParallelWorker());
1322 : Assert(ParallelWorkerNumber < instrumentation->num_workers);
1323 2368 : InstrAggNode(&instrument[ParallelWorkerNumber], planstate->instrument);
1324 :
1325 2368 : return planstate_tree_walker(planstate, ExecParallelReportInstrumentation,
1326 : instrumentation);
1327 : }
1328 :
1329 : /*
1330 : * Initialize the PlanState and its descendants with the information
1331 : * retrieved from shared memory. This has to be done once the PlanState
1332 : * is allocated and initialized by the executor; that is, after ExecutorStart().
1333 : */
1334 : static bool
1335 8140 : ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt)
1336 : {
1337 8140 : if (planstate == NULL)
1338 0 : return false;
1339 :
1340 8140 : switch (nodeTag(planstate))
1341 : {
1342 3308 : case T_SeqScanState:
1343 3308 : if (planstate->plan->parallel_aware)
1344 2680 : ExecSeqScanInitializeWorker((SeqScanState *) planstate, pwcxt);
1345 3308 : break;
1346 396 : case T_IndexScanState:
1347 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1348 396 : ExecIndexScanInitializeWorker((IndexScanState *) planstate, pwcxt);
1349 396 : break;
1350 242 : case T_IndexOnlyScanState:
1351 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1352 242 : ExecIndexOnlyScanInitializeWorker((IndexOnlyScanState *) planstate,
1353 : pwcxt);
1354 242 : break;
1355 272 : case T_BitmapIndexScanState:
1356 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1357 272 : ExecBitmapIndexScanInitializeWorker((BitmapIndexScanState *) planstate,
1358 : pwcxt);
1359 272 : break;
1360 0 : case T_ForeignScanState:
1361 0 : if (planstate->plan->parallel_aware)
1362 0 : ExecForeignScanInitializeWorker((ForeignScanState *) planstate,
1363 : pwcxt);
1364 0 : break;
1365 374 : case T_AppendState:
1366 374 : if (planstate->plan->parallel_aware)
1367 314 : ExecAppendInitializeWorker((AppendState *) planstate, pwcxt);
1368 374 : break;
1369 0 : case T_CustomScanState:
1370 0 : if (planstate->plan->parallel_aware)
1371 0 : ExecCustomScanInitializeWorker((CustomScanState *) planstate,
1372 : pwcxt);
1373 0 : break;
1374 272 : case T_BitmapHeapScanState:
1375 272 : if (planstate->plan->parallel_aware)
1376 270 : ExecBitmapHeapInitializeWorker((BitmapHeapScanState *) planstate,
1377 : pwcxt);
1378 272 : break;
1379 546 : case T_HashJoinState:
1380 546 : if (planstate->plan->parallel_aware)
1381 306 : ExecHashJoinInitializeWorker((HashJoinState *) planstate,
1382 : pwcxt);
1383 546 : break;
1384 546 : case T_HashState:
1385 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1386 546 : ExecHashInitializeWorker((HashState *) planstate, pwcxt);
1387 546 : break;
1388 450 : case T_SortState:
1389 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1390 450 : ExecSortInitializeWorker((SortState *) planstate, pwcxt);
1391 450 : break;
1392 0 : case T_IncrementalSortState:
1393 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1394 0 : ExecIncrementalSortInitializeWorker((IncrementalSortState *) planstate,
1395 : pwcxt);
1396 0 : break;
1397 1544 : case T_AggState:
1398 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1399 1544 : ExecAggInitializeWorker((AggState *) planstate, pwcxt);
1400 1544 : break;
1401 12 : case T_MemoizeState:
1402 : /* even when not parallel-aware, for EXPLAIN ANALYZE */
1403 12 : ExecMemoizeInitializeWorker((MemoizeState *) planstate, pwcxt);
1404 12 : break;
1405 178 : default:
1406 178 : break;
1407 : }
1408 :
1409 8140 : return planstate_tree_walker(planstate, ExecParallelInitializeWorker,
1410 : pwcxt);
1411 : }
1412 :
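/*
 * Editor's illustrative sketch (not part of the original file): the matching
 * per-node worker callback simply looks up whatever the leader published
 * under this node's plan_node_id.  Names are hypothetical.
 */
static void
ExampleNodeInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt)
{
	int		   *shared;

	shared = shm_toc_lookup(pwcxt->toc, planstate->plan->plan_node_id, false);
	/* a real node would stash this pointer in its PlanState for later use */
	(void) shared;
}
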
1413 : /*
1414 : * Main entrypoint for parallel query worker processes.
1415 : *
1416 : * We reach this function from ParallelWorkerMain, so the setup necessary to
1417 : * create a sensible parallel environment has already been done;
1418 : * ParallelWorkerMain worries about stuff like the transaction state, combo
1419 : * CID mappings, and GUC values, so we don't need to deal with any of that
1420 : * here.
1421 : *
1422 : * Our job is to deal with concerns specific to the executor. The parallel
1423 : * group leader will have stored a serialized PlannedStmt, and it's our job
1424 : * to execute that plan and write the resulting tuples to the appropriate
1425 : * tuple queue. Various bits of supporting information that we need in order
1426 : * to do this are also stored in the dsm_segment and can be accessed through
1427 : * the shm_toc.
1428 : */
1429 : void
1430 2526 : ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
1431 : {
1432 : FixedParallelExecutorState *fpes;
1433 : BufferUsage *buffer_usage;
1434 : WalUsage *wal_usage;
1435 : DestReceiver *receiver;
1436 : QueryDesc *queryDesc;
1437 : SharedExecutorInstrumentation *instrumentation;
1438 : SharedJitInstrumentation *jit_instrumentation;
1439 2526 : int instrument_options = 0;
1440 : void *area_space;
1441 : dsa_area *area;
1442 : ParallelWorkerContext pwcxt;
1443 :
1444 : /* Get fixed-size state. */
1445 2526 : fpes = shm_toc_lookup(toc, PARALLEL_KEY_EXECUTOR_FIXED, false);
1446 :
1447 : /* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
1448 2526 : receiver = ExecParallelGetReceiver(seg, toc);
1449 2526 : instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
1450 2526 : if (instrumentation != NULL)
1451 724 : instrument_options = instrumentation->instrument_options;
1452 2526 : jit_instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_JIT_INSTRUMENTATION,
1453 : true);
1454 2526 : queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);
1455 :
1456 : /* Set debug_query_string for this worker */
1457 2526 : debug_query_string = queryDesc->sourceText;
1458 :
1459 : /* Report workers' query for monitoring purposes */
1460 2526 : pgstat_report_activity(STATE_RUNNING, debug_query_string);
1461 :
1462 : /* Attach to the dynamic shared memory area. */
1463 2526 : area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false);
1464 2526 : area = dsa_attach_in_place(area_space, seg);
1465 :
1466 : /* Start up the executor */
1467 2526 : queryDesc->plannedstmt->jitFlags = fpes->jit_flags;
1468 2526 : ExecutorStart(queryDesc, fpes->eflags);
1469 :
1470 : /* Special executor initialization steps for parallel workers */
1471 2526 : queryDesc->planstate->state->es_query_dsa = area;
1472 2526 : if (DsaPointerIsValid(fpes->param_exec))
1473 : {
1474 : char *paramexec_space;
1475 :
1476 70 : paramexec_space = dsa_get_address(area, fpes->param_exec);
1477 70 : RestoreParamExecParams(paramexec_space, queryDesc->estate);
1478 : }
1479 2526 : pwcxt.toc = toc;
1480 2526 : pwcxt.seg = seg;
1481 2526 : ExecParallelInitializeWorker(queryDesc->planstate, &pwcxt);
1482 :
1483 : /* Pass down any tuple bound */
1484 2526 : ExecSetTupleBound(fpes->tuples_needed, queryDesc->planstate);
1485 :
1486 : /*
1487 : * Prepare to track buffer/WAL usage during query execution.
1488 : *
1489 : * We do this after starting up the executor to match what happens in the
1490 : * leader, which also doesn't count buffer accesses and WAL activity that
1491 : * occur during executor startup.
1492 : */
1493 2526 : InstrStartParallelQuery();
1494 :
1495 : /*
1496 : * Run the plan. If we specified a tuple bound, be careful not to demand
1497 : * more tuples than that.
1498 : */
1499 2526 : ExecutorRun(queryDesc,
1500 : ForwardScanDirection,
1501 2526 : fpes->tuples_needed < 0 ? (int64) 0 : fpes->tuples_needed);
1502 :
1503 : /* Shut down the executor */
1504 2514 : ExecutorFinish(queryDesc);
1505 :
1506 : /* Report buffer/WAL usage during parallel execution. */
1507 2514 : buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE, false);
1508 2514 : wal_usage = shm_toc_lookup(toc, PARALLEL_KEY_WAL_USAGE, false);
1509 2514 : InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber],
1510 2514 : &wal_usage[ParallelWorkerNumber]);
1511 :
1512 : /* Report instrumentation data if any instrumentation options are set. */
1513 2514 : if (instrumentation != NULL)
1514 724 : ExecParallelReportInstrumentation(queryDesc->planstate,
1515 : instrumentation);
1516 :
1517 : /* Report JIT instrumentation data if any */
1518 2514 : if (queryDesc->estate->es_jit && jit_instrumentation != NULL)
1519 : {
1520 : Assert(ParallelWorkerNumber < jit_instrumentation->num_workers);
1521 144 : jit_instrumentation->jit_instr[ParallelWorkerNumber] =
1522 144 : queryDesc->estate->es_jit->instr;
1523 : }
1524 :
1525 : /* Must do this after capturing instrumentation. */
1526 2514 : ExecutorEnd(queryDesc);
1527 :
1528 : /* Cleanup. */
1529 2514 : dsa_detach(area);
1530 2514 : FreeQueryDesc(queryDesc);
1531 2514 : receiver->rDestroy(receiver);
1532 2514 : }
|