Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeHash.c
4 : * Routines to hash relations for hashjoin
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeHash.c
12 : *
13 : * See note on parallelism in nodeHashjoin.c.
14 : *
15 : *-------------------------------------------------------------------------
16 : */
17 : /*
18 : * INTERFACE ROUTINES
19 : * MultiExecHash - generate an in-memory hash table of the relation
20 : * ExecInitHash - initialize node and subnodes
21 : * ExecEndHash - shutdown node and subnodes
22 : */
23 :
24 : #include "postgres.h"
25 :
26 : #include <math.h>
27 : #include <limits.h>
28 :
29 : #include "access/htup_details.h"
30 : #include "access/parallel.h"
31 : #include "catalog/pg_statistic.h"
32 : #include "commands/tablespace.h"
33 : #include "executor/executor.h"
34 : #include "executor/hashjoin.h"
35 : #include "executor/nodeHash.h"
36 : #include "executor/nodeHashjoin.h"
37 : #include "miscadmin.h"
38 : #include "port/pg_bitutils.h"
39 : #include "utils/dynahash.h"
40 : #include "utils/lsyscache.h"
41 : #include "utils/memutils.h"
42 : #include "utils/syscache.h"
43 : #include "utils/wait_event.h"
44 :
45 : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
46 : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
47 : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
48 : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
49 : static void ExecHashBuildSkewHash(HashState *hashstate,
50 : HashJoinTable hashtable, Hash *node,
51 : int mcvsToUse);
52 : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
53 : TupleTableSlot *slot,
54 : uint32 hashvalue,
55 : int bucketNumber);
56 : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
57 :
58 : static void *dense_alloc(HashJoinTable hashtable, Size size);
59 : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
60 : size_t size,
61 : dsa_pointer *shared);
62 : static void MultiExecPrivateHash(HashState *node);
63 : static void MultiExecParallelHash(HashState *node);
64 : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
65 : int bucketno);
66 : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
67 : HashJoinTuple tuple);
68 : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
69 : HashJoinTuple tuple,
70 : dsa_pointer tuple_shared);
71 : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
72 : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
73 : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
74 : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
75 : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
76 : dsa_pointer *shared);
77 : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
78 : int batchno,
79 : size_t size);
80 : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
81 : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
82 :
83 :
84 : /* ----------------------------------------------------------------
85 : * ExecHash
86 : *
87 : * stub for pro forma compliance
88 : * ----------------------------------------------------------------
89 : */
90 : static TupleTableSlot *
91 0 : ExecHash(PlanState *pstate)
92 : {
93 0 : elog(ERROR, "Hash node does not support ExecProcNode call convention");
94 : return NULL;
95 : }
96 :
97 : /* ----------------------------------------------------------------
98 : * MultiExecHash
99 : *
100 : * build hash table for hashjoin, doing partitioning if more
101 : * than one batch is required.
102 : * ----------------------------------------------------------------
103 : */
104 : Node *
105 19280 : MultiExecHash(HashState *node)
106 : {
107 : /* must provide our own instrumentation support */
108 19280 : if (node->ps.instrument)
109 306 : InstrStartNode(node->ps.instrument);
110 :
111 19280 : if (node->parallel_state != NULL)
112 398 : MultiExecParallelHash(node);
113 : else
114 18882 : MultiExecPrivateHash(node);
115 :
116 : /* must provide our own instrumentation support */
117 19280 : if (node->ps.instrument)
118 306 : InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
119 :
120 : /*
121 : * We do not return the hash table directly because it's not a subtype of
122 : * Node, and so would violate the MultiExecProcNode API. Instead, our
123 : * parent Hashjoin node is expected to know how to fish it out of our node
124 : * state. Ugly but not really worth cleaning up, since Hashjoin knows
125 : * quite a bit more about Hash besides that.
126 : */
127 19280 : return NULL;
128 : }
129 :
130 : /* ----------------------------------------------------------------
131 : * MultiExecPrivateHash
132 : *
133 : * parallel-oblivious version, building a backend-private
134 : * hash table and (if necessary) batch files.
135 : * ----------------------------------------------------------------
136 : */
137 : static void
138 18882 : MultiExecPrivateHash(HashState *node)
139 : {
140 : PlanState *outerNode;
141 : HashJoinTable hashtable;
142 : TupleTableSlot *slot;
143 : ExprContext *econtext;
144 :
145 : /*
146 : * get state info from node
147 : */
148 18882 : outerNode = outerPlanState(node);
149 18882 : hashtable = node->hashtable;
150 :
151 : /*
152 : * set expression context
153 : */
154 18882 : econtext = node->ps.ps_ExprContext;
155 :
156 : /*
157 : * Get all tuples from the node below the Hash node and insert into the
158 : * hash table (or temp files).
159 : */
160 : for (;;)
161 7585222 : {
162 : bool isnull;
163 : Datum hashdatum;
164 :
165 7604104 : slot = ExecProcNode(outerNode);
166 7604104 : if (TupIsNull(slot))
167 : break;
168 : /* We have to compute the hash value */
169 7585222 : econtext->ecxt_outertuple = slot;
170 :
171 7585222 : ResetExprContext(econtext);
172 :
173 7585222 : hashdatum = ExecEvalExprSwitchContext(node->hash_expr, econtext,
174 : &isnull);
175 :
176 7585222 : if (!isnull)
177 : {
178 7585210 : uint32 hashvalue = DatumGetUInt32(hashdatum);
179 : int bucketNumber;
180 :
181 7585210 : bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
182 7585210 : if (bucketNumber != INVALID_SKEW_BUCKET_NO)
183 : {
184 : /* It's a skew tuple, so put it into that hash table */
185 588 : ExecHashSkewTableInsert(hashtable, slot, hashvalue,
186 : bucketNumber);
187 588 : hashtable->skewTuples += 1;
188 : }
189 : else
190 : {
191 : /* Not subject to skew optimization, so insert normally */
192 7584622 : ExecHashTableInsert(hashtable, slot, hashvalue);
193 : }
194 7585210 : hashtable->totalTuples += 1;
195 : }
196 : }
197 :
198 : /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
199 18882 : if (hashtable->nbuckets != hashtable->nbuckets_optimal)
200 72 : ExecHashIncreaseNumBuckets(hashtable);
201 :
202 : /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
203 18882 : hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
204 18882 : if (hashtable->spaceUsed > hashtable->spacePeak)
205 18852 : hashtable->spacePeak = hashtable->spaceUsed;
206 :
207 18882 : hashtable->partialTuples = hashtable->totalTuples;
208 18882 : }
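/*
 * [Editor's note] Context for the resize above, a reading aid rather than
 * authoritative documentation: ExecHashTableInsert() tracks
 * nbuckets_optimal as tuples arrive, doubling it when the load factor
 * would exceed NTUP_PER_BUCKET. The bucket-array resize itself is
 * deferred to this point so it happens at most once, after all tuples are
 * loaded; ExecHashIncreaseNumBuckets() then relinks tuples by walking the
 * dense-allocated chunks instead of the old buckets.
 */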
209 :
210 : /* ----------------------------------------------------------------
211 : * MultiExecParallelHash
212 : *
213 : * parallel-aware version, building a shared hash table and
214 : * (if necessary) batch files using the combined effort of
215 : * a set of co-operating backends.
216 : * ----------------------------------------------------------------
217 : */
218 : static void
219 398 : MultiExecParallelHash(HashState *node)
220 : {
221 : ParallelHashJoinState *pstate;
222 : PlanState *outerNode;
223 : HashJoinTable hashtable;
224 : TupleTableSlot *slot;
225 : ExprContext *econtext;
226 : uint32 hashvalue;
227 : Barrier *build_barrier;
228 : int i;
229 :
230 : /*
231 : * get state info from node
232 : */
233 398 : outerNode = outerPlanState(node);
234 398 : hashtable = node->hashtable;
235 :
236 : /*
237 : * set expression context
238 : */
239 398 : econtext = node->ps.ps_ExprContext;
240 :
241 : /*
242 : * Synchronize the parallel hash table build. At this stage we know that
243 : * the shared hash table has been or is being set up by
244 : * ExecHashTableCreate(), but we don't know if our peers have returned
245 : * from there or are here in MultiExecParallelHash(), and if so how far
246 : * through they are. To find out, we check the build_barrier phase and
247 : * then jump to the right step in the build algorithm.
248 : */
249 398 : pstate = hashtable->parallel_state;
250 398 : build_barrier = &pstate->build_barrier;
251 : Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
252 398 : switch (BarrierPhase(build_barrier))
253 : {
254 168 : case PHJ_BUILD_ALLOCATE:
255 :
256 : /*
257 : * Either I just allocated the initial hash table in
258 : * ExecHashTableCreate(), or someone else is doing that. Either
259 : * way, wait for everyone to arrive here so we can proceed.
260 : */
261 168 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
262 : /* Fall through. */
263 :
264 244 : case PHJ_BUILD_HASH_INNER:
265 :
266 : /*
267 : * It's time to begin hashing, or if we just arrived here then
268 : * hashing is already underway, so join in that effort. While
269 : * hashing we have to be prepared to help increase the number of
270 : * batches or buckets at any time, and if we arrived here when
271 : * that was already underway we'll have to help complete that work
272 : * immediately so that it's safe to access batches and buckets
273 : * below.
274 : */
275 244 : if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
276 : PHJ_GROW_BATCHES_ELECT)
277 0 : ExecParallelHashIncreaseNumBatches(hashtable);
278 244 : if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
279 : PHJ_GROW_BUCKETS_ELECT)
280 0 : ExecParallelHashIncreaseNumBuckets(hashtable);
281 244 : ExecParallelHashEnsureBatchAccessors(hashtable);
282 244 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
283 : for (;;)
284 2160098 : {
285 : bool isnull;
286 :
287 2160342 : slot = ExecProcNode(outerNode);
288 2160342 : if (TupIsNull(slot))
289 : break;
290 2160098 : econtext->ecxt_outertuple = slot;
291 :
292 2160098 : ResetExprContext(econtext);
293 :
294 2160098 : hashvalue = DatumGetUInt32(ExecEvalExprSwitchContext(node->hash_expr,
295 : econtext,
296 : &isnull));
297 :
298 2160098 : if (!isnull)
299 2160098 : ExecParallelHashTableInsert(hashtable, slot, hashvalue);
300 2160098 : hashtable->partialTuples++;
301 : }
302 :
303 : /*
304 : * Make sure that any tuples we wrote to disk are visible to
305 : * others before anyone tries to load them.
306 : */
307 1040 : for (i = 0; i < hashtable->nbatch; ++i)
308 796 : sts_end_write(hashtable->batches[i].inner_tuples);
309 :
310 : /*
311 : * Update shared counters. We need an accurate total tuple count
312 : * to control the empty table optimization.
313 : */
314 244 : ExecParallelHashMergeCounters(hashtable);
315 :
316 244 : BarrierDetach(&pstate->grow_buckets_barrier);
317 244 : BarrierDetach(&pstate->grow_batches_barrier);
318 :
319 : /*
320 : * Wait for everyone to finish building and flushing files and
321 : * counters.
322 : */
323 244 : if (BarrierArriveAndWait(build_barrier,
324 : WAIT_EVENT_HASH_BUILD_HASH_INNER))
325 : {
326 : /*
327 : * Elect one backend to disable any further growth. Batches
328 : * are now fixed. While building them we made sure they'd fit
329 : * in our memory budget when we load them back in later (or we
330 : * tried to do that and gave up because we detected extreme
331 : * skew).
332 : */
333 168 : pstate->growth = PHJ_GROWTH_DISABLED;
334 : }
335 : }
336 :
337 : /*
338 : * We're not yet attached to a batch. We all agree on the dimensions and
339 : * number of inner tuples (for the empty table optimization).
340 : */
341 398 : hashtable->curbatch = -1;
342 398 : hashtable->nbuckets = pstate->nbuckets;
343 398 : hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
344 398 : hashtable->totalTuples = pstate->total_tuples;
345 :
346 : /*
347 : * Unless we're completely done and the batch state has been freed, make
348 : * sure we have accessors.
349 : */
350 398 : if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
351 396 : ExecParallelHashEnsureBatchAccessors(hashtable);
352 :
353 : /*
354 : * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
355 : * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
356 : * there already).
357 : */
358 : Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
359 : BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
360 : BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
361 398 : }
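/*
 * [Editor's note] A minimal sketch of the barrier idiom used above, for
 * readers new to storage/ipc/barrier.h. BarrierPhase() tells a late
 * arriver which step the group has reached, and BarrierArriveAndWait()
 * returns true in exactly one participant, electing it to run a serial
 * step. The phase numbers and helper functions are illustrative only,
 * not the real PHJ_BUILD_* machinery.
 */
#ifdef EDITOR_EXAMPLE			/* illustration, not part of the build */
extern void serial_setup(void);		/* hypothetical */
extern void do_share_of_work(void); /* hypothetical */

static void
join_group_effort(Barrier *barrier)
{
	switch (BarrierPhase(barrier))
	{
		case 0:
			/* one participant is elected to perform the serial setup */
			if (BarrierArriveAndWait(barrier, 0))
				serial_setup();
			/* fall through */
		case 1:
			/* everyone, including late arrivals, helps with the work */
			do_share_of_work();
			BarrierArriveAndWait(barrier, 0);
			break;
	}
}
#endif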
362 :
363 : /* ----------------------------------------------------------------
364 : * ExecInitHash
365 : *
366 : * Init routine for Hash node
367 : * ----------------------------------------------------------------
368 : */
369 : HashState *
370 30248 : ExecInitHash(Hash *node, EState *estate, int eflags)
371 : {
372 : HashState *hashstate;
373 :
374 : /* check for unsupported flags */
375 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
376 :
377 : /*
378 : * create state structure
379 : */
380 30248 : hashstate = makeNode(HashState);
381 30248 : hashstate->ps.plan = (Plan *) node;
382 30248 : hashstate->ps.state = estate;
383 30248 : hashstate->ps.ExecProcNode = ExecHash;
384 : /* delay building hashtable until ExecHashTableCreate() in executor run */
385 30248 : hashstate->hashtable = NULL;
386 :
387 : /*
388 : * Miscellaneous initialization
389 : *
390 : * create expression context for node
391 : */
392 30248 : ExecAssignExprContext(estate, &hashstate->ps);
393 :
394 : /*
395 : * initialize child nodes
396 : */
397 30248 : outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
398 :
399 : /*
400 : * initialize our result slot and type. No need to build projection
401 : * because this node doesn't do projections.
402 : */
403 30248 : ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
404 30248 : hashstate->ps.ps_ProjInfo = NULL;
405 :
406 : Assert(node->plan.qual == NIL);
407 :
408 : /*
409 : * Delay initialization of hash_expr until ExecInitHashJoin(). We cannot
410 : * build the ExprState here as we don't yet know the join type we're going
411 : * to be hashing values for and we need to know that before calling
412 : * ExecBuildHash32Expr as the keep_nulls parameter depends on the join
413 : * type.
414 : */
415 30248 : hashstate->hash_expr = NULL;
416 :
417 30248 : return hashstate;
418 : }
419 :
420 : /* ---------------------------------------------------------------
421 : * ExecEndHash
422 : *
423 : * clean up routine for Hash node
424 : * ----------------------------------------------------------------
425 : */
426 : void
427 30136 : ExecEndHash(HashState *node)
428 : {
429 : PlanState *outerPlan;
430 :
431 : /*
432 : * shut down the subplan
433 : */
434 30136 : outerPlan = outerPlanState(node);
435 30136 : ExecEndNode(outerPlan);
436 30136 : }
437 :
438 :
439 : /* ----------------------------------------------------------------
440 : * ExecHashTableCreate
441 : *
442 : * create an empty hashtable data structure for hashjoin.
443 : * ----------------------------------------------------------------
444 : */
445 : HashJoinTable
446 19280 : ExecHashTableCreate(HashState *state)
447 : {
448 : Hash *node;
449 : HashJoinTable hashtable;
450 : Plan *outerNode;
451 : size_t space_allowed;
452 : int nbuckets;
453 : int nbatch;
454 : double rows;
455 : int num_skew_mcvs;
456 : int log2_nbuckets;
457 : MemoryContext oldcxt;
458 :
459 : /*
460 : * Get information about the size of the relation to be hashed (it's the
461 : * "outer" subtree of this node, but the inner relation of the hashjoin).
462 : * Compute the appropriate size of the hash table.
463 : */
464 19280 : node = (Hash *) state->ps.plan;
465 19280 : outerNode = outerPlan(node);
466 :
467 : /*
468 : * If this is a shared hash table with a partial plan, then we can't use
469 : * outerNode->plan_rows to estimate its size. We need an estimate of the
470 : * total number of rows across all copies of the partial plan.
471 : */
472 19280 : rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
473 :
474 18882 : ExecChooseHashTableSize(rows, outerNode->plan_width,
475 19280 : OidIsValid(node->skewTable),
476 19280 : state->parallel_state != NULL,
477 19280 : state->parallel_state != NULL ?
478 398 : state->parallel_state->nparticipants - 1 : 0,
479 : &space_allowed,
480 : &nbuckets, &nbatch, &num_skew_mcvs);
481 :
482 : /* nbuckets must be a power of 2 */
483 19280 : log2_nbuckets = my_log2(nbuckets);
484 : Assert(nbuckets == (1 << log2_nbuckets));
485 :
486 : /*
487 : * Initialize the hash table control block.
488 : *
489 : * The hashtable control block is just palloc'd from the executor's
490 : * per-query memory context. Everything else should be kept inside the
491 : * subsidiary hashCxt, batchCxt or spillCxt.
492 : */
493 19280 : hashtable = palloc_object(HashJoinTableData);
494 19280 : hashtable->nbuckets = nbuckets;
495 19280 : hashtable->nbuckets_original = nbuckets;
496 19280 : hashtable->nbuckets_optimal = nbuckets;
497 19280 : hashtable->log2_nbuckets = log2_nbuckets;
498 19280 : hashtable->log2_nbuckets_optimal = log2_nbuckets;
499 19280 : hashtable->buckets.unshared = NULL;
500 19280 : hashtable->skewEnabled = false;
501 19280 : hashtable->skewBucket = NULL;
502 19280 : hashtable->skewBucketLen = 0;
503 19280 : hashtable->nSkewBuckets = 0;
504 19280 : hashtable->skewBucketNums = NULL;
505 19280 : hashtable->nbatch = nbatch;
506 19280 : hashtable->curbatch = 0;
507 19280 : hashtable->nbatch_original = nbatch;
508 19280 : hashtable->nbatch_outstart = nbatch;
509 19280 : hashtable->growEnabled = true;
510 19280 : hashtable->totalTuples = 0;
511 19280 : hashtable->partialTuples = 0;
512 19280 : hashtable->skewTuples = 0;
513 19280 : hashtable->innerBatchFile = NULL;
514 19280 : hashtable->outerBatchFile = NULL;
515 19280 : hashtable->spaceUsed = 0;
516 19280 : hashtable->spacePeak = 0;
517 19280 : hashtable->spaceAllowed = space_allowed;
518 19280 : hashtable->spaceUsedSkew = 0;
519 19280 : hashtable->spaceAllowedSkew =
520 19280 : hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
521 19280 : hashtable->chunks = NULL;
522 19280 : hashtable->current_chunk = NULL;
523 19280 : hashtable->parallel_state = state->parallel_state;
524 19280 : hashtable->area = state->ps.state->es_query_dsa;
525 19280 : hashtable->batches = NULL;
526 :
527 : #ifdef HJDEBUG
528 : printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
529 : hashtable, nbatch, nbuckets);
530 : #endif
531 :
532 : /*
533 : * Create temporary memory contexts in which to keep the hashtable working
534 : * storage. See notes in executor/hashjoin.h.
535 : */
536 19280 : hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
537 : "HashTableContext",
538 : ALLOCSET_DEFAULT_SIZES);
539 :
540 19280 : hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
541 : "HashBatchContext",
542 : ALLOCSET_DEFAULT_SIZES);
543 :
544 19280 : hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
545 : "HashSpillContext",
546 : ALLOCSET_DEFAULT_SIZES);
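/*
 * [Editor's note] The resulting context hierarchy, for orientation (the
 * authoritative notes live in executor/hashjoin.h):
 *
 *   CurrentMemoryContext
 *     hashCxt "HashTableContext" -- lives for the whole hash join
 *       batchCxt "HashBatchContext" -- per-batch storage, reset between
 *                                      batches
 *       spillCxt "HashSpillContext" -- batch-file state that must survive
 *                                      batch resets
 */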
547 :
548 : /* Allocate data that will live for the life of the hashjoin */
549 :
550 19280 : oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
551 :
552 19280 : if (nbatch > 1 && hashtable->parallel_state == NULL)
553 : {
554 : MemoryContext oldctx;
555 :
556 : /*
557 : * allocate and initialize the file arrays in spillCxt (not needed for
558 : * the parallel case, which uses shared tuplestores instead of raw files)
559 : */
560 106 : oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
561 :
562 106 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
563 106 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
564 :
565 106 : MemoryContextSwitchTo(oldctx);
566 :
567 : /* The files will not be opened until needed... */
568 : /* ... but make sure we have temp tablespaces established for them */
569 106 : PrepareTempTablespaces();
570 : }
571 :
572 19280 : MemoryContextSwitchTo(oldcxt);
573 :
574 19280 : if (hashtable->parallel_state)
575 : {
576 398 : ParallelHashJoinState *pstate = hashtable->parallel_state;
577 : Barrier *build_barrier;
578 :
579 : /*
580 : * Attach to the build barrier. The corresponding detach operation is
581 : * in ExecHashTableDetach. Note that we won't attach to the
582 : * batch_barrier for batch 0 yet. We'll attach later and start it out
583 : * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
584 : * then loaded while hashing (the standard hybrid hash join
585 : * algorithm), and we'll coordinate that using build_barrier.
586 : */
587 398 : build_barrier = &pstate->build_barrier;
588 398 : BarrierAttach(build_barrier);
589 :
590 : /*
591 : * So far we have no idea whether there are any other participants,
592 : * and if so, what phase they are working on. The only thing we care
593 : * about at this point is whether someone has already created the
594 : * SharedHashJoinBatch objects and the hash table for batch 0. One
595 : * backend will be elected to do that now if necessary.
596 : */
597 566 : if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
598 168 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
599 : {
600 168 : pstate->nbatch = nbatch;
601 168 : pstate->space_allowed = space_allowed;
602 168 : pstate->growth = PHJ_GROWTH_OK;
603 :
604 : /* Set up the shared state for coordinating batches. */
605 168 : ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
606 :
607 : /*
608 : * Allocate batch 0's hash table up front so we can load it
609 : * directly while hashing.
610 : */
611 168 : pstate->nbuckets = nbuckets;
612 168 : ExecParallelHashTableAlloc(hashtable, 0);
613 : }
614 :
615 : /*
616 : * The next Parallel Hash synchronization point is in
617 : * MultiExecParallelHash(), which will progress it all the way to
618 : * PHJ_BUILD_RUN. The caller must not return control from this
619 : * executor node between now and then.
620 : */
621 : }
622 : else
623 : {
624 : /*
625 : * Prepare context for the first-scan space allocations; allocate the
626 : * hashbucket array therein, and set each bucket "empty".
627 : */
628 18882 : MemoryContextSwitchTo(hashtable->batchCxt);
629 :
630 18882 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
631 :
632 : /*
633 : * Set up for skew optimization, if possible and there's a need for
634 : * more than one batch. (In a one-batch join, there's no point in
635 : * it.)
636 : */
637 18882 : if (nbatch > 1)
638 106 : ExecHashBuildSkewHash(state, hashtable, node, num_skew_mcvs);
639 :
640 18882 : MemoryContextSwitchTo(oldcxt);
641 : }
642 :
643 19280 : return hashtable;
644 : }
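/*
 * [Editor's note] For orientation, a paraphrased sketch (not verbatim
 * code) of how the parent node in nodeHashjoin.c drives this file:
 *
 *     hashNode->hashtable = ExecHashTableCreate(hashNode);
 *     MultiExecProcNode((PlanState *) hashNode);  // runs MultiExecHash()
 *     ... probe hashNode->hashtable for each outer tuple ...
 *     ExecHashTableDestroy(hashtable);            // at end of join
 */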
645 :
646 :
647 : /*
648 : * Compute appropriate size for hashtable given the estimated size of the
649 : * relation to be hashed (number of rows and average row width).
650 : *
651 : * This is exported so that the planner's costsize.c can use it.
652 : */
653 :
654 : /* Target bucket loading (tuples per bucket) */
655 : #define NTUP_PER_BUCKET 1
656 :
657 : void
658 1056642 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
659 : bool try_combined_hash_mem,
660 : int parallel_workers,
661 : size_t *space_allowed,
662 : int *numbuckets,
663 : int *numbatches,
664 : int *num_skew_mcvs)
665 : {
666 : int tupsize;
667 : double inner_rel_bytes;
668 : size_t hash_table_bytes;
669 : size_t bucket_bytes;
670 : size_t max_pointers;
671 1056642 : int nbatch = 1;
672 : int nbuckets;
673 : double dbuckets;
674 :
675 : /* Force a plausible relation size if no info */
676 1056642 : if (ntuples <= 0.0)
677 150 : ntuples = 1000.0;
678 :
679 : /*
680 : * Estimate tupsize based on footprint of tuple in hashtable... note this
681 : * does not allow for any palloc overhead. The manipulations of spaceUsed
682 : * don't count palloc overhead either.
683 : */
684 1056642 : tupsize = HJTUPLE_OVERHEAD +
685 1056642 : MAXALIGN(SizeofMinimalTupleHeader) +
686 1056642 : MAXALIGN(tupwidth);
687 1056642 : inner_rel_bytes = ntuples * tupsize;
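/*
 * [Editor's note] A rough worked example, assuming a typical 64-bit
 * build (exact constants are platform- and alignment-dependent): with
 * tupwidth = 40, tupsize is about 16 (HJTUPLE_OVERHEAD) + 24 (MAXALIGN'd
 * minimal tuple header) + 40 = 80 bytes, so an estimate of one million
 * inner rows yields inner_rel_bytes of roughly 80MB.
 */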
688 :
689 : /*
690 : * Compute in-memory hashtable size limit from GUCs.
691 : */
692 1056642 : hash_table_bytes = get_hash_memory_limit();
693 :
694 : /*
695 : * Parallel Hash tries to use the combined hash_mem of all workers to
696 : * avoid the need to batch. If that won't work, it falls back to hash_mem
697 : * per worker and tries to process batches in parallel.
698 : */
699 1056642 : if (try_combined_hash_mem)
700 : {
701 : /* Careful, this could overflow size_t */
702 : double newlimit;
703 :
704 12902 : newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
705 12902 : newlimit = Min(newlimit, (double) SIZE_MAX);
706 12902 : hash_table_bytes = (size_t) newlimit;
707 : }
708 :
709 1056642 : *space_allowed = hash_table_bytes;
710 :
711 : /*
712 : * If skew optimization is possible, estimate the number of skew buckets
713 : * that will fit in the memory allowed, and decrement the assumed space
714 : * available for the main hash table accordingly.
715 : *
716 : * We make the optimistic assumption that each skew bucket will contain
717 : * one inner-relation tuple. If that turns out to be low, we will recover
718 : * at runtime by reducing the number of skew buckets.
719 : *
720 : * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
721 : * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
722 : * will round up to the next power of 2 and then multiply by 4 to reduce
723 : * collisions.
724 : */
725 1056642 : if (useskew)
726 : {
727 : size_t bytes_per_mcv;
728 : size_t skew_mcvs;
729 :
730 : /*----------
731 : * Compute number of MCVs we could hold in hash_table_bytes
732 : *
733 : * Divisor is:
734 : * size of a hash tuple +
735 : * worst-case size of skewBucket[] per MCV +
736 : * size of skewBucketNums[] entry +
737 : * size of skew bucket struct itself
738 : *----------
739 : */
740 1052544 : bytes_per_mcv = tupsize +
741 : (8 * sizeof(HashSkewBucket *)) +
742 1052544 : sizeof(int) +
743 : SKEW_BUCKET_OVERHEAD;
744 1052544 : skew_mcvs = hash_table_bytes / bytes_per_mcv;
745 :
746 : /*
747 : * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
748 : * not to worry about size_t overflow in the multiplication)
749 : */
750 1052544 : skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
751 :
752 : /* Now clamp to integer range */
753 1052544 : skew_mcvs = Min(skew_mcvs, INT_MAX);
754 :
755 1052544 : *num_skew_mcvs = (int) skew_mcvs;
756 :
757 : /* Reduce hash_table_bytes by the amount needed for the skew table */
758 1052544 : if (skew_mcvs > 0)
759 1052544 : hash_table_bytes -= skew_mcvs * bytes_per_mcv;
760 : }
761 : else
762 4098 : *num_skew_mcvs = 0;
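/*
 * [Editor's note] Illustrative numbers, assuming hash_table_bytes = 4MB,
 * bytes_per_mcv around 150 bytes, and SKEW_HASH_MEM_PERCENT at its usual
 * value of 2: skew_mcvs comes out to a few hundred MCVs, and only about
 * 2% of the budget (~80kB here) is diverted from the main hash table to
 * the skew table.
 */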
763 :
764 : /*
765 : * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
766 : * memory is filled, assuming a single batch; but limit the value so that
767 : * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
768 : * nor MaxAllocSize.
769 : *
770 : * Note that both nbuckets and nbatch must be powers of 2 to make
771 : * ExecHashGetBucketAndBatch fast.
772 : */
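/*
 * [Editor's note] Why powers of 2 matter: with nbuckets = 1 <<
 * log2_nbuckets and nbatch also a power of 2, ExecHashGetBucketAndBatch
 * can use bit masks rather than division, along these lines (a sketch,
 * not the exact code):
 *
 *     *bucketno = hashvalue & (nbuckets - 1);
 *     *batchno = (hashvalue >> log2_nbuckets) & (nbatch - 1);
 *
 * A consequence used by the rebatching code below: when nbatch doubles,
 * batchno gains one bit, so each old batch splits into exactly two new
 * ones and tuples never move to a lower-numbered batch.
 */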
773 1056642 : max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
774 1056642 : max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
775 : /* If max_pointers isn't a power of 2, must round it down to one */
776 1056642 : max_pointers = pg_prevpower2_size_t(max_pointers);
777 :
778 : /* Also ensure we avoid integer overflow in nbatch and nbuckets */
779 : /* (this step is redundant given the current value of MaxAllocSize) */
780 1056642 : max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
781 :
782 1056642 : dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
783 1056642 : dbuckets = Min(dbuckets, max_pointers);
784 1056642 : nbuckets = (int) dbuckets;
785 : /* don't let nbuckets be really small, though ... */
786 1056642 : nbuckets = Max(nbuckets, 1024);
787 : /* ... and force it to be a power of 2. */
788 1056642 : nbuckets = pg_nextpower2_32(nbuckets);
789 :
790 : /*
791 : * If there's not enough space to store the projected number of tuples and
792 : * the required bucket headers, we will need multiple batches.
793 : */
794 1056642 : bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
795 1056642 : if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
796 : {
797 : /* We'll need multiple batches */
798 : size_t sbuckets;
799 : double dbatch;
800 : int minbatch;
801 : size_t bucket_size;
802 :
803 : /*
804 : * If Parallel Hash with combined hash_mem would still need multiple
805 : * batches, we'll have to fall back to regular hash_mem budget.
806 : */
807 5118 : if (try_combined_hash_mem)
808 : {
809 246 : ExecChooseHashTableSize(ntuples, tupwidth, useskew,
810 : false, parallel_workers,
811 : space_allowed,
812 : numbuckets,
813 : numbatches,
814 : num_skew_mcvs);
815 246 : return;
816 : }
817 :
818 : /*
819 : * Estimate the number of buckets we'll want to have when hash_mem is
820 : * entirely full. Each bucket will contain a bucket pointer plus
821 : * NTUP_PER_BUCKET tuples, whose projected size already includes
822 : * overhead for the hash code, pointer to the next tuple, etc.
823 : */
824 4872 : bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
825 4872 : if (hash_table_bytes <= bucket_size)
826 0 : sbuckets = 1; /* avoid pg_nextpower2_size_t(0) */
827 : else
828 4872 : sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
829 4872 : sbuckets = Min(sbuckets, max_pointers);
830 4872 : nbuckets = (int) sbuckets;
831 4872 : nbuckets = pg_nextpower2_32(nbuckets);
832 4872 : bucket_bytes = nbuckets * sizeof(HashJoinTuple);
833 :
834 : /*
835 : * Buckets are simple pointers to hashjoin tuples, while tupsize
836 : * includes the pointer, hash code, and MinimalTupleData. So buckets
837 : * should never really exceed 25% of hash_mem (even for
838 : * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
839 : * 2^N bytes, where we might get more because of doubling. So let's
840 : * look for 50% here.
841 : */
842 : Assert(bucket_bytes <= hash_table_bytes / 2);
843 :
844 : /* Calculate required number of batches. */
845 4872 : dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
846 4872 : dbatch = Min(dbatch, max_pointers);
847 4872 : minbatch = (int) dbatch;
848 4872 : nbatch = pg_nextpower2_32(Max(2, minbatch));
849 : }
850 :
851 : /*
852 : * Optimize the total amount of memory consumed by the hash node.
853 : *
854 : * The nbatch calculation above focuses on the size of the in-memory hash
855 : * table, assuming no per-batch overhead. Now adjust the number of batches
856 : * and the size of the hash table to minimize total memory consumed by the
857 : * hash node.
858 : *
859 : * Each batch file has a BLCKSZ buffer, and we may need two files per
860 : * batch (inner and outer side). So with enough batches this can be
861 : * significantly more memory than the hashtable itself.
862 : *
863 : * The total memory usage may be expressed by this formula:
864 : *
865 : * (inner_rel_bytes / nbatch) + (2 * nbatch * BLCKSZ) <= hash_table_bytes
866 : *
867 : * where (inner_rel_bytes / nbatch) is the size of the in-memory hash
868 : * table and (2 * nbatch * BLCKSZ) is the amount of memory used by file
869 : * buffers. But for sufficiently large values of inner_rel_bytes there
870 : * may not be an nbatch value that would make both parts fit into
871 : * hash_table_bytes.
872 : *
873 : * In this case we can't enforce the memory limit - we're going to exceed
874 : * it. We can however minimize the impact and use as little memory as
875 : * possible. (We haven't really enforced it before either, as we simply
876 : * ignored the batch files.)
877 : *
878 : * The formula for total memory usage says that given an inner relation of
879 : * size inner_rel_bytes, we may divide it into an arbitrary number of
880 : * batches. This determines both the size of the in-memory hash table and
881 : * the amount of memory needed for batch files. These two terms work in
882 : * opposite ways - when one decreases, the other increases.
883 : *
884 : * For low nbatch values, the hash table takes most of the memory, but at
885 : * some point the batch files start to dominate. If you combine these two
886 : * terms, the memory consumption (for a fixed size of the inner relation)
887 : * has a u-shape, with a minimum at some nbatch value.
888 : *
889 : * Our goal is to find this nbatch value, minimizing the memory usage. We
890 : * calculate the memory usage with half the batches (i.e. nbatch/2), and
891 : * if it's lower than the current memory usage we know it's better to use
892 : * fewer batches. We repeat this until reducing the number of batches does
893 : * not reduce the memory usage - we found the optimum. We know the optimum
894 : * exists, thanks to the u-shape.
895 : *
896 : * We only want to do this when exceeding the memory limit, not every
897 : * time. The goal is not to minimize memory usage in every case, but to
898 : * minimize the memory usage when we can't stay within the memory limit.
899 : *
900 : * For this reason we only consider reducing the number of batches. We
901 : * could try the opposite direction too, but that would save memory only
902 : * when most of the memory is used by the hash table. And the hash table
903 : * was used for the initial sizing, so we shouldn't be exceeding the
904 : * memory limit too much. We might save memory by using more batches, but
905 : * it would result in spilling more batch files, which does not seem like
906 : * a great trade off.
907 : *
908 : * While growing the hashtable, we also adjust the number of buckets, to
909 : * not have more than one tuple per bucket (load factor 1). We can only do
910 : * this during the initial sizing - once we start building the hash,
911 : * nbucket is fixed.
912 : */
913 1057336 : while (nbatch > 0)
914 : {
915 : /* how much memory are we using with current nbatch value */
916 1057336 : size_t current_space = hash_table_bytes + (2 * nbatch * BLCKSZ);
917 :
918 : /* how much memory would we use with half the batches */
919 1057336 : size_t new_space = hash_table_bytes * 2 + (nbatch * BLCKSZ);
920 :
921 : /* If the memory usage would not decrease, we found the optimum. */
922 1057336 : if (current_space < new_space)
923 1056396 : break;
924 :
925 : /*
926 : * It's better to use half the batches, so do that, adjust nbuckets
927 : * in the opposite direction, and double the allowance.
928 : */
929 940 : nbatch /= 2;
930 940 : nbuckets *= 2;
931 :
932 940 : *space_allowed = (*space_allowed) * 2;
933 : }
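/*
 * [Editor's note] Worked example with assumed numbers (hash_table_bytes
 * = 4MB, BLCKSZ = 8kB, initial nbatch = 1024):
 *
 *   nbatch = 1024: current = 4MB + 16MB = 20MB, halved = 8MB + 8MB = 16MB
 *   nbatch =  512: current = 4MB +  8MB = 12MB, halved = 8MB + 4MB = 12MB
 *   nbatch =  256: current = 4MB +  4MB =  8MB, halved = 8MB + 2MB = 10MB
 *
 * The first two steps do not increase the projected usage, so we halve;
 * at nbatch = 256 it would, so we stop there, having quadrupled nbuckets
 * and *space_allowed along the way.
 */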
934 :
935 : Assert(nbuckets > 0);
936 : Assert(nbatch > 0);
937 :
938 1056396 : *numbuckets = nbuckets;
939 1056396 : *numbatches = nbatch;
940 : }
941 :
942 :
943 : /* ----------------------------------------------------------------
944 : * ExecHashTableDestroy
945 : *
946 : * destroy a hash table
947 : * ----------------------------------------------------------------
948 : */
949 : void
950 19170 : ExecHashTableDestroy(HashJoinTable hashtable)
951 : {
952 : int i;
953 :
954 : /*
955 : * Make sure all the temp files are closed. We skip batch 0, since it
956 : * can't have any temp files (and the arrays might not even exist if
957 : * nbatch is only 1). Parallel hash joins don't use these files.
958 : */
959 19170 : if (hashtable->innerBatchFile != NULL)
960 : {
961 836 : for (i = 1; i < hashtable->nbatch; i++)
962 : {
963 670 : if (hashtable->innerBatchFile[i])
964 0 : BufFileClose(hashtable->innerBatchFile[i]);
965 670 : if (hashtable->outerBatchFile[i])
966 0 : BufFileClose(hashtable->outerBatchFile[i]);
967 : }
968 : }
969 :
970 : /* Release working memory (batchCxt is a child, so it goes away too) */
971 19170 : MemoryContextDelete(hashtable->hashCxt);
972 :
973 : /* And drop the control block */
974 19170 : pfree(hashtable);
975 19170 : }
976 :
977 : /*
978 : * Consider adjusting the allowed hash table size, depending on the number
979 : * of batches, to minimize the overall memory usage (for both the hashtable
980 : * and batch files).
981 : *
982 : * We're adjusting the size of the hash table, not the (optimal) number of
983 : * buckets. We can't change that once we start building the hash, due to how
984 : * ExecHashGetBucketAndBatch calculates batchno/bucketno from the hash. This
985 : * means the load factor may not be optimal, but we're in damage control, so
986 : * we accept slower lookups. It's still much better than batch explosion.
987 : *
988 : * Returns true if we chose to increase the batch size (and thus we don't
989 : * need to add batches), and false if we should increase nbatch.
990 : */
991 : static bool
992 126 : ExecHashIncreaseBatchSize(HashJoinTable hashtable)
993 : {
994 : /*
995 : * How much additional memory would doubling nbatch use? Each batch may
996 : * require two buffered files (inner/outer), with a BLCKSZ buffer.
997 : */
998 126 : size_t batchSpace = (hashtable->nbatch * 2 * BLCKSZ);
999 :
1000 : /*
1001 : * Compare the new space needed for doubling nbatch and for enlarging the
1002 : * in-memory hash table. If doubling the hash table needs less memory,
1003 : * just do that. Otherwise, continue with doubling nbatch.
1004 : *
1005 : * We're either doubling spaceAllowed or batchSpace, so choosing the
1006 : * option that increases the memory usage the least is the same as
1007 : * comparing the values directly.
1008 : */
1009 126 : if (hashtable->spaceAllowed <= batchSpace)
1010 : {
1011 0 : hashtable->spaceAllowed *= 2;
1012 0 : return true;
1013 : }
1014 :
1015 126 : return false;
1016 : }
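/*
 * [Editor's note] Illustrative arithmetic, with BLCKSZ = 8kB: at nbatch =
 * 32, doubling the batch count would add batchSpace = 32 * 2 * 8kB =
 * 512kB of file buffers. With spaceAllowed = 4MB that is the cheaper
 * option, so we return false and let nbatch double. Once nbatch reaches
 * 256 (batchSpace = 4MB >= spaceAllowed), doubling the in-memory table is
 * at least as cheap, so we return true instead.
 */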
1017 :
1018 : /*
1019 : * ExecHashIncreaseNumBatches
1020 : * increase the original number of batches in order to reduce
1021 : * current memory consumption
1022 : */
1023 : static void
1024 518226 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
1025 : {
1026 518226 : int oldnbatch = hashtable->nbatch;
1027 518226 : int curbatch = hashtable->curbatch;
1028 : int nbatch;
1029 : long ninmemory;
1030 : long nfreed;
1031 : HashMemoryChunk oldchunks;
1032 :
1033 : /* do nothing if we've decided to shut off growth */
1034 518226 : if (!hashtable->growEnabled)
1035 518100 : return;
1036 :
1037 : /* safety check to avoid overflow */
1038 126 : if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
1039 0 : return;
1040 :
1041 : /* consider increasing size of the in-memory hash table instead */
1042 126 : if (ExecHashIncreaseBatchSize(hashtable))
1043 0 : return;
1044 :
1045 126 : nbatch = oldnbatch * 2;
1046 : Assert(nbatch > 1);
1047 :
1048 : #ifdef HJDEBUG
1049 : printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
1050 : hashtable, nbatch, hashtable->spaceUsed);
1051 : #endif
1052 :
1053 126 : if (hashtable->innerBatchFile == NULL)
1054 : {
1055 60 : MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
1056 :
1057 : /* we had no file arrays before */
1058 60 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
1059 60 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
1060 :
1061 60 : MemoryContextSwitchTo(oldcxt);
1062 :
1063 : /* time to establish the temp tablespaces, too */
1064 60 : PrepareTempTablespaces();
1065 : }
1066 : else
1067 : {
1068 : /* enlarge arrays and zero out added entries */
1069 66 : hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
1070 66 : hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
1071 : }
1072 :
1073 126 : hashtable->nbatch = nbatch;
1074 :
1075 : /*
1076 : * Scan through the existing hash table entries and dump out any that are
1077 : * no longer of the current batch.
1078 : */
1079 126 : ninmemory = nfreed = 0;
1080 :
1081 : /* If we know we need to resize nbuckets, we can do it while rebatching. */
1082 126 : if (hashtable->nbuckets_optimal != hashtable->nbuckets)
1083 : {
1084 : /* we never decrease the number of buckets */
1085 : Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
1086 :
1087 60 : hashtable->nbuckets = hashtable->nbuckets_optimal;
1088 60 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1089 :
1090 60 : hashtable->buckets.unshared =
1091 60 : repalloc_array(hashtable->buckets.unshared,
1092 : HashJoinTuple, hashtable->nbuckets);
1093 : }
1094 :
1095 : /*
1096 : * We will scan through the chunks directly, so that we can reset the
1097 : * buckets now and not have to keep track of which tuples in the buckets
1098 : * have already been processed. We will free the old chunks as we go.
1099 : */
1100 126 : memset(hashtable->buckets.unshared, 0,
1101 126 : sizeof(HashJoinTuple) * hashtable->nbuckets);
1102 126 : oldchunks = hashtable->chunks;
1103 126 : hashtable->chunks = NULL;
1104 :
1105 : /* so, let's scan through the old chunks, and all tuples in each chunk */
1106 630 : while (oldchunks != NULL)
1107 : {
1108 504 : HashMemoryChunk nextchunk = oldchunks->next.unshared;
1109 :
1110 : /* position within the buffer (up to oldchunks->used) */
1111 504 : size_t idx = 0;
1112 :
1113 : /* process all tuples stored in this chunk (and then free it) */
1114 344178 : while (idx < oldchunks->used)
1115 : {
1116 343674 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
1117 343674 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1118 343674 : int hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
1119 : int bucketno;
1120 : int batchno;
1121 :
1122 343674 : ninmemory++;
1123 343674 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1124 : &bucketno, &batchno);
1125 :
1126 343674 : if (batchno == curbatch)
1127 : {
1128 : /* keep tuple in memory - copy it into the new chunk */
1129 : HashJoinTuple copyTuple;
1130 :
1131 129894 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1132 129894 : memcpy(copyTuple, hashTuple, hashTupleSize);
1133 :
1134 : /* and add it back to the appropriate bucket */
1135 129894 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1136 129894 : hashtable->buckets.unshared[bucketno] = copyTuple;
1137 : }
1138 : else
1139 : {
1140 : /* dump it out */
1141 : Assert(batchno > curbatch);
1142 213780 : ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
1143 : hashTuple->hashvalue,
1144 213780 : &hashtable->innerBatchFile[batchno],
1145 : hashtable);
1146 :
1147 213780 : hashtable->spaceUsed -= hashTupleSize;
1148 213780 : nfreed++;
1149 : }
1150 :
1151 : /* next tuple in this chunk */
1152 343674 : idx += MAXALIGN(hashTupleSize);
1153 :
1154 : /* allow this loop to be cancellable */
1155 343674 : CHECK_FOR_INTERRUPTS();
1156 : }
1157 :
1158 : /* we're done with this chunk - free it and proceed to the next one */
1159 504 : pfree(oldchunks);
1160 504 : oldchunks = nextchunk;
1161 : }
1162 :
1163 : #ifdef HJDEBUG
1164 : printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
1165 : hashtable, nfreed, ninmemory, hashtable->spaceUsed);
1166 : #endif
1167 :
1168 : /*
1169 : * If we dumped out either all or none of the tuples in the table, disable
1170 : * further expansion of nbatch. This situation implies that we have
1171 : * enough tuples of identical hashvalues to overflow spaceAllowed.
1172 : * Increasing nbatch will not fix it since there's no way to subdivide the
1173 : * group any more finely. We have to just gut it out and hope the server
1174 : * has enough RAM.
1175 : */
1176 126 : if (nfreed == 0 || nfreed == ninmemory)
1177 : {
1178 30 : hashtable->growEnabled = false;
1179 : #ifdef HJDEBUG
1180 : printf("Hashjoin %p: disabling further increase of nbatch\n",
1181 : hashtable);
1182 : #endif
1183 : }
1184 : }
1185 :
1186 : /*
1187 : * ExecParallelHashIncreaseNumBatches
1188 : * Every participant attached to grow_batches_barrier must run this
1189 : * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
1190 : */
1191 : static void
1192 48 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
1193 : {
1194 48 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1195 :
1196 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1197 :
1198 : /*
1199 : * It's unlikely, but we need to be prepared for new participants to show
1200 : * up while we're in the middle of this operation, so we need to switch
1201 : * on the barrier phase here.
1202 : */
1203 48 : switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
1204 : {
1205 48 : case PHJ_GROW_BATCHES_ELECT:
1206 :
1207 : /*
1208 : * Elect one participant to prepare to grow the number of batches.
1209 : * This involves reallocating or resetting the buckets of batch 0
1210 : * in preparation for all participants to begin repartitioning the
1211 : * tuples.
1212 : */
1213 48 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1214 : WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
1215 : {
1216 : dsa_pointer_atomic *buckets;
1217 : ParallelHashJoinBatch *old_batch0;
1218 : int new_nbatch;
1219 : int i;
1220 :
1221 : /* Move the old batch out of the way. */
1222 48 : old_batch0 = hashtable->batches[0].shared;
1223 48 : pstate->old_batches = pstate->batches;
1224 48 : pstate->old_nbatch = hashtable->nbatch;
1225 48 : pstate->batches = InvalidDsaPointer;
1226 :
1227 : /* Free this backend's old accessors. */
1228 48 : ExecParallelHashCloseBatchAccessors(hashtable);
1229 :
1230 : /* Figure out how many batches to use. */
1231 48 : if (hashtable->nbatch == 1)
1232 : {
1233 : /*
1234 : * We are going from single-batch to multi-batch. We need
1235 : * to switch from one large combined memory budget to the
1236 : * regular hash_mem budget.
1237 : */
1238 36 : pstate->space_allowed = get_hash_memory_limit();
1239 :
1240 : /*
1241 : * The combined hash_mem of all participants wasn't
1242 : * enough. Therefore one batch per participant would be
1243 : * approximately equivalent and would probably also be
1244 : * insufficient. So try two batches per participant,
1245 : * rounded up to a power of two.
1246 : */
1247 36 : new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
1248 : }
1249 : else
1250 : {
1251 : /*
1252 : * We were already multi-batched. Try doubling the number
1253 : * of batches.
1254 : */
1255 12 : new_nbatch = hashtable->nbatch * 2;
1256 : }
1257 :
1258 : /* Allocate new larger generation of batches. */
1259 : Assert(hashtable->nbatch == pstate->nbatch);
1260 48 : ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
1261 : Assert(hashtable->nbatch == pstate->nbatch);
1262 :
1263 : /* Replace or recycle batch 0's bucket array. */
1264 48 : if (pstate->old_nbatch == 1)
1265 : {
1266 : double dtuples;
1267 : double dbuckets;
1268 : int new_nbuckets;
1269 : uint32 max_buckets;
1270 :
1271 : /*
1272 : * We probably also need a smaller bucket array. How many
1273 : * tuples do we expect per batch, assuming we have only
1274 : * half of them so far? Normally we don't need to change
1275 : * the bucket array's size, because the size of each batch
1276 : * stays the same as we add more batches, but in this
1277 : * special case we move from a large batch to many smaller
1278 : * batches and it would be wasteful to keep the large
1279 : * array.
1280 : */
1281 36 : dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
1282 :
1283 : /*
1284 : * We need to calculate the maximum number of buckets to
1285 : * stay within the MaxAllocSize boundary. Round the
1286 : * maximum number to the previous power of 2 given that
1287 : * later we round the number to the next power of 2.
1288 : */
1289 36 : max_buckets = pg_prevpower2_32((uint32)
1290 : (MaxAllocSize / sizeof(dsa_pointer_atomic)));
1291 36 : dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
1292 36 : dbuckets = Min(dbuckets, max_buckets);
1293 36 : new_nbuckets = (int) dbuckets;
1294 36 : new_nbuckets = Max(new_nbuckets, 1024);
1295 36 : new_nbuckets = pg_nextpower2_32(new_nbuckets);
1296 36 : dsa_free(hashtable->area, old_batch0->buckets);
1297 72 : hashtable->batches[0].shared->buckets =
1298 36 : dsa_allocate(hashtable->area,
1299 : sizeof(dsa_pointer_atomic) * new_nbuckets);
1300 : buckets = (dsa_pointer_atomic *)
1301 36 : dsa_get_address(hashtable->area,
1302 36 : hashtable->batches[0].shared->buckets);
1303 110628 : for (i = 0; i < new_nbuckets; ++i)
1304 110592 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1305 36 : pstate->nbuckets = new_nbuckets;
1306 : }
1307 : else
1308 : {
1309 : /* Recycle the existing bucket array. */
1310 12 : hashtable->batches[0].shared->buckets = old_batch0->buckets;
1311 : buckets = (dsa_pointer_atomic *)
1312 12 : dsa_get_address(hashtable->area, old_batch0->buckets);
1313 49164 : for (i = 0; i < hashtable->nbuckets; ++i)
1314 49152 : dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
1315 : }
1316 :
1317 : /* Move all chunks to the work queue for parallel processing. */
1318 48 : pstate->chunk_work_queue = old_batch0->chunks;
1319 :
1320 : /* Disable further growth temporarily while we're growing. */
1321 48 : pstate->growth = PHJ_GROWTH_DISABLED;
1322 : }
1323 : else
1324 : {
1325 : /* All other participants just flush their tuples to disk. */
1326 0 : ExecParallelHashCloseBatchAccessors(hashtable);
1327 : }
1328 : /* Fall through. */
1329 :
1330 : case PHJ_GROW_BATCHES_REALLOCATE:
1331 : /* Wait for the above to be finished. */
1332 48 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1333 : WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
1334 : /* Fall through. */
1335 :
1336 48 : case PHJ_GROW_BATCHES_REPARTITION:
1337 : /* Make sure that we have the current dimensions and buckets. */
1338 48 : ExecParallelHashEnsureBatchAccessors(hashtable);
1339 48 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1340 : /* Then partition, flush counters. */
1341 48 : ExecParallelHashRepartitionFirst(hashtable);
1342 48 : ExecParallelHashRepartitionRest(hashtable);
1343 48 : ExecParallelHashMergeCounters(hashtable);
1344 : /* Wait for the above to be finished. */
1345 48 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1346 : WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
1347 : /* Fall through. */
1348 :
1349 48 : case PHJ_GROW_BATCHES_DECIDE:
1350 :
1351 : /*
1352 : * Elect one participant to clean up and decide whether further
1353 : * repartitioning is needed, or should be disabled because it's
1354 : * not helping.
1355 : */
1356 48 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1357 : WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
1358 : {
1359 : ParallelHashJoinBatch *old_batches;
1360 48 : bool space_exhausted = false;
1361 48 : bool extreme_skew_detected = false;
1362 :
1363 : /* Make sure that we have the current dimensions and buckets. */
1364 48 : ExecParallelHashEnsureBatchAccessors(hashtable);
1365 48 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1366 :
1367 48 : old_batches = dsa_get_address(hashtable->area, pstate->old_batches);
1368 :
1369 : /* Are any of the new generation of batches exhausted? */
1370 336 : for (int i = 0; i < hashtable->nbatch; ++i)
1371 : {
1372 : ParallelHashJoinBatch *batch;
1373 : ParallelHashJoinBatch *old_batch;
1374 : int parent;
1375 :
1376 288 : batch = hashtable->batches[i].shared;
1377 288 : if (batch->space_exhausted ||
1378 288 : batch->estimated_size > pstate->space_allowed)
1379 24 : space_exhausted = true;
1380 :
1381 288 : parent = i % pstate->old_nbatch;
1382 288 : old_batch = NthParallelHashJoinBatch(old_batches, parent);
1383 288 : if (old_batch->space_exhausted ||
1384 72 : batch->estimated_size > pstate->space_allowed)
1385 : {
1386 : /*
1387 : * Did this batch receive ALL of the tuples from its
1388 : * parent batch? That would indicate that further
1389 : * repartitioning isn't going to help (the hash values
1390 : * are probably all the same).
1391 : */
1392 216 : if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
1393 24 : extreme_skew_detected = true;
1394 : }
1395 : }
1396 :
1397 : /* Don't keep growing if it's not helping or we'd overflow. */
1398 48 : if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
1399 24 : pstate->growth = PHJ_GROWTH_DISABLED;
1400 24 : else if (space_exhausted)
1401 0 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
1402 : else
1403 24 : pstate->growth = PHJ_GROWTH_OK;
1404 :
1405 : /* Free the old batches in shared memory. */
1406 48 : dsa_free(hashtable->area, pstate->old_batches);
1407 48 : pstate->old_batches = InvalidDsaPointer;
1408 : }
1409 : /* Fall through. */
1410 :
1411 : case PHJ_GROW_BATCHES_FINISH:
1412 : /* Wait for the above to complete. */
1413 48 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1414 : WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
1415 : }
1416 48 : }
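/*
 * [Editor's note] Summary of the grow_batches_barrier state machine
 * above, as a reading aid (one participant is elected for each serial
 * step; everyone helps with the parallel ones):
 *
 *   PHJ_GROW_BATCHES_ELECT       - one backend installs a new, larger
 *                                  generation of batches
 *   PHJ_GROW_BATCHES_REALLOCATE  - wait for that allocation to finish
 *   PHJ_GROW_BATCHES_REPARTITION - all backends repartition their tuples
 *   PHJ_GROW_BATCHES_DECIDE      - one backend checks for extreme skew
 *                                  and decides whether growth stays
 *                                  enabled
 *   PHJ_GROW_BATCHES_FINISH      - wait for the cleanup to finish
 */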
1417 :
1418 : /*
1419 : * Repartition the tuples currently loaded into memory for inner batch 0
1420 : * because the number of batches has been increased. Some tuples are retained
1421 : * in memory and some are written out to a later batch.
1422 : */
1423 : static void
1424 48 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
1425 : {
1426 : dsa_pointer chunk_shared;
1427 : HashMemoryChunk chunk;
1428 :
1429 : Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
1430 :
1431 336 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
1432 : {
1433 288 : size_t idx = 0;
1434 :
1435 : /* Repartition all tuples in this chunk. */
1436 220860 : while (idx < chunk->used)
1437 : {
1438 220572 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1439 220572 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1440 : HashJoinTuple copyTuple;
1441 : dsa_pointer shared;
1442 : int bucketno;
1443 : int batchno;
1444 :
1445 220572 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1446 : &bucketno, &batchno);
1447 :
1448 : Assert(batchno < hashtable->nbatch);
1449 220572 : if (batchno == 0)
1450 : {
1451 : /* It still belongs in batch 0. Copy to a new chunk. */
1452 : copyTuple =
1453 50748 : ExecParallelHashTupleAlloc(hashtable,
1454 50748 : HJTUPLE_OVERHEAD + tuple->t_len,
1455 : &shared);
1456 50748 : copyTuple->hashvalue = hashTuple->hashvalue;
1457 50748 : memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
1458 50748 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1459 : copyTuple, shared);
1460 : }
1461 : else
1462 : {
1463 169824 : size_t tuple_size =
1464 169824 : MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1465 :
1466 : /* It belongs in a later batch. */
1467 169824 : hashtable->batches[batchno].estimated_size += tuple_size;
1468 169824 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1469 169824 : &hashTuple->hashvalue, tuple);
1470 : }
1471 :
1472 : /* Count this tuple. */
1473 220572 : ++hashtable->batches[0].old_ntuples;
1474 220572 : ++hashtable->batches[batchno].ntuples;
1475 :
1476 220572 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1477 : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1478 : }
1479 :
1480 : /* Free this chunk. */
1481 288 : dsa_free(hashtable->area, chunk_shared);
1482 :
1483 288 : CHECK_FOR_INTERRUPTS();
1484 : }
1485 48 : }
1486 :
1487 : /*
1488 : * Help repartition inner batches 1..n.
1489 : */
1490 : static void
1491 48 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
1492 : {
1493 48 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1494 48 : int old_nbatch = pstate->old_nbatch;
1495 : SharedTuplestoreAccessor **old_inner_tuples;
1496 : ParallelHashJoinBatch *old_batches;
1497 : int i;
1498 :
1499 : /* Get our hands on the previous generation of batches. */
1500 : old_batches = (ParallelHashJoinBatch *)
1501 48 : dsa_get_address(hashtable->area, pstate->old_batches);
1502 48 : old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
1503 84 : for (i = 1; i < old_nbatch; ++i)
1504 : {
1505 36 : ParallelHashJoinBatch *shared =
1506 36 : NthParallelHashJoinBatch(old_batches, i);
1507 :
1508 36 : old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
1509 : ParallelWorkerNumber + 1,
1510 : &pstate->fileset);
1511 : }
1512 :
1513 : /* Join in the effort to repartition them. */
1514 84 : for (i = 1; i < old_nbatch; ++i)
1515 : {
1516 : MinimalTuple tuple;
1517 : uint32 hashvalue;
1518 :
1519 : /* Scan one partition from the previous generation. */
1520 36 : sts_begin_parallel_scan(old_inner_tuples[i]);
1521 161400 : while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
1522 : {
1523 161364 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1524 : int bucketno;
1525 : int batchno;
1526 :
1527 : /* Decide which partition it goes to in the new generation. */
1528 161364 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
1529 : &batchno);
1530 :
1531 161364 : hashtable->batches[batchno].estimated_size += tuple_size;
1532 161364 : ++hashtable->batches[batchno].ntuples;
1533 161364 : ++hashtable->batches[i].old_ntuples;
1534 :
1535 : /* Store the tuple in its new batch. */
1536 161364 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1537 : &hashvalue, tuple);
1538 :
1539 161364 : CHECK_FOR_INTERRUPTS();
1540 : }
1541 36 : sts_end_parallel_scan(old_inner_tuples[i]);
1542 : }
1543 :
1544 48 : pfree(old_inner_tuples);
1545 48 : }
1546 :
1547 : /*
1548 : * Transfer the backend-local per-batch counters to the shared totals.
1549 : */
1550 : static void
1551 292 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
1552 : {
1553 292 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1554 : int i;
1555 :
1556 292 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
1557 292 : pstate->total_tuples = 0;
1558 1376 : for (i = 0; i < hashtable->nbatch; ++i)
1559 : {
1560 1084 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
1561 :
1562 1084 : batch->shared->size += batch->size;
1563 1084 : batch->shared->estimated_size += batch->estimated_size;
1564 1084 : batch->shared->ntuples += batch->ntuples;
1565 1084 : batch->shared->old_ntuples += batch->old_ntuples;
1566 1084 : batch->size = 0;
1567 1084 : batch->estimated_size = 0;
1568 1084 : batch->ntuples = 0;
1569 1084 : batch->old_ntuples = 0;
1570 1084 : pstate->total_tuples += batch->shared->ntuples;
1571 : }
1572 292 : LWLockRelease(&pstate->lock);
1573 292 : }
1574 :
1575 : /*
1576 : * ExecHashIncreaseNumBuckets
1577 : * increase the original number of buckets in order to reduce the
1578 : * number of tuples per bucket
1579 : */
1580 : static void
1581 72 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
1582 : {
1583 : HashMemoryChunk chunk;
1584 :
1585 : /* do nothing if not an increase (it's called increase for a reason) */
1586 72 : if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
1587 0 : return;
1588 :
1589 : #ifdef HJDEBUG
1590 : printf("Hashjoin %p: increasing nbuckets %d => %d\n",
1591 : hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
1592 : #endif
1593 :
1594 72 : hashtable->nbuckets = hashtable->nbuckets_optimal;
1595 72 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1596 :
1597 : Assert(hashtable->nbuckets > 1);
1598 : Assert(hashtable->nbuckets <= (INT_MAX / 2));
1599 : Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
1600 :
1601 : /*
1602 : * Just reallocate the proper number of buckets - we don't need to walk
1603 : * through them - we can walk the dense-allocated chunks (just like in
1604 : * ExecHashIncreaseNumBatches, but without all the copying into new
1605 : * chunks)
1606 : */
1607 72 : hashtable->buckets.unshared =
1608 72 : repalloc_array(hashtable->buckets.unshared,
1609 : HashJoinTuple, hashtable->nbuckets);
1610 :
1611 72 : memset(hashtable->buckets.unshared, 0,
1612 72 : hashtable->nbuckets * sizeof(HashJoinTuple));
1613 :
1614 : /* scan through all tuples in all chunks to rebuild the hash table */
1615 996 : for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
1616 : {
1617 : /* process all tuples stored in this chunk */
1618 924 : size_t idx = 0;
1619 :
1620 705042 : while (idx < chunk->used)
1621 : {
1622 704118 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1623 : int bucketno;
1624 : int batchno;
1625 :
1626 704118 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1627 : &bucketno, &batchno);
1628 :
1629 : /* add the tuple to the proper bucket */
1630 704118 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1631 704118 : hashtable->buckets.unshared[bucketno] = hashTuple;
1632 :
1633 : /* advance index past the tuple */
1634 704118 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1635 : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1636 : }
1637 :
1638 : /* allow this loop to be cancellable */
1639 924 : CHECK_FOR_INTERRUPTS();
1640 : }
1641 : }
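/*
 * Editor's sketch (not executor code) of the chunk walk above: because
 * tuples are stored back to back at MAXALIGN'd offsets, a single 'used'
 * byte count per chunk is enough to enumerate them.  ALIGN8 and
 * ExampleTuple are made-up stand-ins for MAXALIGN and HashJoinTuple.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN8(x) (((x) + 7) & ~(size_t) 7)

typedef struct ExampleTuple
{
    uint32_t    hashvalue;
    uint32_t    t_len;          /* payload bytes following the header */
} ExampleTuple;

int
main(void)
{
    _Alignas(8) char buf[256];
    size_t      used = 0;

    /* densely "allocate" three tuples of varying payload size */
    for (uint32_t len = 1; len <= 3; len++)
    {
        ExampleTuple *t = (ExampleTuple *) (buf + used);

        t->hashvalue = len * 0x9E3779B9u;   /* arbitrary hash */
        t->t_len = len;
        memset(t + 1, 0, len);
        used += ALIGN8(sizeof(ExampleTuple) + len);
    }

    /* walk them back exactly as the rebuild loop above does */
    for (size_t idx = 0; idx < used;)
    {
        ExampleTuple *t = (ExampleTuple *) (buf + idx);

        printf("tuple at %zu, hash %08x\n", idx, (unsigned) t->hashvalue);
        idx += ALIGN8(sizeof(ExampleTuple) + t->t_len);
    }
    return 0;
}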
1642 :
1643 : static void
1644 142 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
1645 : {
1646 142 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1647 : int i;
1648 : HashMemoryChunk chunk;
1649 : dsa_pointer chunk_s;
1650 :
1651 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1652 :
1653 : /*
1654 : * It's unlikely, but we need to be prepared for new participants to show
1655 : * up while we're in the middle of this operation, so we need to switch
1656 : * on the barrier phase here.
1657 : */
1658 142 : switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
1659 : {
1660 142 : case PHJ_GROW_BUCKETS_ELECT:
1661 : /* Elect one participant to prepare to increase nbuckets. */
1662 142 : if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1663 : WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
1664 : {
1665 : size_t size;
1666 : dsa_pointer_atomic *buckets;
1667 :
1668 : /* Double the size of the bucket array. */
1669 108 : pstate->nbuckets *= 2;
1670 108 : size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
1671 108 : hashtable->batches[0].shared->size += size / 2;
1672 108 : dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
1673 216 : hashtable->batches[0].shared->buckets =
1674 108 : dsa_allocate(hashtable->area, size);
1675 : buckets = (dsa_pointer_atomic *)
1676 108 : dsa_get_address(hashtable->area,
1677 108 : hashtable->batches[0].shared->buckets);
1678 933996 : for (i = 0; i < pstate->nbuckets; ++i)
1679 933888 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1680 :
1681 : /* Put the chunk list onto the work queue. */
1682 108 : pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
1683 :
1684 : /* Clear the flag. */
1685 108 : pstate->growth = PHJ_GROWTH_OK;
1686 : }
1687 : /* Fall through. */
1688 :
1689 : case PHJ_GROW_BUCKETS_REALLOCATE:
1690 : /* Wait for the above to complete. */
1691 142 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1692 : WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
1693 : /* Fall through. */
1694 :
1695 142 : case PHJ_GROW_BUCKETS_REINSERT:
1696 : /* Reinsert all tuples into the hash table. */
1697 142 : ExecParallelHashEnsureBatchAccessors(hashtable);
1698 142 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1699 806 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
1700 : {
1701 664 : size_t idx = 0;
1702 :
1703 543756 : while (idx < chunk->used)
1704 : {
1705 543092 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1706 543092 : dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
1707 : int bucketno;
1708 : int batchno;
1709 :
1710 543092 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1711 : &bucketno, &batchno);
1712 : Assert(batchno == 0);
1713 :
1714 : /* add the tuple to the proper bucket */
1715 543092 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1716 : hashTuple, shared);
1717 :
1718 : /* advance index past the tuple */
1719 543092 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1720 : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1721 : }
1722 :
1723 : /* allow this loop to be cancellable */
1724 664 : CHECK_FOR_INTERRUPTS();
1725 : }
1726 142 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1727 : WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
1728 : }
1729 142 : }
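/*
 * Editor's sketch of the election idiom above using a POSIX barrier, which
 * behaves analogously: pthread_barrier_wait() returns
 * PTHREAD_BARRIER_SERIAL_THREAD in exactly one thread, just as
 * BarrierArriveAndWait() elects exactly one participant.  Illustration
 * only; the executor uses its own dynamic Barrier, which additionally
 * tolerates late attachers via the phase switch above.
 */
#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_barrier_t barrier;

static void *
worker(void *arg)
{
    (void) arg;

    /* ELECT: exactly one thread does the shared preparation */
    if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
        puts("elected: reallocate the shared bucket array");

    /* REALLOCATE: everyone waits until the preparation is done */
    pthread_barrier_wait(&barrier);

    /* REINSERT: all threads now repopulate buckets in parallel */
    puts("reinserting my chunks");
    return NULL;
}

int
main(void)
{
    pthread_t   threads[NWORKERS];

    pthread_barrier_init(&barrier, NULL, NWORKERS);
    for (int i = 0; i < NWORKERS; i++)
        pthread_create(&threads[i], NULL, worker, NULL);
    for (int i = 0; i < NWORKERS; i++)
        pthread_join(threads[i], NULL);
    pthread_barrier_destroy(&barrier);
    return 0;
}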
1730 :
1731 : /*
1732 : * ExecHashTableInsert
1733 : * insert a tuple into the hash table; depending on the hash value,
1734 : * it may just go to a temp file for later batches
1735 : *
1736 : * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
1737 : * tuple; the minimal case in particular is certain to happen while reloading
1738 : * tuples from batch files. We could save some cycles in the regular-tuple
1739 : * case by not forcing the slot contents into minimal form; not clear if it's
1740 : * worth the messiness required.
1741 : */
1742 : void
1743 9937924 : ExecHashTableInsert(HashJoinTable hashtable,
1744 : TupleTableSlot *slot,
1745 : uint32 hashvalue)
1746 : {
1747 : bool shouldFree;
1748 9937924 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1749 : int bucketno;
1750 : int batchno;
1751 :
1752 9937924 : ExecHashGetBucketAndBatch(hashtable, hashvalue,
1753 : &bucketno, &batchno);
1754 :
1755 : /*
1756 : * decide whether to put the tuple in the hash table or a temp file
1757 : */
1758 9937924 : if (batchno == hashtable->curbatch)
1759 : {
1760 : /*
1761 : * put the tuple in hash table
1762 : */
1763 : HashJoinTuple hashTuple;
1764 : int hashTupleSize;
1765 7798612 : double ntuples = (hashtable->totalTuples - hashtable->skewTuples);
1766 :
1767 : /* Create the HashJoinTuple */
1768 7798612 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
1769 7798612 : hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1770 :
1771 7798612 : hashTuple->hashvalue = hashvalue;
1772 7798612 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1773 :
1774 : /*
1775 : * We always reset the tuple-matched flag on insertion. This is okay
1776 : * even when reloading a tuple from a batch file, since the tuple
1777 : * could not possibly have been matched to an outer tuple before it
1778 : * went into the batch file.
1779 : */
1780 7798612 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1781 :
1782 : /* Push it onto the front of the bucket's list */
1783 7798612 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1784 7798612 : hashtable->buckets.unshared[bucketno] = hashTuple;
1785 :
1786 : /*
1787 : * Increase the (optimal) number of buckets if we just exceeded the
1788 : * NTUP_PER_BUCKET threshold, but only when there's still a single
1789 : * batch.
1790 : */
1791 7798612 : if (hashtable->nbatch == 1 &&
1792 5209070 : ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
1793 : {
1794 : /* Guard against integer overflow and alloc size overflow */
1795 192 : if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
1796 192 : hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
1797 : {
1798 192 : hashtable->nbuckets_optimal *= 2;
1799 192 : hashtable->log2_nbuckets_optimal += 1;
1800 : }
1801 : }
1802 :
1803 : /* Account for space used, and back off if we've used too much */
1804 7798612 : hashtable->spaceUsed += hashTupleSize;
1805 7798612 : if (hashtable->spaceUsed > hashtable->spacePeak)
1806 5992510 : hashtable->spacePeak = hashtable->spaceUsed;
1807 7798612 : if (hashtable->spaceUsed +
1808 7798612 : hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
1809 7798612 : > hashtable->spaceAllowed)
1810 518226 : ExecHashIncreaseNumBatches(hashtable);
1811 : }
1812 : else
1813 : {
1814 : /*
1815 : * put the tuple into a temp file for later batches
1816 : */
1817 : Assert(batchno > hashtable->curbatch);
1818 2139312 : ExecHashJoinSaveTuple(tuple,
1819 : hashvalue,
1820 2139312 : &hashtable->innerBatchFile[batchno],
1821 : hashtable);
1822 : }
1823 :
1824 9937924 : if (shouldFree)
1825 7539890 : heap_free_minimal_tuple(tuple);
1826 9937924 : }
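/*
 * Editor's sketch of the growth bookkeeping above, with made-up numbers:
 * the optimal bucket count doubles whenever the tuple count passes
 * nbuckets_optimal * NTUP_PER_BUCKET (assumed to be 1 here, as in the
 * executor), and a batch split is requested once the tuples plus the
 * projected bucket array no longer fit in the space allowance.
 */
#include <stdio.h>

#define NTUP_PER_BUCKET 1

int
main(void)
{
    long        nbuckets_optimal = 1024;
    double      ntuples = 0;
    long        space_used = 0;
    long        space_allowed = 64 * 1024;  /* stand-in for hash_mem */
    long        tuple_size = 48;

    for (int i = 0; i < 2000; i++)
    {
        ntuples++;
        space_used += tuple_size;

        if (ntuples > nbuckets_optimal * NTUP_PER_BUCKET)
        {
            nbuckets_optimal *= 2;      /* same doubling rule as above */
            printf("double buckets to %ld at tuple %.0f\n",
                   nbuckets_optimal, ntuples);
        }

        /* charge the bucket array against the budget, as the executor does */
        if (space_used + nbuckets_optimal * (long) sizeof(void *)
            > space_allowed)
        {
            printf("would increase nbatch at tuple %.0f\n", ntuples);
            break;
        }
    }
    return 0;
}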
1827 :
1828 : /*
1829 : * ExecParallelHashTableInsert
1830 : * insert a tuple into a shared hash table or shared batch tuplestore
1831 : */
1832 : void
1833 2160098 : ExecParallelHashTableInsert(HashJoinTable hashtable,
1834 : TupleTableSlot *slot,
1835 : uint32 hashvalue)
1836 : {
1837 : bool shouldFree;
1838 2160098 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1839 : dsa_pointer shared;
1840 : int bucketno;
1841 : int batchno;
1842 :
1843 2160444 : retry:
1844 2160444 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1845 :
1846 2160444 : if (batchno == 0)
1847 : {
1848 : HashJoinTuple hashTuple;
1849 :
1850 : /* Try to load it into memory. */
1851 : Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
1852 : PHJ_BUILD_HASH_INNER);
1853 1293828 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1854 1293828 : HJTUPLE_OVERHEAD + tuple->t_len,
1855 : &shared);
1856 1293828 : if (hashTuple == NULL)
1857 322 : goto retry;
1858 :
1859 : /* Store the hash value in the HashJoinTuple header. */
1860 1293506 : hashTuple->hashvalue = hashvalue;
1861 1293506 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1862 1293506 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1863 :
1864 : /* Push it onto the front of the bucket's list */
1865 1293506 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1866 : hashTuple, shared);
1867 : }
1868 : else
1869 : {
1870 866616 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1871 :
1872 : Assert(batchno > 0);
1873 :
1874 : /* Try to preallocate space in the batch if necessary. */
1875 866616 : if (hashtable->batches[batchno].preallocated < tuple_size)
1876 : {
1877 1406 : if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
1878 24 : goto retry;
1879 : }
1880 :
1881 : Assert(hashtable->batches[batchno].preallocated >= tuple_size);
1882 866592 : hashtable->batches[batchno].preallocated -= tuple_size;
1883 866592 : sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
1884 : tuple);
1885 : }
1886 2160098 : ++hashtable->batches[batchno].ntuples;
1887 :
1888 2160098 : if (shouldFree)
1889 2160098 : heap_free_minimal_tuple(tuple);
1890 2160098 : }
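/*
 * Editor's sketch of the lock-free list push that ExecParallelHashPushTuple
 * is assumed to perform (a compare-and-swap loop on the bucket head),
 * transposed from dsa_pointer offsets to plain C11 atomic pointers.
 */
#include <stdatomic.h>
#include <stddef.h>

typedef struct Node
{
    struct Node *next;
} Node;

static void
push(_Atomic(Node *) *head, Node *node)
{
    Node       *old = atomic_load(head);

    do
    {
        /* link to the current head; retry if another process won the race */
        node->next = old;
    } while (!atomic_compare_exchange_weak(head, &old, node));
}

int
main(void)
{
    static _Atomic(Node *) bucket = NULL;
    static Node n1, n2;

    push(&bucket, &n1);
    push(&bucket, &n2);         /* bucket now points at n2 -> n1 */
    return 0;
}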
1891 :
1892 : /*
1893 : * Insert a tuple into the current hash table. Unlike
1894 : * ExecParallelHashTableInsert, this version is not prepared to send the tuple
1895 : * to other batches or to run out of memory, and should only be called with
1896 : * tuples that belong in the current batch once growth has been disabled.
1897 : */
1898 : void
1899 1036416 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
1900 : TupleTableSlot *slot,
1901 : uint32 hashvalue)
1902 : {
1903 : bool shouldFree;
1904 1036416 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1905 : HashJoinTuple hashTuple;
1906 : dsa_pointer shared;
1907 : int batchno;
1908 : int bucketno;
1909 :
1910 1036416 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1911 : Assert(batchno == hashtable->curbatch);
1912 1036416 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1913 1036416 : HJTUPLE_OVERHEAD + tuple->t_len,
1914 : &shared);
1915 1036416 : hashTuple->hashvalue = hashvalue;
1916 1036416 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1917 1036416 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1918 1036416 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1919 : hashTuple, shared);
1920 :
1921 1036416 : if (shouldFree)
1922 0 : heap_free_minimal_tuple(tuple);
1923 1036416 : }
1924 :
1925 :
1926 : /*
1927 : * ExecHashGetBucketAndBatch
1928 : * Determine the bucket number and batch number for a hash value
1929 : *
1930 : * Note: on-the-fly increases of nbatch must not change the bucket number
1931 : * for a given hash code (since we don't move tuples to different hash
1932 : * chains), and must only cause the batch number to remain the same or
1933 : * increase. Our algorithm is
1934 : * bucketno = hashvalue MOD nbuckets
1935 : * batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
1936 : * where nbuckets and nbatch are both expected to be powers of 2, so we can
1937 : * do the computations by shifting and masking. (This assumes that all hash
1938 : * functions are good about randomizing all their output bits, else we are
1939 : * likely to have very skewed bucket or batch occupancy.)
1940 : *
1941 : * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
1942 : * bucket count growth. Once we start batching, the value is fixed and does
1943 : * not change over the course of the join (making it possible to compute batch
1944 : * number the way we do here).
1945 : *
1946 : * nbatch is always a power of 2; we increase it only by doubling it. This
1947 : * effectively adds one more bit to the top of the batchno. In very large
1948 : * joins, we might run out of bits to add, so we do this by rotating the hash
1949 : * value. This causes batchno to steal bits from bucketno when the number of
1950 : * virtual buckets exceeds 2^32. It's better to have longer bucket chains
1951 : * than to lose the ability to divide batches.
1952 : */
1953 : void
1954 33708826 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
1955 : uint32 hashvalue,
1956 : int *bucketno,
1957 : int *batchno)
1958 : {
1959 33708826 : uint32 nbuckets = (uint32) hashtable->nbuckets;
1960 33708826 : uint32 nbatch = (uint32) hashtable->nbatch;
1961 :
1962 33708826 : if (nbatch > 1)
1963 : {
1964 13177226 : *bucketno = hashvalue & (nbuckets - 1);
1965 13177226 : *batchno = pg_rotate_right32(hashvalue,
1966 13177226 : hashtable->log2_nbuckets) & (nbatch - 1);
1967 : }
1968 : else
1969 : {
1970 20531600 : *bucketno = hashvalue & (nbuckets - 1);
1971 20531600 : *batchno = 0;
1972 : }
1973 33708826 : }
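/*
 * Editor's sketch of the computation above with concrete numbers; ror32()
 * is a local stand-in assumed to match pg_rotate_right32()'s semantics
 * (rotate right by n bits).  With nbuckets = 1024 (log2 = 10) and
 * nbatch = 8, hash 0xDEADBEEF lands in bucket 751, batch 7.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
ror32(uint32_t x, int n)
{
    return (x >> n) | (x << ((32 - n) & 31));
}

int
main(void)
{
    uint32_t    hashvalue = 0xDEADBEEF;
    uint32_t    nbuckets = 1024;        /* power of 2 */
    uint32_t    nbatch = 8;             /* power of 2 */
    int         log2_nbuckets = 10;

    uint32_t    bucketno = hashvalue & (nbuckets - 1);
    uint32_t    batchno = ror32(hashvalue, log2_nbuckets) & (nbatch - 1);

    printf("bucket %u, batch %u\n",     /* prints: bucket 751, batch 7 */
           (unsigned) bucketno, (unsigned) batchno);
    return 0;
}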
1974 :
1975 : /*
1976 : * ExecScanHashBucket
1977 : * scan a hash bucket for matches to the current outer tuple
1978 : *
1979 : * The current outer tuple must be stored in econtext->ecxt_outertuple.
1980 : *
1981 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
1982 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
1983 : * for the latter.
1984 : */
1985 : bool
1986 18343892 : ExecScanHashBucket(HashJoinState *hjstate,
1987 : ExprContext *econtext)
1988 : {
1989 18343892 : ExprState *hjclauses = hjstate->hashclauses;
1990 18343892 : HashJoinTable hashtable = hjstate->hj_HashTable;
1991 18343892 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
1992 18343892 : uint32 hashvalue = hjstate->hj_CurHashValue;
1993 :
1994 : /*
1995 : * hj_CurTuple is the address of the tuple last returned from the current
1996 : * bucket, or NULL if it's time to start scanning a new bucket.
1997 : *
1998 : * If the tuple hashed to a skew bucket then scan the skew bucket,
1999 : * otherwise scan the standard hashtable bucket.
2000 : */
2001 18343892 : if (hashTuple != NULL)
2002 4495858 : hashTuple = hashTuple->next.unshared;
2003 13848034 : else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
2004 2400 : hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
2005 : else
2006 13845634 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
2007 :
2008 22620110 : while (hashTuple != NULL)
2009 : {
2010 12384002 : if (hashTuple->hashvalue == hashvalue)
2011 : {
2012 : TupleTableSlot *inntuple;
2013 :
2014 : /* insert hashtable's tuple into exec slot so ExecQual sees it */
2015 8107796 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2016 : hjstate->hj_HashTupleSlot,
2017 : false); /* do not pfree */
2018 8107796 : econtext->ecxt_innertuple = inntuple;
2019 :
2020 8107796 : if (ExecQualAndReset(hjclauses, econtext))
2021 : {
2022 8107784 : hjstate->hj_CurTuple = hashTuple;
2023 8107784 : return true;
2024 : }
2025 : }
2026 :
2027 4276218 : hashTuple = hashTuple->next.unshared;
2028 : }
2029 :
2030 : /*
2031 : * no match
2032 : */
2033 10236108 : return false;
2034 : }
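/*
 * Editor's sketch of the probe loop above with plain pointers: the stored
 * hash value filters out most non-matches cheaply, and only hash-equal
 * tuples pay for the full join-qual comparison (a simple int key here).
 * The executor resumes from hj_CurTuple between calls; this sketch just
 * returns the first match.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct Tuple
{
    struct Tuple *next;
    uint32_t    hashvalue;
    int         key;
} Tuple;

static Tuple *
scan_bucket(Tuple *cur, uint32_t hashvalue, int key)
{
    for (; cur != NULL; cur = cur->next)
        if (cur->hashvalue == hashvalue && cur->key == key)
            return cur;
    return NULL;                /* no (more) matches in this bucket */
}

int
main(void)
{
    Tuple       a = {NULL, 42, 1};
    Tuple       b = {&a, 42, 2};    /* same hash as 'a': a chain collision */

    printf("found key %d\n", scan_bucket(&b, 42, 1)->key);
    return 0;
}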
2035 :
2036 : /*
2037 : * ExecParallelScanHashBucket
2038 : * scan a hash bucket for matches to the current outer tuple
2039 : *
2040 : * The current outer tuple must be stored in econtext->ecxt_outertuple.
2041 : *
2042 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2043 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2044 : * for the latter.
2045 : */
2046 : bool
2047 4200054 : ExecParallelScanHashBucket(HashJoinState *hjstate,
2048 : ExprContext *econtext)
2049 : {
2050 4200054 : ExprState *hjclauses = hjstate->hashclauses;
2051 4200054 : HashJoinTable hashtable = hjstate->hj_HashTable;
2052 4200054 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2053 4200054 : uint32 hashvalue = hjstate->hj_CurHashValue;
2054 :
2055 : /*
2056 : * hj_CurTuple is the address of the tuple last returned from the current
2057 : * bucket, or NULL if it's time to start scanning a new bucket.
2058 : */
2059 4200054 : if (hashTuple != NULL)
2060 2040024 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2061 : else
2062 2160030 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2063 : hjstate->hj_CurBucketNo);
2064 :
2065 5607210 : while (hashTuple != NULL)
2066 : {
2067 3447180 : if (hashTuple->hashvalue == hashvalue)
2068 : {
2069 : TupleTableSlot *inntuple;
2070 :
2071 : /* insert hashtable's tuple into exec slot so ExecQual sees it */
2072 2040024 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2073 : hjstate->hj_HashTupleSlot,
2074 : false); /* do not pfree */
2075 2040024 : econtext->ecxt_innertuple = inntuple;
2076 :
2077 2040024 : if (ExecQualAndReset(hjclauses, econtext))
2078 : {
2079 2040024 : hjstate->hj_CurTuple = hashTuple;
2080 2040024 : return true;
2081 : }
2082 : }
2083 :
2084 1407156 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2085 : }
2086 :
2087 : /*
2088 : * no match
2089 : */
2090 2160030 : return false;
2091 : }
2092 :
2093 : /*
2094 : * ExecPrepHashTableForUnmatched
2095 : * set up for a series of ExecScanHashTableForUnmatched calls
2096 : */
2097 : void
2098 3752 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
2099 : {
2100 : /*----------
2101 : * During this scan we use the HashJoinState fields as follows:
2102 : *
2103 : * hj_CurBucketNo: next regular bucket to scan
2104 : * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
2105 : * hj_CurTuple: last tuple returned, or NULL to start next bucket
2106 : *----------
2107 : */
2108 3752 : hjstate->hj_CurBucketNo = 0;
2109 3752 : hjstate->hj_CurSkewBucketNo = 0;
2110 3752 : hjstate->hj_CurTuple = NULL;
2111 3752 : }
2112 :
2113 : /*
2114 : * Decide if this process is allowed to run the unmatched scan. If so, the
2115 : * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
2116 : * Otherwise the batch is detached and false is returned.
2117 : */
2118 : bool
2119 70 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
2120 : {
2121 70 : HashJoinTable hashtable = hjstate->hj_HashTable;
2122 70 : int curbatch = hashtable->curbatch;
2123 70 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
2124 :
2125 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
2126 :
2127 : /*
2128 : * It would not be deadlock-free to wait on the batch barrier, because it
2129 : * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
2130 : * already emitted tuples. Therefore, we'll hold a wait-free election:
2131 : * only one process can continue to the next phase, and all others detach
2132 : * from this batch. They can still do any work on other batches, if there
2133 : * are any.
2134 : */
2135 70 : if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
2136 : {
2137 : /* This process considers the batch to be done. */
2138 4 : hashtable->batches[hashtable->curbatch].done = true;
2139 :
2140 : /* Make sure any temporary files are closed. */
2141 4 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
2142 4 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
2143 :
2144 : /*
2145 : * Track the largest batch we've seen, which would normally happen in
2146 : * ExecHashTableDetachBatch().
2147 : */
2148 4 : hashtable->spacePeak =
2149 4 : Max(hashtable->spacePeak,
2150 : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
2151 4 : hashtable->curbatch = -1;
2152 4 : return false;
2153 : }
2154 :
2155 : /* Now we are alone with this batch. */
2156 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
2157 :
2158 : /*
2159 : * Has another process decided to give up early and command all processes
2160 : * to skip the unmatched scan?
2161 : */
2162 66 : if (batch->skip_unmatched)
2163 : {
2164 0 : hashtable->batches[hashtable->curbatch].done = true;
2165 0 : ExecHashTableDetachBatch(hashtable);
2166 0 : return false;
2167 : }
2168 :
2169 : /* Now prepare the process local state, just as for non-parallel join. */
2170 66 : ExecPrepHashTableForUnmatched(hjstate);
2171 :
2172 66 : return true;
2173 : }
2174 :
2175 : /*
2176 : * ExecScanHashTableForUnmatched
2177 : * scan the hash table for unmatched inner tuples
2178 : *
2179 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2180 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2181 : * for the latter.
2182 : */
2183 : bool
2184 360990 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
2185 : {
2186 360990 : HashJoinTable hashtable = hjstate->hj_HashTable;
2187 360990 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2188 :
2189 : for (;;)
2190 : {
2191 : /*
2192 : * hj_CurTuple is the address of the tuple last returned from the
2193 : * current bucket, or NULL if it's time to start scanning a new
2194 : * bucket.
2195 : */
2196 5229570 : if (hashTuple != NULL)
2197 357304 : hashTuple = hashTuple->next.unshared;
2198 4872266 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2199 : {
2200 4868592 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
2201 4868592 : hjstate->hj_CurBucketNo++;
2202 : }
2203 3674 : else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
2204 : {
2205 0 : int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
2206 :
2207 0 : hashTuple = hashtable->skewBucket[j]->tuples;
2208 0 : hjstate->hj_CurSkewBucketNo++;
2209 : }
2210 : else
2211 3674 : break; /* finished all buckets */
2212 :
2213 5627584 : while (hashTuple != NULL)
2214 : {
2215 759004 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2216 : {
2217 : TupleTableSlot *inntuple;
2218 :
2219 : /* insert hashtable's tuple into exec slot */
2220 357316 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2221 : hjstate->hj_HashTupleSlot,
2222 : false); /* do not pfree */
2223 357316 : econtext->ecxt_innertuple = inntuple;
2224 :
2225 : /*
2226 : * Reset temp memory each time; although this function doesn't
2227 : * do any qual eval, the caller will, so let's keep it
2228 : * parallel to ExecScanHashBucket.
2229 : */
2230 357316 : ResetExprContext(econtext);
2231 :
2232 357316 : hjstate->hj_CurTuple = hashTuple;
2233 357316 : return true;
2234 : }
2235 :
2236 401688 : hashTuple = hashTuple->next.unshared;
2237 : }
2238 :
2239 : /* allow this loop to be cancellable */
2240 4868580 : CHECK_FOR_INTERRUPTS();
2241 : }
2242 :
2243 : /*
2244 : * no more unmatched tuples
2245 : */
2246 3674 : return false;
2247 : }
2248 :
2249 : /*
2250 : * ExecParallelScanHashTableForUnmatched
2251 : * scan the hash table for unmatched inner tuples, in parallel join
2252 : *
2253 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2254 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2255 : * for the latter.
2256 : */
2257 : bool
2258 120072 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
2259 : ExprContext *econtext)
2260 : {
2261 120072 : HashJoinTable hashtable = hjstate->hj_HashTable;
2262 120072 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2263 :
2264 : for (;;)
2265 : {
2266 : /*
2267 : * hj_CurTuple is the address of the tuple last returned from the
2268 : * current bucket, or NULL if it's time to start scanning a new
2269 : * bucket.
2270 : */
2271 734472 : if (hashTuple != NULL)
2272 120006 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2273 614466 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2274 614400 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2275 614400 : hjstate->hj_CurBucketNo++);
2276 : else
2277 66 : break; /* finished all buckets */
2278 :
2279 974406 : while (hashTuple != NULL)
2280 : {
2281 360006 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2282 : {
2283 : TupleTableSlot *inntuple;
2284 :
2285 : /* insert hashtable's tuple into exec slot */
2286 120006 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2287 : hjstate->hj_HashTupleSlot,
2288 : false); /* do not pfree */
2289 120006 : econtext->ecxt_innertuple = inntuple;
2290 :
2291 : /*
2292 : * Reset temp memory each time; although this function doesn't
2293 : * do any qual eval, the caller will, so let's keep it
2294 : * parallel to ExecScanHashBucket.
2295 : */
2296 120006 : ResetExprContext(econtext);
2297 :
2298 120006 : hjstate->hj_CurTuple = hashTuple;
2299 120006 : return true;
2300 : }
2301 :
2302 240000 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2303 : }
2304 :
2305 : /* allow this loop to be cancellable */
2306 614400 : CHECK_FOR_INTERRUPTS();
2307 : }
2308 :
2309 : /*
2310 : * no more unmatched tuples
2311 : */
2312 66 : return false;
2313 : }
2314 :
2315 : /*
2316 : * ExecHashTableReset
2317 : *
2318 : * reset hash table header for new batch
2319 : */
2320 : void
2321 670 : ExecHashTableReset(HashJoinTable hashtable)
2322 : {
2323 : MemoryContext oldcxt;
2324 670 : int nbuckets = hashtable->nbuckets;
2325 :
2326 : /*
2327 : * Release all the hash buckets and tuples acquired in the prior pass, and
2328 : * reinitialize the context for a new pass.
2329 : */
2330 670 : MemoryContextReset(hashtable->batchCxt);
2331 670 : oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
2332 :
2333 : /* Reallocate and reinitialize the hash bucket headers. */
2334 670 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
2335 :
2336 670 : hashtable->spaceUsed = 0;
2337 :
2338 670 : MemoryContextSwitchTo(oldcxt);
2339 :
2340 : /* Forget the chunks (the memory was freed by the context reset above). */
2341 670 : hashtable->chunks = NULL;
2342 670 : }
2343 :
2344 : /*
2345 : * ExecHashTableResetMatchFlags
2346 : * Clear all the HeapTupleHeaderHasMatch flags in the table
2347 : */
2348 : void
2349 70 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
2350 : {
2351 : HashJoinTuple tuple;
2352 : int i;
2353 :
2354 : /* Reset all flags in the main table ... */
2355 71750 : for (i = 0; i < hashtable->nbuckets; i++)
2356 : {
2357 72014 : for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
2358 334 : tuple = tuple->next.unshared)
2359 334 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2360 : }
2361 :
2362 : /* ... and the same for the skew buckets, if any */
2363 70 : for (i = 0; i < hashtable->nSkewBuckets; i++)
2364 : {
2365 0 : int j = hashtable->skewBucketNums[i];
2366 0 : HashSkewBucket *skewBucket = hashtable->skewBucket[j];
2367 :
2368 0 : for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
2369 0 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2370 : }
2371 70 : }
2372 :
2373 :
2374 : void
2375 1392 : ExecReScanHash(HashState *node)
2376 : {
2377 1392 : PlanState *outerPlan = outerPlanState(node);
2378 :
2379 : /*
2380 : * if chgParam of subnode is not null then plan will be re-scanned by
2381 : * first ExecProcNode.
2382 : */
2383 1392 : if (outerPlan->chgParam == NULL)
2384 30 : ExecReScan(outerPlan);
2385 1392 : }
2386 :
2387 :
2388 : /*
2389 : * ExecHashBuildSkewHash
2390 : *
2391 : * Set up for skew optimization if we can identify the most common values
2392 : * (MCVs) of the outer relation's join key. We make a skew hash bucket
2393 : * for the hash value of each MCV, up to the number of slots allowed
2394 : * based on available memory.
2395 : */
2396 : static void
2397 106 : ExecHashBuildSkewHash(HashState *hashstate, HashJoinTable hashtable,
2398 : Hash *node, int mcvsToUse)
2399 : {
2400 : HeapTupleData *statsTuple;
2401 : AttStatsSlot sslot;
2402 :
2403 : /* Do nothing if planner didn't identify the outer relation's join key */
2404 106 : if (!OidIsValid(node->skewTable))
2405 72 : return;
2406 : /* Also, do nothing if we don't have room for at least one skew bucket */
2407 106 : if (mcvsToUse <= 0)
2408 0 : return;
2409 :
2410 : /*
2411 : * Try to find the MCV statistics for the outer relation's join key.
2412 : */
2413 106 : statsTuple = SearchSysCache3(STATRELATTINH,
2414 : ObjectIdGetDatum(node->skewTable),
2415 106 : Int16GetDatum(node->skewColumn),
2416 106 : BoolGetDatum(node->skewInherit));
2417 106 : if (!HeapTupleIsValid(statsTuple))
2418 72 : return;
2419 :
2420 34 : if (get_attstatsslot(&sslot, statsTuple,
2421 : STATISTIC_KIND_MCV, InvalidOid,
2422 : ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
2423 : {
2424 : double frac;
2425 : int nbuckets;
2426 : int i;
2427 :
2428 6 : if (mcvsToUse > sslot.nvalues)
2429 0 : mcvsToUse = sslot.nvalues;
2430 :
2431 : /*
2432 : * Calculate the expected fraction of outer relation that will
2433 : * participate in the skew optimization. If this isn't at least
2434 : * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
2435 : */
2436 6 : frac = 0;
2437 132 : for (i = 0; i < mcvsToUse; i++)
2438 126 : frac += sslot.numbers[i];
2439 6 : if (frac < SKEW_MIN_OUTER_FRACTION)
2440 : {
2441 0 : free_attstatsslot(&sslot);
2442 0 : ReleaseSysCache(statsTuple);
2443 0 : return;
2444 : }
2445 :
2446 : /*
2447 : * Okay, set up the skew hashtable.
2448 : *
2449 : * skewBucket[] is an open addressing hashtable with a power of 2 size
2450 : * that is greater than the number of MCV values. (This ensures there
2451 : * will be at least one null entry, so searches will always
2452 : * terminate.)
2453 : *
2454 : * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
2455 : * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
2456 : * since we limit pg_statistic entries to much less than that.
2457 : */
2458 6 : nbuckets = pg_nextpower2_32(mcvsToUse + 1);
2459 : /* use two more bits just to help avoid collisions */
2460 6 : nbuckets <<= 2;
2461 :
2462 6 : hashtable->skewEnabled = true;
2463 6 : hashtable->skewBucketLen = nbuckets;
2464 :
2465 : /*
2466 : * We allocate the bucket memory in the hashtable's batch context. It
2467 : * is only needed during the first batch, and this ensures it will be
2468 : * automatically removed once the first batch is done.
2469 : */
2470 6 : hashtable->skewBucket = (HashSkewBucket **)
2471 6 : MemoryContextAllocZero(hashtable->batchCxt,
2472 : nbuckets * sizeof(HashSkewBucket *));
2473 6 : hashtable->skewBucketNums = (int *)
2474 6 : MemoryContextAllocZero(hashtable->batchCxt,
2475 : mcvsToUse * sizeof(int));
2476 :
2477 6 : hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
2478 6 : + mcvsToUse * sizeof(int);
2479 6 : hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
2480 6 : + mcvsToUse * sizeof(int);
2481 6 : if (hashtable->spaceUsed > hashtable->spacePeak)
2482 6 : hashtable->spacePeak = hashtable->spaceUsed;
2483 :
2484 : /*
2485 : * Create a skew bucket for each MCV hash value.
2486 : *
2487 : * Note: it is very important that we create the buckets in order of
2488 : * decreasing MCV frequency. If we have to remove some buckets, they
2489 : * must be removed in reverse order of creation (see notes in
2490 : * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
2491 : * be removed first.
2492 : */
2493 :
2494 132 : for (i = 0; i < mcvsToUse; i++)
2495 : {
2496 : uint32 hashvalue;
2497 : int bucket;
2498 :
2499 126 : hashvalue = DatumGetUInt32(FunctionCall1Coll(hashstate->skew_hashfunction,
2500 : hashstate->skew_collation,
2501 126 : sslot.values[i]));
2502 :
2503 : /*
2504 : * While we have not hit a hole in the hashtable and have not hit
2505 : * the desired bucket, we have collided with some previous hash
2506 : * value, so try the next bucket location. NB: this code must
2507 : * match ExecHashGetSkewBucket.
2508 : */
2509 126 : bucket = hashvalue & (nbuckets - 1);
2510 126 : while (hashtable->skewBucket[bucket] != NULL &&
2511 0 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2512 0 : bucket = (bucket + 1) & (nbuckets - 1);
2513 :
2514 : /*
2515 : * If we found an existing bucket with the same hashvalue, leave
2516 : * it alone. It's okay for two MCVs to share a hashvalue.
2517 : */
2518 126 : if (hashtable->skewBucket[bucket] != NULL)
2519 0 : continue;
2520 :
2521 : /* Okay, create a new skew bucket for this hashvalue. */
2522 252 : hashtable->skewBucket[bucket] = (HashSkewBucket *)
2523 126 : MemoryContextAlloc(hashtable->batchCxt,
2524 : sizeof(HashSkewBucket));
2525 126 : hashtable->skewBucket[bucket]->hashvalue = hashvalue;
2526 126 : hashtable->skewBucket[bucket]->tuples = NULL;
2527 126 : hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
2528 126 : hashtable->nSkewBuckets++;
2529 126 : hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
2530 126 : hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
2531 126 : if (hashtable->spaceUsed > hashtable->spacePeak)
2532 126 : hashtable->spacePeak = hashtable->spaceUsed;
2533 : }
2534 :
2535 6 : free_attstatsslot(&sslot);
2536 : }
2537 :
2538 34 : ReleaseSysCache(statsTuple);
2539 : }
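/*
 * Editor's sketch of the skew-table sizing above: pg_nextpower2_32 is
 * assumed to return the next power of 2 >= its argument, so for, say,
 * 100 MCVs the table gets next_pow2(101) << 2 = 128 * 4 = 512 slots,
 * guaranteeing plenty of empty entries so probes always terminate.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
next_pow2(uint32_t v)           /* local stand-in for pg_nextpower2_32 */
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v + 1;
}

int
main(void)
{
    int         mcvsToUse = 100;
    uint32_t    nbuckets = next_pow2(mcvsToUse + 1) << 2;

    printf("%d MCVs -> %u skew slots\n", mcvsToUse, (unsigned) nbuckets);
    return 0;
}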
2540 :
2541 : /*
2542 : * ExecHashGetSkewBucket
2543 : *
2544 : * Returns the index of the skew bucket for this hashvalue,
2545 : * or INVALID_SKEW_BUCKET_NO if the hashvalue is not
2546 : * associated with any active skew bucket.
2547 : */
2548 : int
2549 24986306 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
2550 : {
2551 : int bucket;
2552 :
2553 : /*
2554 : * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
2555 : * particular, this happens after the initial batch is done).
2556 : */
2557 24986306 : if (!hashtable->skewEnabled)
2558 24866306 : return INVALID_SKEW_BUCKET_NO;
2559 :
2560 : /*
2561 : * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
2562 : */
2563 120000 : bucket = hashvalue & (hashtable->skewBucketLen - 1);
2564 :
2565 : /*
2566 : * While we have not hit a hole in the hashtable and have not hit the
2567 : * desired bucket, we have collided with some other hash value, so try the
2568 : * next bucket location.
2569 : */
2570 127830 : while (hashtable->skewBucket[bucket] != NULL &&
2571 10818 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2572 7830 : bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
2573 :
2574 : /*
2575 : * Found the desired bucket?
2576 : */
2577 120000 : if (hashtable->skewBucket[bucket] != NULL)
2578 2988 : return bucket;
2579 :
2580 : /*
2581 : * There must not be any hashtable entry for this hash value.
2582 : */
2583 117012 : return INVALID_SKEW_BUCKET_NO;
2584 : }
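/*
 * Editor's sketch of the linear-probe lookup above: with a power-of-2
 * table that is never full, stepping (bucket + 1) & (len - 1) must reach
 * either the wanted hash value or a NULL hole, so the loop terminates.
 * SkewSlot.used stands in for a non-NULL skewBucket[] pointer.
 */
#include <stdint.h>

typedef struct SkewSlot
{
    uint32_t    hashvalue;
    int         used;
} SkewSlot;

static int
skew_lookup(const SkewSlot *table, uint32_t len, uint32_t hashvalue)
{
    uint32_t    bucket = hashvalue & (len - 1);

    while (table[bucket].used && table[bucket].hashvalue != hashvalue)
        bucket = (bucket + 1) & (len - 1);      /* collision: next slot */

    return table[bucket].used ? (int) bucket : -1;  /* -1: no skew bucket */
}

int
main(void)
{
    SkewSlot    table[4] = {{8, 1}, {0, 0}, {0, 0}, {7, 1}};

    /*
     * Hash 7 maps to slot 3 directly; hash 11 also maps to slot 3, probes
     * to slot 0 (occupied by hash 8), then hits the hole at slot 1 and
     * reports no skew bucket.
     */
    return skew_lookup(table, 4, 7) == 3 &&
        skew_lookup(table, 4, 11) == -1 ? 0 : 1;
}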
2585 :
2586 : /*
2587 : * ExecHashSkewTableInsert
2588 : *
2589 : * Insert a tuple into the skew hashtable.
2590 : *
2591 : * This should generally match up with the current-batch case in
2592 : * ExecHashTableInsert.
2593 : */
2594 : static void
2595 588 : ExecHashSkewTableInsert(HashJoinTable hashtable,
2596 : TupleTableSlot *slot,
2597 : uint32 hashvalue,
2598 : int bucketNumber)
2599 : {
2600 : bool shouldFree;
2601 588 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
2602 : HashJoinTuple hashTuple;
2603 : int hashTupleSize;
2604 :
2605 : /* Create the HashJoinTuple */
2606 588 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2607 588 : hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
2608 : hashTupleSize);
2609 588 : hashTuple->hashvalue = hashvalue;
2610 588 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
2611 588 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
2612 :
2613 : /* Push it onto the front of the skew bucket's list */
2614 588 : hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
2615 588 : hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
2616 : Assert(hashTuple != hashTuple->next.unshared);
2617 :
2618 : /* Account for space used, and back off if we've used too much */
2619 588 : hashtable->spaceUsed += hashTupleSize;
2620 588 : hashtable->spaceUsedSkew += hashTupleSize;
2621 588 : if (hashtable->spaceUsed > hashtable->spacePeak)
2622 432 : hashtable->spacePeak = hashtable->spaceUsed;
2623 690 : while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
2624 102 : ExecHashRemoveNextSkewBucket(hashtable);
2625 :
2626 : /* Check we are not over the total spaceAllowed, either */
2627 588 : if (hashtable->spaceUsed > hashtable->spaceAllowed)
2628 0 : ExecHashIncreaseNumBatches(hashtable);
2629 :
2630 588 : if (shouldFree)
2631 588 : heap_free_minimal_tuple(tuple);
2632 588 : }
2633 :
2634 : /*
2635 : * ExecHashRemoveNextSkewBucket
2636 : *
2637 : * Remove the least valuable skew bucket by pushing its tuples into
2638 : * the main hash table.
2639 : */
2640 : static void
2641 102 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
2642 : {
2643 : int bucketToRemove;
2644 : HashSkewBucket *bucket;
2645 : uint32 hashvalue;
2646 : int bucketno;
2647 : int batchno;
2648 : HashJoinTuple hashTuple;
2649 :
2650 : /* Locate the bucket to remove */
2651 102 : bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
2652 102 : bucket = hashtable->skewBucket[bucketToRemove];
2653 :
2654 : /*
2655 : * Calculate which bucket and batch the tuples belong to in the main
2656 : * hashtable. They all have the same hash value, so it's the same for all
2657 : * of them. Also note that it's not possible for nbatch to increase while
2658 : * we are processing the tuples.
2659 : */
2660 102 : hashvalue = bucket->hashvalue;
2661 102 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
2662 :
2663 : /* Process all tuples in the bucket */
2664 102 : hashTuple = bucket->tuples;
2665 450 : while (hashTuple != NULL)
2666 : {
2667 348 : HashJoinTuple nextHashTuple = hashTuple->next.unshared;
2668 : MinimalTuple tuple;
2669 : Size tupleSize;
2670 :
2671 : /*
2672 : * This code must agree with ExecHashTableInsert. We do not use
2673 : * ExecHashTableInsert directly as ExecHashTableInsert expects a
2674 : * TupleTableSlot while we already have HashJoinTuples.
2675 : */
2676 348 : tuple = HJTUPLE_MINTUPLE(hashTuple);
2677 348 : tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2678 :
2679 : /* Decide whether to put the tuple in the hash table or a temp file */
2680 348 : if (batchno == hashtable->curbatch)
2681 : {
2682 : /* Move the tuple to the main hash table */
2683 : HashJoinTuple copyTuple;
2684 :
2685 : /*
2686 : * We must copy the tuple into the dense storage, else it will not
2687 : * be found by, eg, ExecHashIncreaseNumBatches.
2688 : */
2689 138 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
2690 138 : memcpy(copyTuple, hashTuple, tupleSize);
2691 138 : pfree(hashTuple);
2692 :
2693 138 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
2694 138 : hashtable->buckets.unshared[bucketno] = copyTuple;
2695 :
2696 : /* We have reduced skew space, but overall space doesn't change */
2697 138 : hashtable->spaceUsedSkew -= tupleSize;
2698 : }
2699 : else
2700 : {
2701 : /* Put the tuple into a temp file for later batches */
2702 : Assert(batchno > hashtable->curbatch);
2703 210 : ExecHashJoinSaveTuple(tuple, hashvalue,
2704 210 : &hashtable->innerBatchFile[batchno],
2705 : hashtable);
2706 210 : pfree(hashTuple);
2707 210 : hashtable->spaceUsed -= tupleSize;
2708 210 : hashtable->spaceUsedSkew -= tupleSize;
2709 : }
2710 :
2711 348 : hashTuple = nextHashTuple;
2712 :
2713 : /* allow this loop to be cancellable */
2714 348 : CHECK_FOR_INTERRUPTS();
2715 : }
2716 :
2717 : /*
2718 : * Free the bucket struct itself and reset the hashtable entry to NULL.
2719 : *
2720 : * NOTE: this is not nearly as simple as it looks on the surface, because
2721 : * of the possibility of collisions in the hashtable. Suppose that hash
2722 : * values A and B collide at a particular hashtable entry, and that A was
2723 : * entered first so B gets shifted to a different table entry. If we were
2724 : * to remove A first then ExecHashGetSkewBucket would mistakenly start
2725 : * reporting that B is not in the hashtable, because it would hit the NULL
2726 : * before finding B. However, we always remove entries in the reverse
2727 : * order of creation, so this failure cannot happen.
2728 : */
2729 102 : hashtable->skewBucket[bucketToRemove] = NULL;
2730 102 : hashtable->nSkewBuckets--;
2731 102 : pfree(bucket);
2732 102 : hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
2733 102 : hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
2734 :
2735 : /*
2736 : * If we have removed all skew buckets then give up on skew optimization.
2737 : * Release the arrays since they aren't useful any more.
2738 : */
2739 102 : if (hashtable->nSkewBuckets == 0)
2740 : {
2741 0 : hashtable->skewEnabled = false;
2742 0 : pfree(hashtable->skewBucket);
2743 0 : pfree(hashtable->skewBucketNums);
2744 0 : hashtable->skewBucket = NULL;
2745 0 : hashtable->skewBucketNums = NULL;
2746 0 : hashtable->spaceUsed -= hashtable->spaceUsedSkew;
2747 0 : hashtable->spaceUsedSkew = 0;
2748 : }
2749 102 : }
2750 :
2751 : /*
2752 : * Reserve space in the DSM segment for instrumentation data.
2753 : */
2754 : void
2755 192 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
2756 : {
2757 : size_t size;
2758 :
2759 : /* don't need this if not instrumenting or no workers */
2760 192 : if (!node->ps.instrument || pcxt->nworkers == 0)
2761 108 : return;
2762 :
2763 84 : size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
2764 84 : size = add_size(size, offsetof(SharedHashInfo, hinstrument));
2765 84 : shm_toc_estimate_chunk(&pcxt->estimator, size);
2766 84 : shm_toc_estimate_keys(&pcxt->estimator, 1);
2767 : }
2768 :
2769 : /*
2770 : * Set up a space in the DSM for all workers to record instrumentation data
2771 : * about their hash table.
2772 : */
2773 : void
2774 192 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
2775 : {
2776 : size_t size;
2777 :
2778 : /* don't need this if not instrumenting or no workers */
2779 192 : if (!node->ps.instrument || pcxt->nworkers == 0)
2780 108 : return;
2781 :
2782 84 : size = offsetof(SharedHashInfo, hinstrument) +
2783 84 : pcxt->nworkers * sizeof(HashInstrumentation);
2784 84 : node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
2785 :
2786 : /* Each per-worker area must start out as zeroes. */
2787 84 : memset(node->shared_info, 0, size);
2788 :
2789 84 : node->shared_info->num_workers = pcxt->nworkers;
2790 84 : shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
2791 84 : node->shared_info);
2792 : }
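/*
 * Editor's sketch of the flexible-array sizing idiom used above, with
 * hypothetical struct names: the shared area is sized as offsetof() of
 * the array member plus one element per worker, and is zeroed so each
 * per-worker slot starts out cleared.
 */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef struct WorkerStats
{
    long        nbuckets;
    long        nbatch;
} WorkerStats;

typedef struct SharedStats
{
    int         num_workers;
    WorkerStats hinstrument[];          /* flexible array member */
} SharedStats;

static SharedStats *
alloc_shared_stats(int nworkers)
{
    size_t      size = offsetof(SharedStats, hinstrument) +
        nworkers * sizeof(WorkerStats);
    SharedStats *s = malloc(size);      /* error handling omitted */

    memset(s, 0, size);                 /* per-worker areas start as zeroes */
    s->num_workers = nworkers;
    return s;
}

int
main(void)
{
    SharedStats *s = alloc_shared_stats(4);

    s->hinstrument[3].nbatch = 8;       /* worker 3 records its stats */
    free(s);
    return 0;
}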
2793 :
2794 : /*
2795 : * Locate the DSM space for hash table instrumentation data that we'll write
2796 : * to at shutdown time.
2797 : */
2798 : void
2799 548 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
2800 : {
2801 : SharedHashInfo *shared_info;
2802 :
2803 : /* don't need this if not instrumenting */
2804 548 : if (!node->ps.instrument)
2805 296 : return;
2806 :
2807 : /*
2808 : * Find our entry in the shared area, and set up a pointer to it so that
2809 : * we'll accumulate stats there when shutting down or rebuilding the hash
2810 : * table.
2811 : */
2812 : shared_info = (SharedHashInfo *)
2813 252 : shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
2814 252 : node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
2815 : }
2816 :
2817 : /*
2818 : * Collect EXPLAIN stats if needed, saving them into DSM memory if
2819 : * ExecHashInitializeWorker was called, or local storage if not. In the
2820 : * parallel case, this must be done in ExecShutdownHash() rather than
2821 : * ExecEndHash() because the latter runs after we've detached from the DSM
2822 : * segment.
2823 : */
2824 : void
2825 26472 : ExecShutdownHash(HashState *node)
2826 : {
2827 : /* Allocate save space if EXPLAIN'ing and we didn't do so already */
2828 26472 : if (node->ps.instrument && !node->hinstrument)
2829 114 : node->hinstrument = palloc0_object(HashInstrumentation);
2830 : /* Now accumulate data for the current (final) hash table */
2831 26472 : if (node->hinstrument && node->hashtable)
2832 306 : ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
2833 26472 : }
2834 :
2835 : /*
2836 : * Retrieve instrumentation data from workers before the DSM segment is
2837 : * detached, so that EXPLAIN can access it.
2838 : */
2839 : void
2840 84 : ExecHashRetrieveInstrumentation(HashState *node)
2841 : {
2842 84 : SharedHashInfo *shared_info = node->shared_info;
2843 : size_t size;
2844 :
2845 84 : if (shared_info == NULL)
2846 0 : return;
2847 :
2848 : /* Replace node->shared_info with a copy in backend-local memory. */
2849 84 : size = offsetof(SharedHashInfo, hinstrument) +
2850 84 : shared_info->num_workers * sizeof(HashInstrumentation);
2851 84 : node->shared_info = palloc(size);
2852 84 : memcpy(node->shared_info, shared_info, size);
2853 : }
2854 :
2855 : /*
2856 : * Accumulate instrumentation data from 'hashtable' into an
2857 : * initially-zeroed HashInstrumentation struct.
2858 : *
2859 : * This is used to merge information across successive hash table instances
2860 : * within a single plan node. We take the maximum values of each interesting
2861 : * number. The largest nbuckets and largest nbatch values might have occurred
2862 : * in different instances, so there's some risk of confusion from reporting
2863 : * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
2864 : * issue if we don't report the largest values. Similarly, we want to report
2865 : * the largest spacePeak regardless of whether it happened in the same
2866 : * instance as the largest nbuckets or nbatch. All the instances should have
2867 : * the same nbuckets_original and nbatch_original; but there's little value
2868 : * in depending on that here, so handle them the same way.
2869 : */
2870 : void
2871 306 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
2872 : HashJoinTable hashtable)
2873 : {
2874 306 : instrument->nbuckets = Max(instrument->nbuckets,
2875 : hashtable->nbuckets);
2876 306 : instrument->nbuckets_original = Max(instrument->nbuckets_original,
2877 : hashtable->nbuckets_original);
2878 306 : instrument->nbatch = Max(instrument->nbatch,
2879 : hashtable->nbatch);
2880 306 : instrument->nbatch_original = Max(instrument->nbatch_original,
2881 : hashtable->nbatch_original);
2882 306 : instrument->space_peak = Max(instrument->space_peak,
2883 : hashtable->spacePeak);
2884 306 : }
2885 :
2886 : /*
2887 : * Allocate 'size' bytes from the currently active HashMemoryChunk
2888 : */
2889 : static void *
2890 7928644 : dense_alloc(HashJoinTable hashtable, Size size)
2891 : {
2892 : HashMemoryChunk newChunk;
2893 : char *ptr;
2894 :
2895 : /* just in case the size is not already aligned properly */
2896 7928644 : size = MAXALIGN(size);
2897 :
2898 : /*
2899 : * If tuple size is larger than threshold, allocate a separate chunk.
2900 : */
2901 7928644 : if (size > HASH_CHUNK_THRESHOLD)
2902 : {
2903 : /* allocate new chunk and put it at the beginning of the list */
2904 0 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2905 : HASH_CHUNK_HEADER_SIZE + size);
2906 0 : newChunk->maxlen = size;
2907 0 : newChunk->used = size;
2908 0 : newChunk->ntuples = 1;
2909 :
2910 : /*
2911 : * Add this chunk to the list after the first existing chunk, so that
2912 : * we don't lose the remaining space in the "current" chunk.
2913 : */
2914 0 : if (hashtable->chunks != NULL)
2915 : {
2916 0 : newChunk->next = hashtable->chunks->next;
2917 0 : hashtable->chunks->next.unshared = newChunk;
2918 : }
2919 : else
2920 : {
2921 0 : newChunk->next.unshared = hashtable->chunks;
2922 0 : hashtable->chunks = newChunk;
2923 : }
2924 :
2925 0 : return HASH_CHUNK_DATA(newChunk);
2926 : }
2927 :
2928 : /*
2929 : * See if we have enough space for it in the current chunk (if any). If
2930 : * not, allocate a fresh chunk.
2931 : */
2932 7928644 : if ((hashtable->chunks == NULL) ||
2933 7910380 : (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
2934 : {
2935 : /* allocate new chunk and put it at the beginning of the list */
2936 28720 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2937 : HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
2938 :
2939 28720 : newChunk->maxlen = HASH_CHUNK_SIZE;
2940 28720 : newChunk->used = size;
2941 28720 : newChunk->ntuples = 1;
2942 :
2943 28720 : newChunk->next.unshared = hashtable->chunks;
2944 28720 : hashtable->chunks = newChunk;
2945 :
2946 28720 : return HASH_CHUNK_DATA(newChunk);
2947 : }
2948 :
2949 : /* There is enough space in the current chunk, let's add the tuple */
2950 7899924 : ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
2951 7899924 : hashtable->chunks->used += size;
2952 7899924 : hashtable->chunks->ntuples += 1;
2953 :
2954 : /* return pointer to the start of the tuple memory */
2955 7899924 : return ptr;
2956 : }
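/*
 * Editor's sketch of the dense_alloc policy with plain malloc: each
 * allocation is a bump-pointer carve from the current chunk, and a fresh
 * chunk is pushed on the list head when the remainder is too small.  The
 * chunk size is a made-up stand-in for HASH_CHUNK_SIZE, and the oversized
 * tuple special case above is omitted for brevity.
 */
#include <stdlib.h>

#define CHUNK_SIZE 32768

typedef struct Chunk
{
    struct Chunk *next;
    size_t      used;
    char        data[CHUNK_SIZE];
} Chunk;

static void *
bump_alloc(Chunk **chunks, size_t size)
{
    size = (size + 7) & ~(size_t) 7;    /* MAXALIGN stand-in */

    if (*chunks == NULL || CHUNK_SIZE - (*chunks)->used < size)
    {
        Chunk      *c = malloc(sizeof(Chunk));  /* error handling omitted */

        c->next = *chunks;              /* new chunk becomes the current one */
        c->used = 0;
        *chunks = c;
    }

    void       *ptr = (*chunks)->data + (*chunks)->used;

    (*chunks)->used += size;
    return ptr;
}

int
main(void)
{
    Chunk      *chunks = NULL;

    bump_alloc(&chunks, 48);            /* carves from one shared chunk */
    bump_alloc(&chunks, 48);
    return 0;
}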
2957 :
2958 : /*
2959 : * Allocate space for a tuple in shared dense storage. This is equivalent to
2960 : * dense_alloc but for Parallel Hash using shared memory.
2961 : *
2962 : * While loading a tuple into shared memory, we might run out of memory and
2963 : * decide to repartition, or determine that the load factor is too high and
2964 : * decide to expand the bucket array, or discover that another participant has
2965 : * commanded us to help do that. Return NULL if the number of buckets or batches
2966 : * has changed, indicating that the caller must retry (considering the
2967 : * possibility that the tuple no longer belongs in the same batch).
2968 : */
2969 : static HashJoinTuple
2970 2380992 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
2971 : dsa_pointer *shared)
2972 : {
2973 2380992 : ParallelHashJoinState *pstate = hashtable->parallel_state;
2974 : dsa_pointer chunk_shared;
2975 : HashMemoryChunk chunk;
2976 : Size chunk_size;
2977 : HashJoinTuple result;
2978 2380992 : int curbatch = hashtable->curbatch;
2979 :
2980 2380992 : size = MAXALIGN(size);
2981 :
2982 : /*
2983 : * Fast path: if there is enough space in this backend's current chunk,
2984 : * then we can allocate without any locking.
2985 : */
2986 2380992 : chunk = hashtable->current_chunk;
2987 2380992 : if (chunk != NULL &&
2988 2380170 : size <= HASH_CHUNK_THRESHOLD &&
2989 2380170 : chunk->maxlen - chunk->used >= size)
2990 : {
2991 :
2992 2377282 : chunk_shared = hashtable->current_chunk_shared;
2993 : Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
2994 2377282 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
2995 2377282 : result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
2996 2377282 : chunk->used += size;
2997 :
2998 : Assert(chunk->used <= chunk->maxlen);
2999 : Assert(result == dsa_get_address(hashtable->area, *shared));
3000 :
3001 2377282 : return result;
3002 : }
3003 :
3004 : /* Slow path: try to allocate a new chunk. */
3005 3710 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3006 :
3007 : /*
3008 : * Check if we need to help increase the number of buckets or batches.
3009 : */
3010 3710 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3011 3674 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3012 : {
3013 178 : ParallelHashGrowth growth = pstate->growth;
3014 :
3015 178 : hashtable->current_chunk = NULL;
3016 178 : LWLockRelease(&pstate->lock);
3017 :
3018 : /* Another participant has commanded us to help grow. */
3019 178 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3020 36 : ExecParallelHashIncreaseNumBatches(hashtable);
3021 142 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3022 142 : ExecParallelHashIncreaseNumBuckets(hashtable);
3023 :
3024 : /* The caller must retry. */
3025 178 : return NULL;
3026 : }
3027 :
3028 : /* Oversized tuples get their own chunk. */
3029 3532 : if (size > HASH_CHUNK_THRESHOLD)
3030 48 : chunk_size = size + HASH_CHUNK_HEADER_SIZE;
3031 : else
3032 3484 : chunk_size = HASH_CHUNK_SIZE;
3033 :
3034 : /* Check if it's time to grow batches or buckets. */
3035 3532 : if (pstate->growth != PHJ_GROWTH_DISABLED)
3036 : {
3037 : Assert(curbatch == 0);
3038 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
3039 :
3040 : /*
3041 : * Check if our space limit would be exceeded. To avoid choking on
3042 : * very large tuples or a very low hash_mem setting, we'll always allow
3043 : * each backend to allocate at least one chunk.
3044 : */
3045 1856 : if (hashtable->batches[0].at_least_one_chunk &&
3046 1482 : hashtable->batches[0].shared->size +
3047 1482 : chunk_size > pstate->space_allowed)
3048 : {
3049 36 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3050 36 : hashtable->batches[0].shared->space_exhausted = true;
3051 36 : LWLockRelease(&pstate->lock);
3052 :
3053 36 : return NULL;
3054 : }
3055 :
3056 : /* Check if our load factor limit would be exceeded. */
3057 1820 : if (hashtable->nbatch == 1)
3058 : {
3059 1556 : hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
3060 1556 : hashtable->batches[0].ntuples = 0;
3061 : /* Guard against integer overflow and alloc size overflow */
3062 1556 : if (hashtable->batches[0].shared->ntuples + 1 >
3063 1556 : hashtable->nbuckets * NTUP_PER_BUCKET &&
3064 108 : hashtable->nbuckets < (INT_MAX / 2) &&
3065 108 : hashtable->nbuckets * 2 <=
3066 : MaxAllocSize / sizeof(dsa_pointer_atomic))
3067 : {
3068 108 : pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
3069 108 : LWLockRelease(&pstate->lock);
3070 :
3071 108 : return NULL;
3072 : }
3073 : }
3074 : }
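/*
 * Editor's note: the load-factor trigger above, restated as a standalone
 * predicate ("needs_more_buckets" is an illustrative name, not a PostgreSQL
 * API).  NTUP_PER_BUCKET is 1 in this file, so bucket growth is requested as
 * soon as there would be more tuples than buckets, provided doubling stays
 * within INT_MAX and within MaxAllocSize worth of bucket heads.
 */
static bool
needs_more_buckets(long ntuples, int nbuckets)
{
	return ntuples + 1 > (long) nbuckets * NTUP_PER_BUCKET &&
		nbuckets < (INT_MAX / 2) &&
		nbuckets * 2 <= MaxAllocSize / sizeof(dsa_pointer_atomic);
}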
3075 :
3076 : /* We are cleared to allocate a new chunk. */
3077 3388 : chunk_shared = dsa_allocate(hashtable->area, chunk_size);
3078 3388 : hashtable->batches[curbatch].shared->size += chunk_size;
3079 3388 : hashtable->batches[curbatch].at_least_one_chunk = true;
3080 :
3081 : /* Set up the chunk. */
3082 3388 : chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
3083 3388 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
3084 3388 : chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
3085 3388 : chunk->used = size;
3086 :
3087 : /*
 3088 : * Push it onto the list of chunks so that it can be found when we need
 3089 : * to increase the number of buckets or batches (batch 0 only) and,
 3090 : * later, when the memory is freed (all batches).
3091 : */
3092 3388 : chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
3093 3388 : hashtable->batches[curbatch].shared->chunks = chunk_shared;
3094 :
3095 3388 : if (size <= HASH_CHUNK_THRESHOLD)
3096 : {
3097 : /*
3098 : * Make this the current chunk so that we can use the fast path to
3099 : * fill the rest of it up in future calls.
3100 : */
3101 3352 : hashtable->current_chunk = chunk;
3102 3352 : hashtable->current_chunk_shared = chunk_shared;
3103 : }
3104 3388 : LWLockRelease(&pstate->lock);
3105 :
3106 : Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
3107 3388 : result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
3108 :
3109 3388 : return result;
3110 : }
3111 :
3112 : /*
3113 : * One backend needs to set up the shared batch state including tuplestores.
3114 : * Other backends will ensure they have correctly configured accessors by
 3115 : * calling ExecParallelHashEnsureBatchAccessors().
3116 : */
3117 : static void
3118 216 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
3119 : {
3120 216 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3121 : ParallelHashJoinBatch *batches;
3122 : MemoryContext oldcxt;
3123 : int i;
3124 :
3125 : Assert(hashtable->batches == NULL);
3126 :
3127 : /* Allocate space. */
3128 216 : pstate->batches =
3129 216 : dsa_allocate0(hashtable->area,
3130 : EstimateParallelHashJoinBatch(hashtable) * nbatch);
3131 216 : pstate->nbatch = nbatch;
3132 216 : batches = dsa_get_address(hashtable->area, pstate->batches);
3133 :
3134 : /*
3135 : * Use hash join spill memory context to allocate accessors, including
3136 : * buffers for the temporary files.
3137 : */
3138 216 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3139 :
3140 : /* Allocate this backend's accessor array. */
3141 216 : hashtable->nbatch = nbatch;
3142 216 : hashtable->batches =
3143 216 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3144 :
3145 : /* Set up the shared state, tuplestores and backend-local accessors. */
3146 906 : for (i = 0; i < hashtable->nbatch; ++i)
3147 : {
3148 690 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3149 690 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3150 : char name[MAXPGPATH];
3151 :
3152 : /*
3153 : * All members of shared were zero-initialized. We just need to set
3154 : * up the Barrier.
3155 : */
3156 690 : BarrierInit(&shared->batch_barrier, 0);
3157 690 : if (i == 0)
3158 : {
3159 : /* Batch 0 doesn't need to be loaded. */
3160 216 : BarrierAttach(&shared->batch_barrier);
3161 864 : while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
3162 648 : BarrierArriveAndWait(&shared->batch_barrier, 0);
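			/*
			 * Editor's note: with this backend as the barrier's only
			 * participant, each BarrierArriveAndWait() above returns
			 * immediately and advances one phase, so three hops take batch
			 * 0 from its initial phase straight to PHJ_BATCH_PROBE with no
			 * load step.
			 */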
3163 216 : BarrierDetach(&shared->batch_barrier);
3164 : }
3165 :
3166 : /* Initialize accessor state. All members were zero-initialized. */
3167 690 : accessor->shared = shared;
3168 :
3169 : /* Initialize the shared tuplestores. */
3170 690 : snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
3171 690 : accessor->inner_tuples =
3172 690 : sts_initialize(ParallelHashJoinBatchInner(shared),
3173 : pstate->nparticipants,
3174 : ParallelWorkerNumber + 1,
3175 : sizeof(uint32),
3176 : SHARED_TUPLESTORE_SINGLE_PASS,
3177 : &pstate->fileset,
3178 : name);
3179 690 : snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
3180 690 : accessor->outer_tuples =
3181 690 : sts_initialize(ParallelHashJoinBatchOuter(shared,
3182 : pstate->nparticipants),
3183 : pstate->nparticipants,
3184 : ParallelWorkerNumber + 1,
3185 : sizeof(uint32),
3186 : SHARED_TUPLESTORE_SINGLE_PASS,
3187 : &pstate->fileset,
3188 : name);
3189 : }
3190 :
3191 216 : MemoryContextSwitchTo(oldcxt);
3192 216 : }
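/*
 * Editor's note: the names built above follow a fixed per-batch pattern,
 * e.g. "i2of8" for the inner side of batch 2 of 8 and "o2of8" for its outer
 * side.  Each shared tuplestore has pstate->nparticipants writers;
 * ParallelWorkerNumber + 1 maps the leader (worker number -1) to participant
 * slot 0 and the workers to slots 1..n.
 */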
3193 :
3194 : /*
3195 : * Free the current set of ParallelHashJoinBatchAccessor objects.
3196 : */
3197 : static void
3198 48 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
3199 : {
3200 : int i;
3201 :
3202 132 : for (i = 0; i < hashtable->nbatch; ++i)
3203 : {
3204 : /* Make sure no files are left open. */
3205 84 : sts_end_write(hashtable->batches[i].inner_tuples);
3206 84 : sts_end_write(hashtable->batches[i].outer_tuples);
3207 84 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3208 84 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3209 : }
3210 48 : pfree(hashtable->batches);
3211 48 : hashtable->batches = NULL;
3212 48 : }
3213 :
3214 : /*
3215 : * Make sure this backend has up-to-date accessors for the current set of
3216 : * batches.
3217 : */
3218 : static void
3219 878 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
3220 : {
3221 878 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3222 : ParallelHashJoinBatch *batches;
3223 : MemoryContext oldcxt;
3224 : int i;
3225 :
3226 878 : if (hashtable->batches != NULL)
3227 : {
3228 650 : if (hashtable->nbatch == pstate->nbatch)
3229 650 : return;
3230 0 : ExecParallelHashCloseBatchAccessors(hashtable);
3231 : }
3232 :
3233 : /*
3234 : * We should never see a state where the batch-tracking array is freed,
 3235 : * because any backend that joins after the build barrier has reached
 3236 : * the PHJ_BUILD_FREE phase gives up before getting here.
3237 : */
3238 : Assert(DsaPointerIsValid(pstate->batches));
3239 :
3240 : /*
3241 : * Use hash join spill memory context to allocate accessors, including
3242 : * buffers for the temporary files.
3243 : */
3244 228 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3245 :
3246 : /* Allocate this backend's accessor array. */
3247 228 : hashtable->nbatch = pstate->nbatch;
3248 228 : hashtable->batches =
3249 228 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3250 :
3251 : /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
3252 : batches = (ParallelHashJoinBatch *)
3253 228 : dsa_get_address(hashtable->area, pstate->batches);
3254 :
3255 : /* Set up the accessor array and attach to the tuplestores. */
3256 1104 : for (i = 0; i < hashtable->nbatch; ++i)
3257 : {
3258 876 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3259 876 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3260 :
3261 876 : accessor->shared = shared;
3262 876 : accessor->preallocated = 0;
3263 876 : accessor->done = false;
3264 876 : accessor->outer_eof = false;
3265 876 : accessor->inner_tuples =
3266 876 : sts_attach(ParallelHashJoinBatchInner(shared),
3267 : ParallelWorkerNumber + 1,
3268 : &pstate->fileset);
3269 876 : accessor->outer_tuples =
3270 876 : sts_attach(ParallelHashJoinBatchOuter(shared,
3271 : pstate->nparticipants),
3272 : ParallelWorkerNumber + 1,
3273 : &pstate->fileset);
3274 : }
3275 :
3276 228 : MemoryContextSwitchTo(oldcxt);
3277 : }
3278 :
3279 : /*
3280 : * Allocate an empty shared memory hash table for a given batch.
3281 : */
3282 : void
3283 606 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
3284 : {
3285 606 : ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
3286 : dsa_pointer_atomic *buckets;
3287 606 : int nbuckets = hashtable->parallel_state->nbuckets;
3288 : int i;
3289 :
3290 606 : batch->buckets =
3291 606 : dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
3292 : buckets = (dsa_pointer_atomic *)
3293 606 : dsa_get_address(hashtable->area, batch->buckets);
3294 3115614 : for (i = 0; i < nbuckets; ++i)
3295 3115008 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
3296 606 : }
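/*
 * Editor's note: a self-contained C11 model of the bucket-array setup above,
 * with _Atomic uintptr_t standing in for dsa_pointer_atomic and 0 for
 * InvalidDsaPointer; "make_buckets" is an illustrative name.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

static _Atomic uintptr_t *
make_buckets(int nbuckets)
{
	_Atomic uintptr_t *buckets = malloc(sizeof(*buckets) * nbuckets);

	if (buckets == NULL)
		return NULL;
	for (int i = 0; i < nbuckets; ++i)
		atomic_init(&buckets[i], 0);	/* empty bucket: no first tuple */
	return buckets;
}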
3297 :
3298 : /*
3299 : * If we are currently attached to a shared hash join batch, detach. If we
3300 : * are last to detach, clean up.
3301 : */
3302 : void
3303 18704 : ExecHashTableDetachBatch(HashJoinTable hashtable)
3304 : {
3305 18704 : if (hashtable->parallel_state != NULL &&
3306 1114 : hashtable->curbatch >= 0)
3307 : {
3308 716 : int curbatch = hashtable->curbatch;
3309 716 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
3310 716 : bool attached = true;
3311 :
3312 : /* Make sure any temporary files are closed. */
3313 716 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
3314 716 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
3315 :
3316 : /* After attaching we always get at least to PHJ_BATCH_PROBE. */
 3317 : /* After attaching we always get at least as far as PHJ_BATCH_PROBE. */
3318 : BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
3319 :
3320 : /*
3321 : * If we're abandoning the PHJ_BATCH_PROBE phase early without having
3322 : * reached the end of it, it means the plan doesn't want any more
3323 : * tuples, and it is happy to abandon any tuples buffered in this
3324 : * process's subplans. For correctness, we can't allow any process to
3325 : * execute the PHJ_BATCH_SCAN phase, because we will never have the
3326 : * complete set of match bits. Therefore we skip emitting unmatched
3327 : * tuples in all backends (if this is a full/right join), as if those
3328 : * tuples were all due to be emitted by this process and it has
3329 : * abandoned them too.
3330 : */
3331 716 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
3332 648 : !hashtable->batches[curbatch].outer_eof)
3333 : {
3334 : /*
3335 : * This flag may be written to by multiple backends during
3336 : * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
3337 : * phase so requires no extra locking.
3338 : */
3339 0 : batch->skip_unmatched = true;
3340 : }
3341 :
3342 : /*
3343 : * Even if we aren't doing a full/right outer join, we'll step through
3344 : * the PHJ_BATCH_SCAN phase just to maintain the invariant that
3345 : * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
3346 : */
3347 716 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
3348 648 : attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
3349 716 : if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
3350 : {
3351 : /*
 3352 : * We are no longer attached to the batch barrier, but we're the
3353 : * process that was chosen to free resources and it's safe to
3354 : * assert the current phase. The ParallelHashJoinBatch can't go
3355 : * away underneath us while we are attached to the build barrier,
3356 : * making this access safe.
3357 : */
3358 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
3359 :
3360 : /* Free shared chunks and buckets. */
3361 3704 : while (DsaPointerIsValid(batch->chunks))
3362 : {
3363 : HashMemoryChunk chunk =
3364 3100 : dsa_get_address(hashtable->area, batch->chunks);
3365 3100 : dsa_pointer next = chunk->next.shared;
3366 :
3367 3100 : dsa_free(hashtable->area, batch->chunks);
3368 3100 : batch->chunks = next;
3369 : }
3370 604 : if (DsaPointerIsValid(batch->buckets))
3371 : {
3372 604 : dsa_free(hashtable->area, batch->buckets);
3373 604 : batch->buckets = InvalidDsaPointer;
3374 : }
3375 : }
3376 :
3377 : /*
3378 : * Track the largest batch we've been attached to. Though each
3379 : * backend might see a different subset of batches, explain.c will
3380 : * scan the results from all backends to find the largest value.
3381 : */
3382 716 : hashtable->spacePeak =
3383 716 : Max(hashtable->spacePeak,
3384 : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
3385 :
3386 : /* Remember that we are not attached to a batch. */
3387 716 : hashtable->curbatch = -1;
3388 : }
3389 18704 : }
3390 :
3391 : /*
3392 : * Detach from all shared resources. If we are last to detach, clean up.
3393 : */
3394 : void
3395 17988 : ExecHashTableDetach(HashJoinTable hashtable)
3396 : {
3397 17988 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3398 :
3399 : /*
3400 : * If we're involved in a parallel query, we must either have gotten all
3401 : * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
3402 : */
3403 : Assert(!pstate ||
3404 : BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
3405 :
3406 17988 : if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
3407 : {
3408 : int i;
3409 :
3410 : /* Make sure any temporary files are closed. */
3411 396 : if (hashtable->batches)
3412 : {
3413 1878 : for (i = 0; i < hashtable->nbatch; ++i)
3414 : {
3415 1482 : sts_end_write(hashtable->batches[i].inner_tuples);
3416 1482 : sts_end_write(hashtable->batches[i].outer_tuples);
3417 1482 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3418 1482 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3419 : }
3420 : }
3421 :
3422 : /* If we're last to detach, clean up shared memory. */
3423 396 : if (BarrierArriveAndDetach(&pstate->build_barrier))
3424 : {
3425 : /*
3426 : * Late joining processes will see this state and give up
3427 : * immediately.
3428 : */
3429 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
3430 :
3431 168 : if (DsaPointerIsValid(pstate->batches))
3432 : {
3433 168 : dsa_free(hashtable->area, pstate->batches);
3434 168 : pstate->batches = InvalidDsaPointer;
3435 : }
3436 : }
3437 : }
3438 17988 : hashtable->parallel_state = NULL;
3439 17988 : }
3440 :
3441 : /*
3442 : * Get the first tuple in a given bucket identified by number.
3443 : */
3444 : static inline HashJoinTuple
3445 2774430 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
3446 : {
3447 : HashJoinTuple tuple;
3448 : dsa_pointer p;
3449 :
3450 : Assert(hashtable->parallel_state);
3451 2774430 : p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
3452 2774430 : tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
3453 :
3454 2774430 : return tuple;
3455 : }
3456 :
3457 : /*
3458 : * Get the next tuple in the same bucket as 'tuple'.
3459 : */
3460 : static inline HashJoinTuple
3461 3807186 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
3462 : {
3463 : HashJoinTuple next;
3464 :
3465 : Assert(hashtable->parallel_state);
3466 3807186 : next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
3467 :
3468 3807186 : return next;
3469 : }
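/*
 * Editor's note: a hedged sketch of how the two accessors above combine to
 * walk one bucket's chain during probing; "process_tuple" stands in for
 * whatever per-tuple work the join does and is hypothetical.
 */
static void
scan_bucket(HashJoinTable hashtable, int bucketno)
{
	HashJoinTuple tuple;

	for (tuple = ExecParallelHashFirstTuple(hashtable, bucketno);
		 tuple != NULL;
		 tuple = ExecParallelHashNextTuple(hashtable, tuple))
		process_tuple(tuple);	/* hypothetical per-tuple work */
}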
3470 :
3471 : /*
3472 : * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
3473 : */
3474 : static inline void
3475 2934492 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
3476 : HashJoinTuple tuple,
3477 : dsa_pointer tuple_shared)
3478 : {
3479 : for (;;)
3480 : {
3481 2934492 : tuple->next.shared = dsa_pointer_atomic_read(head);
3482 2934492 : if (dsa_pointer_atomic_compare_exchange(head,
3483 2934492 : &tuple->next.shared,
3484 : tuple_shared))
3485 2923762 : break;
3486 : }
3487 2923762 : }
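/*
 * Editor's note: the loop above is the classic lock-free "push onto the head
 * of an intrusive list" idiom.  A self-contained C11 model, with
 * process-local pointers standing in for DSA pointers:
 */
#include <stdatomic.h>

struct node
{
	struct node *next;
};

static void
push(_Atomic(struct node *) *head, struct node *node)
{
	node->next = atomic_load(head);
	/* On failure, the CAS reloads the current head into node->next. */
	while (!atomic_compare_exchange_weak(head, &node->next, node))
		;
}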
3488 :
3489 : /*
3490 : * Prepare to work on a given batch.
3491 : */
3492 : void
3493 1658 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
3494 : {
3495 : Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
3496 :
3497 1658 : hashtable->curbatch = batchno;
3498 1658 : hashtable->buckets.shared = (dsa_pointer_atomic *)
3499 1658 : dsa_get_address(hashtable->area,
3500 1658 : hashtable->batches[batchno].shared->buckets);
3501 1658 : hashtable->nbuckets = hashtable->parallel_state->nbuckets;
3502 1658 : hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
3503 1658 : hashtable->current_chunk = NULL;
3504 1658 : hashtable->current_chunk_shared = InvalidDsaPointer;
3505 1658 : hashtable->batches[batchno].at_least_one_chunk = false;
3506 1658 : }
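/*
 * Editor's note: nbuckets is always a power of two, so the log2_nbuckets set
 * above lets a bucket number be extracted from a hash value with a mask, as
 * ExecHashGetBucketAndBatch() does elsewhere in this file.  Sketch:
 */
static inline int
bucket_for(uint32 hashvalue, int nbuckets)
{
	return hashvalue & (nbuckets - 1);	/* requires nbuckets == 2^k */
}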
3507 :
3508 : /*
3509 : * Take the next available chunk from the queue of chunks being worked on in
3510 : * parallel. Return NULL if there are none left. Otherwise return a pointer
3511 : * to the chunk, and set *shared to the DSA pointer to the chunk.
3512 : */
3513 : static HashMemoryChunk
3514 1142 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
3515 : {
3516 1142 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3517 : HashMemoryChunk chunk;
3518 :
3519 1142 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3520 1142 : if (DsaPointerIsValid(pstate->chunk_work_queue))
3521 : {
3522 952 : *shared = pstate->chunk_work_queue;
3523 : chunk = (HashMemoryChunk)
3524 952 : dsa_get_address(hashtable->area, *shared);
3525 952 : pstate->chunk_work_queue = chunk->next.shared;
3526 : }
3527 : else
3528 190 : chunk = NULL;
3529 1142 : LWLockRelease(&pstate->lock);
3530 :
3531 1142 : return chunk;
3532 : }
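/*
 * Editor's note: in contrast to the lock-free tuple push above, the chunk
 * work queue is a plain intrusive list popped under pstate->lock.  A
 * pthread-based model of the same pattern ("work_queue_pop" is an
 * illustrative name):
 */
#include <pthread.h>
#include <stddef.h>

struct work_chunk
{
	struct work_chunk *next;
};

static struct work_chunk *
work_queue_pop(pthread_mutex_t *lock, struct work_chunk **queue)
{
	struct work_chunk *chunk;

	pthread_mutex_lock(lock);
	chunk = *queue;				/* NULL once the queue is drained */
	if (chunk != NULL)
		*queue = chunk->next;
	pthread_mutex_unlock(lock);
	return chunk;
}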
3533 :
3534 : /*
3535 : * Increase the space preallocated in this backend for a given inner batch by
3536 : * at least a given amount. This allows us to track whether a given batch
3537 : * would fit in memory when loaded back in. Also increase the number of
3538 : * batches or buckets if required.
3539 : *
 3540 : * This maintains a running estimate of how much space will be taken when we
3541 : * load the batch back into memory by simulating the way chunks will be handed
3542 : * out to workers. It's not perfectly accurate because the tuples will be
3543 : * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
3544 : * it should be pretty close. It tends to overestimate by a fraction of a
3545 : * chunk per worker since all workers gang up to preallocate during hashing,
3546 : * but workers tend to reload batches alone if there are enough to go around,
3547 : * leaving fewer partially filled chunks. This effect is bounded by
3548 : * nparticipants.
3549 : *
 3550 : * Returns false if the number of batches or buckets has changed; the caller
 3551 : * should then reconsider which batch the tuple now belongs in and call
 3552 : * again.
3553 : */
3554 : static bool
3555 1406 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
3556 : {
3557 1406 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3558 1406 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
3559 1406 : size_t want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
3560 :
3561 : Assert(batchno > 0);
3562 : Assert(batchno < hashtable->nbatch);
3563 : Assert(size == MAXALIGN(size));
3564 :
3565 1406 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3566 :
3567 : /* Has another participant commanded us to help grow? */
3568 1406 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3569 1394 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3570 : {
3571 12 : ParallelHashGrowth growth = pstate->growth;
3572 :
3573 12 : LWLockRelease(&pstate->lock);
3574 12 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3575 12 : ExecParallelHashIncreaseNumBatches(hashtable);
3576 0 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3577 0 : ExecParallelHashIncreaseNumBuckets(hashtable);
3578 :
3579 12 : return false;
3580 : }
3581 :
3582 1394 : if (pstate->growth != PHJ_GROWTH_DISABLED &&
3583 1164 : batch->at_least_one_chunk &&
3584 702 : (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
3585 702 : > pstate->space_allowed))
3586 : {
3587 : /*
3588 : * We have determined that this batch would exceed the space budget if
3589 : * loaded into memory. Command all participants to help repartition.
3590 : */
3591 12 : batch->shared->space_exhausted = true;
3592 12 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3593 12 : LWLockRelease(&pstate->lock);
3594 :
3595 12 : return false;
3596 : }
3597 :
3598 1382 : batch->at_least_one_chunk = true;
3599 1382 : batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
3600 1382 : batch->preallocated = want;
3601 1382 : LWLockRelease(&pstate->lock);
3602 :
3603 1382 : return true;
3604 : }
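/*
 * Editor's note: HASH_CHUNK_SIZE is 32kB (see hashjoin.h), so for typical
 * tuples 'want' above reserves one chunk's worth of payload,
 * HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE; only a tuple bigger than that
 * raises the reservation, mirroring the oversized-chunk rule in
 * ExecParallelHashTupleAlloc().
 */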
3605 :
3606 : /*
3607 : * Calculate the limit on how much memory can be used by Hash and similar
3608 : * plan types. This is work_mem times hash_mem_multiplier, and is
3609 : * expressed in bytes.
3610 : *
3611 : * Exported for use by the planner, as well as other hash-like executor
3612 : * nodes. This is a rather random place for this, but there is no better
3613 : * place.
3614 : */
3615 : size_t
3616 1968326 : get_hash_memory_limit(void)
3617 : {
3618 : double mem_limit;
3619 :
3620 : /* Do initial calculation in double arithmetic */
3621 1968326 : mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
3622 :
3623 : /* Clamp in case it doesn't fit in size_t */
3624 1968326 : mem_limit = Min(mem_limit, (double) SIZE_MAX);
3625 :
3626 1968326 : return (size_t) mem_limit;
3627 : }
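/*
 * Editor's note: worked example, assuming the defaults work_mem = 4MB and
 * hash_mem_multiplier = 2.0 (its default since PostgreSQL 15):
 *
 *     4096 kB * 2.0 * 1024.0 = 8388608 bytes, i.e. 8MB
 *
 * Doing the arithmetic in double keeps an extreme product from overflowing
 * before the SIZE_MAX clamp is applied.
 */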
|