1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeHash.c
4 : * Routines to hash relations for hashjoin
5 : *
6 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeHash.c
12 : *
13 : * See note on parallelism in nodeHashjoin.c.
14 : *
15 : *-------------------------------------------------------------------------
16 : */
17 : /*
18 : * INTERFACE ROUTINES
19 : * MultiExecHash - generate an in-memory hash table of the relation
20 : * ExecInitHash - initialize node and subnodes
21 : * ExecEndHash - shutdown node and subnodes
22 : */
23 :
24 : #include "postgres.h"
25 :
26 : #include <math.h>
27 : #include <limits.h>
28 :
29 : #include "access/htup_details.h"
30 : #include "access/parallel.h"
31 : #include "catalog/pg_statistic.h"
32 : #include "commands/tablespace.h"
33 : #include "executor/executor.h"
34 : #include "executor/hashjoin.h"
35 : #include "executor/nodeHash.h"
36 : #include "executor/nodeHashjoin.h"
37 : #include "miscadmin.h"
38 : #include "port/atomics.h"
39 : #include "port/pg_bitutils.h"
40 : #include "utils/dynahash.h"
41 : #include "utils/lsyscache.h"
42 : #include "utils/memutils.h"
43 : #include "utils/syscache.h"
44 : #include "utils/wait_event.h"
45 :
46 : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
47 : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
48 : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
49 : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
50 : static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
51 : int mcvsToUse);
52 : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
53 : TupleTableSlot *slot,
54 : uint32 hashvalue,
55 : int bucketNumber);
56 : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
57 :
58 : static void *dense_alloc(HashJoinTable hashtable, Size size);
59 : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
60 : size_t size,
61 : dsa_pointer *shared);
62 : static void MultiExecPrivateHash(HashState *node);
63 : static void MultiExecParallelHash(HashState *node);
64 : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
65 : int bucketno);
66 : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
67 : HashJoinTuple tuple);
68 : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
69 : HashJoinTuple tuple,
70 : dsa_pointer tuple_shared);
71 : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
72 : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
73 : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
74 : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
75 : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
76 : dsa_pointer *shared);
77 : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
78 : int batchno,
79 : size_t size);
80 : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
81 : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
82 :
83 :
84 : /* ----------------------------------------------------------------
85 : * ExecHash
86 : *
87 : * stub for pro forma compliance
88 : * ----------------------------------------------------------------
89 : */
90 : static TupleTableSlot *
91 0 : ExecHash(PlanState *pstate)
92 : {
93 0 : elog(ERROR, "Hash node does not support ExecProcNode call convention");
94 : return NULL;
95 : }
96 :
97 : /* ----------------------------------------------------------------
98 : * MultiExecHash
99 : *
100 : * build hash table for hashjoin, doing partitioning if more
101 : * than one batch is required.
102 : * ----------------------------------------------------------------
103 : */
104 : Node *
105 20550 : MultiExecHash(HashState *node)
106 : {
107 : /* must provide our own instrumentation support */
108 20550 : if (node->ps.instrument)
109 292 : InstrStartNode(node->ps.instrument);
110 :
111 20550 : if (node->parallel_state != NULL)
112 398 : MultiExecParallelHash(node);
113 : else
114 20152 : MultiExecPrivateHash(node);
115 :
116 : /* must provide our own instrumentation support */
117 20550 : if (node->ps.instrument)
118 292 : InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
119 :
120 : /*
121 : * We do not return the hash table directly because it's not a subtype of
122 : * Node, and so would violate the MultiExecProcNode API. Instead, our
123 : * parent Hashjoin node is expected to know how to fish it out of our node
124 : * state. Ugly but not really worth cleaning up, since Hashjoin knows
125 : * quite a bit more about Hash besides that.
126 : */
127 20550 : return NULL;
128 : }
129 :
130 : /* ----------------------------------------------------------------
131 : * MultiExecPrivateHash
132 : *
133 : * parallel-oblivious version, building a backend-private
134 : * hash table and (if necessary) batch files.
135 : * ----------------------------------------------------------------
136 : */
137 : static void
138 20152 : MultiExecPrivateHash(HashState *node)
139 : {
140 : PlanState *outerNode;
141 : List *hashkeys;
142 : HashJoinTable hashtable;
143 : TupleTableSlot *slot;
144 : ExprContext *econtext;
145 : uint32 hashvalue;
146 :
147 : /*
148 : * get state info from node
149 : */
150 20152 : outerNode = outerPlanState(node);
151 20152 : hashtable = node->hashtable;
152 :
153 : /*
154 : * set expression context
155 : */
156 20152 : hashkeys = node->hashkeys;
157 20152 : econtext = node->ps.ps_ExprContext;
158 :
159 : /*
160 : * Get all tuples from the node below the Hash node and insert into the
161 : * hash table (or temp files).
162 : */
163 : for (;;)
164 : {
165 7218682 : slot = ExecProcNode(outerNode);
166 7218682 : if (TupIsNull(slot))
167 : break;
168 : /* We have to compute the hash value */
169 7198530 : econtext->ecxt_outertuple = slot;
170 7198530 : if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
171 7198530 : false, hashtable->keepNulls,
172 : &hashvalue))
173 : {
174 : int bucketNumber;
175 :
176 7198488 : bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
177 7198488 : if (bucketNumber != INVALID_SKEW_BUCKET_NO)
178 : {
179 : /* It's a skew tuple, so put it into that hash table */
180 588 : ExecHashSkewTableInsert(hashtable, slot, hashvalue,
181 : bucketNumber);
182 588 : hashtable->skewTuples += 1;
183 : }
184 : else
185 : {
186 : /* Not subject to skew optimization, so insert normally */
187 7197900 : ExecHashTableInsert(hashtable, slot, hashvalue);
188 : }
189 7198488 : hashtable->totalTuples += 1;
190 : }
191 : }
192 :
193 : /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
194 20152 : if (hashtable->nbuckets != hashtable->nbuckets_optimal)
195 72 : ExecHashIncreaseNumBuckets(hashtable);
196 :
197 : /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
198 20152 : hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
199 20152 : if (hashtable->spaceUsed > hashtable->spacePeak)
200 20126 : hashtable->spacePeak = hashtable->spaceUsed;
201 :
202 20152 : hashtable->partialTuples = hashtable->totalTuples;
203 20152 : }
204 :
205 : /* ----------------------------------------------------------------
206 : * MultiExecParallelHash
207 : *
208 : * parallel-aware version, building a shared hash table and
209 : * (if necessary) batch files using the combined effort of
210 : * a set of co-operating backends.
211 : * ----------------------------------------------------------------
212 : */
213 : static void
214 398 : MultiExecParallelHash(HashState *node)
215 : {
216 : ParallelHashJoinState *pstate;
217 : PlanState *outerNode;
218 : List *hashkeys;
219 : HashJoinTable hashtable;
220 : TupleTableSlot *slot;
221 : ExprContext *econtext;
222 : uint32 hashvalue;
223 : Barrier *build_barrier;
224 : int i;
225 :
226 : /*
227 : * get state info from node
228 : */
229 398 : outerNode = outerPlanState(node);
230 398 : hashtable = node->hashtable;
231 :
232 : /*
233 : * set expression context
234 : */
235 398 : hashkeys = node->hashkeys;
236 398 : econtext = node->ps.ps_ExprContext;
237 :
238 : /*
239 : * Synchronize the parallel hash table build. At this stage we know that
240 : * the shared hash table has been or is being set up by
241 : * ExecHashTableCreate(), but we don't know if our peers have returned
242 : * from there or are here in MultiExecParallelHash(), and if so how far
243 : * through they are. To find out, we check the build_barrier phase and
244 : * then jump to the right step in the build algorithm.
245 : */
246 398 : pstate = hashtable->parallel_state;
247 398 : build_barrier = &pstate->build_barrier;
248 : Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
249 398 : switch (BarrierPhase(build_barrier))
250 : {
251 168 : case PHJ_BUILD_ALLOCATE:
252 :
253 : /*
254 : * Either I just allocated the initial hash table in
255 : * ExecHashTableCreate(), or someone else is doing that. Either
256 : * way, wait for everyone to arrive here so we can proceed.
257 : */
258 168 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
259 : /* Fall through. */
260 :
261 240 : case PHJ_BUILD_HASH_INNER:
262 :
263 : /*
264 : * It's time to begin hashing, or if we just arrived here then
265 : * hashing is already underway, so join in that effort. While
266 : * hashing we have to be prepared to help increase the number of
267 : * batches or buckets at any time, and if we arrived here when
268 : * that was already underway we'll have to help complete that work
269 : * immediately so that it's safe to access batches and buckets
270 : * below.
271 : */
272 240 : if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
273 : PHJ_GROW_BATCHES_ELECT)
274 0 : ExecParallelHashIncreaseNumBatches(hashtable);
275 240 : if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
276 : PHJ_GROW_BUCKETS_ELECT)
277 0 : ExecParallelHashIncreaseNumBuckets(hashtable);
278 240 : ExecParallelHashEnsureBatchAccessors(hashtable);
279 240 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
280 : for (;;)
281 : {
282 2160334 : slot = ExecProcNode(outerNode);
283 2160334 : if (TupIsNull(slot))
284 : break;
285 2160094 : econtext->ecxt_outertuple = slot;
286 2160094 : if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
287 2160094 : false, hashtable->keepNulls,
288 : &hashvalue))
289 2160094 : ExecParallelHashTableInsert(hashtable, slot, hashvalue);
290 2160094 : hashtable->partialTuples++;
291 : }
292 :
293 : /*
294 : * Make sure that any tuples we wrote to disk are visible to
295 : * others before anyone tries to load them.
296 : */
297 1314 : for (i = 0; i < hashtable->nbatch; ++i)
298 1074 : sts_end_write(hashtable->batches[i].inner_tuples);
299 :
300 : /*
301 : * Update shared counters. We need an accurate total tuple count
302 : * to control the empty table optimization.
303 : */
304 240 : ExecParallelHashMergeCounters(hashtable);
305 :
306 240 : BarrierDetach(&pstate->grow_buckets_barrier);
307 240 : BarrierDetach(&pstate->grow_batches_barrier);
308 :
309 : /*
310 : * Wait for everyone to finish building and flushing files and
311 : * counters.
312 : */
313 240 : if (BarrierArriveAndWait(build_barrier,
314 : WAIT_EVENT_HASH_BUILD_HASH_INNER))
315 : {
316 : /*
317 : * Elect one backend to disable any further growth. Batches
318 : * are now fixed. While building them we made sure they'd fit
319 : * in our memory budget when we load them back in later (or we
320 : * tried to do that and gave up because we detected extreme
321 : * skew).
322 : */
323 168 : pstate->growth = PHJ_GROWTH_DISABLED;
324 : }
325 : }
326 :
327 : /*
328 : * We're not yet attached to a batch. We all agree on the dimensions and
329 : * number of inner tuples (for the empty table optimization).
330 : */
331 398 : hashtable->curbatch = -1;
332 398 : hashtable->nbuckets = pstate->nbuckets;
333 398 : hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
334 398 : hashtable->totalTuples = pstate->total_tuples;
335 :
336 : /*
337 : * Unless we're completely done and the batch state has been freed, make
338 : * sure we have accessors.
339 : */
340 398 : if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
341 398 : ExecParallelHashEnsureBatchAccessors(hashtable);
342 :
343 : /*
344 : * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
345 : * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
346 : * there already).
347 : */
348 : Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
349 : BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
350 : BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
351 398 : }
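
/*
 * An illustrative sketch of the barrier "phase machine" pattern used
 * above, with PostgreSQL's Barrier API (storage/barrier.h).  Each
 * participant switches on whatever phase it finds, and the cases fall
 * through so that late arrivals skip steps that are already done.
 * BarrierArriveAndWait() returns true in exactly one participant per
 * phase, which is how a single backend is "elected" for serial work.
 * (Hypothetical function, not part of nodeHash.c; phase numbers and the
 * zero wait-event argument are placeholders.)
 */
#ifdef PHJ_BARRIER_PATTERN_SKETCH
static void
sketch_phase_machine(Barrier *barrier)
{
	BarrierAttach(barrier);
	switch (BarrierPhase(barrier))
	{
		case 0:
			if (BarrierArriveAndWait(barrier, 0))
			{
				/* exactly one participant performs the serial setup */
			}
			/* Fall through. */
		case 1:
			/* everyone cooperates on the parallel step */
			BarrierArriveAndWait(barrier, 0);
			break;
		default:
			/* arrived too late to help with this build */
			break;
	}
	BarrierDetach(barrier);
}
#endif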
352 :
353 : /* ----------------------------------------------------------------
354 : * ExecInitHash
355 : *
356 : * Init routine for Hash node
357 : * ----------------------------------------------------------------
358 : */
359 : HashState *
360 29796 : ExecInitHash(Hash *node, EState *estate, int eflags)
361 : {
362 : HashState *hashstate;
363 :
364 : /* check for unsupported flags */
365 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
366 :
367 : /*
368 : * create state structure
369 : */
370 29796 : hashstate = makeNode(HashState);
371 29796 : hashstate->ps.plan = (Plan *) node;
372 29796 : hashstate->ps.state = estate;
373 29796 : hashstate->ps.ExecProcNode = ExecHash;
374 29796 : hashstate->hashtable = NULL;
375 29796 : hashstate->hashkeys = NIL; /* will be set by parent HashJoin */
376 :
377 : /*
378 : * Miscellaneous initialization
379 : *
380 : * create expression context for node
381 : */
382 29796 : ExecAssignExprContext(estate, &hashstate->ps);
383 :
384 : /*
385 : * initialize child nodes
386 : */
387 29796 : outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
388 :
389 : /*
390 : * initialize our result slot and type. No need to build projection
391 : * because this node doesn't do projections.
392 : */
393 29796 : ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
394 29796 : hashstate->ps.ps_ProjInfo = NULL;
395 :
396 : /*
397 : * initialize child expressions
398 : */
399 : Assert(node->plan.qual == NIL);
400 29796 : hashstate->hashkeys =
401 29796 : ExecInitExprList(node->hashkeys, (PlanState *) hashstate);
402 :
403 29796 : return hashstate;
404 : }
405 :
406 : /* ---------------------------------------------------------------
407 : * ExecEndHash
408 : *
409 : * clean up routine for Hash node
410 : * ----------------------------------------------------------------
411 : */
412 : void
413 29690 : ExecEndHash(HashState *node)
414 : {
415 : PlanState *outerPlan;
416 :
417 : /*
418 : * shut down the subplan
419 : */
420 29690 : outerPlan = outerPlanState(node);
421 29690 : ExecEndNode(outerPlan);
422 29690 : }
423 :
424 :
425 : /* ----------------------------------------------------------------
426 : * ExecHashTableCreate
427 : *
428 : * create an empty hashtable data structure for hashjoin.
429 : * ----------------------------------------------------------------
430 : */
431 : HashJoinTable
432 20550 : ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, bool keepNulls)
433 : {
434 : Hash *node;
435 : HashJoinTable hashtable;
436 : Plan *outerNode;
437 : size_t space_allowed;
438 : int nbuckets;
439 : int nbatch;
440 : double rows;
441 : int num_skew_mcvs;
442 : int log2_nbuckets;
443 : int nkeys;
444 : int i;
445 : ListCell *ho;
446 : ListCell *hc;
447 : MemoryContext oldcxt;
448 :
449 : /*
450 : * Get information about the size of the relation to be hashed (it's the
451 : * "outer" subtree of this node, but the inner relation of the hashjoin).
452 : * Compute the appropriate size of the hash table.
453 : */
454 20550 : node = (Hash *) state->ps.plan;
455 20550 : outerNode = outerPlan(node);
456 :
457 : /*
458 : * If this is shared hash table with a partial plan, then we can't use
459 : * outerNode->plan_rows to estimate its size. We need an estimate of the
460 : * total number of rows across all copies of the partial plan.
461 : */
462 20550 : rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
463 :
464 20152 : ExecChooseHashTableSize(rows, outerNode->plan_width,
465 20550 : OidIsValid(node->skewTable),
466 20550 : state->parallel_state != NULL,
467 20550 : state->parallel_state != NULL ?
468 398 : state->parallel_state->nparticipants - 1 : 0,
469 : &space_allowed,
470 : &nbuckets, &nbatch, &num_skew_mcvs);
471 :
472 : /* nbuckets must be a power of 2 */
473 20550 : log2_nbuckets = my_log2(nbuckets);
474 : Assert(nbuckets == (1 << log2_nbuckets));
475 :
476 : /*
477 : * Initialize the hash table control block.
478 : *
479 : * The hashtable control block is just palloc'd from the executor's
480 : * per-query memory context. Everything else should be kept inside the
481 : * subsidiary hashCxt, batchCxt or spillCxt.
482 : */
483 20550 : hashtable = palloc_object(HashJoinTableData);
484 20550 : hashtable->nbuckets = nbuckets;
485 20550 : hashtable->nbuckets_original = nbuckets;
486 20550 : hashtable->nbuckets_optimal = nbuckets;
487 20550 : hashtable->log2_nbuckets = log2_nbuckets;
488 20550 : hashtable->log2_nbuckets_optimal = log2_nbuckets;
489 20550 : hashtable->buckets.unshared = NULL;
490 20550 : hashtable->keepNulls = keepNulls;
491 20550 : hashtable->skewEnabled = false;
492 20550 : hashtable->skewBucket = NULL;
493 20550 : hashtable->skewBucketLen = 0;
494 20550 : hashtable->nSkewBuckets = 0;
495 20550 : hashtable->skewBucketNums = NULL;
496 20550 : hashtable->nbatch = nbatch;
497 20550 : hashtable->curbatch = 0;
498 20550 : hashtable->nbatch_original = nbatch;
499 20550 : hashtable->nbatch_outstart = nbatch;
500 20550 : hashtable->growEnabled = true;
501 20550 : hashtable->totalTuples = 0;
502 20550 : hashtable->partialTuples = 0;
503 20550 : hashtable->skewTuples = 0;
504 20550 : hashtable->innerBatchFile = NULL;
505 20550 : hashtable->outerBatchFile = NULL;
506 20550 : hashtable->spaceUsed = 0;
507 20550 : hashtable->spacePeak = 0;
508 20550 : hashtable->spaceAllowed = space_allowed;
509 20550 : hashtable->spaceUsedSkew = 0;
510 20550 : hashtable->spaceAllowedSkew =
511 20550 : hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
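	/*
	 * Worked example, assuming SKEW_HASH_MEM_PERCENT is 2 (its current
	 * definition in executor/hashjoin.h): with a 64MB spaceAllowed, the
	 * skew hash table may use at most 64MB * 2 / 100 ~= 1.3MB before
	 * ExecHashRemoveNextSkewBucket has to start evicting skew buckets.
	 */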
512 20550 : hashtable->chunks = NULL;
513 20550 : hashtable->current_chunk = NULL;
514 20550 : hashtable->parallel_state = state->parallel_state;
515 20550 : hashtable->area = state->ps.state->es_query_dsa;
516 20550 : hashtable->batches = NULL;
517 :
518 : #ifdef HJDEBUG
519 : printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
520 : hashtable, nbatch, nbuckets);
521 : #endif
522 :
523 : /*
524 : * Create temporary memory contexts in which to keep the hashtable working
525 : * storage. See notes in executor/hashjoin.h.
526 : */
527 20550 : hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
528 : "HashTableContext",
529 : ALLOCSET_DEFAULT_SIZES);
530 :
531 20550 : hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
532 : "HashBatchContext",
533 : ALLOCSET_DEFAULT_SIZES);
534 :
535 20550 : hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
536 : "HashSpillContext",
537 : ALLOCSET_DEFAULT_SIZES);
538 :
539 : /* Allocate data that will live for the life of the hashjoin */
540 :
541 20550 : oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
542 :
543 : /*
544 : * Get info about the hash functions to be used for each hash key. Also
545 : * remember whether the join operators are strict.
546 : */
547 20550 : nkeys = list_length(hashOperators);
548 20550 : hashtable->outer_hashfunctions = palloc_array(FmgrInfo, nkeys);
549 20550 : hashtable->inner_hashfunctions = palloc_array(FmgrInfo, nkeys);
550 20550 : hashtable->hashStrict = palloc_array(bool, nkeys);
551 20550 : hashtable->collations = palloc_array(Oid, nkeys);
552 20550 : i = 0;
553 42440 : forboth(ho, hashOperators, hc, hashCollations)
554 : {
555 21890 : Oid hashop = lfirst_oid(ho);
556 : Oid left_hashfn;
557 : Oid right_hashfn;
558 :
559 21890 : if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
560 0 : elog(ERROR, "could not find hash function for hash operator %u",
561 : hashop);
562 21890 : fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
563 21890 : fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
564 21890 : hashtable->hashStrict[i] = op_strict(hashop);
565 21890 : hashtable->collations[i] = lfirst_oid(hc);
566 21890 : i++;
567 : }
568 :
569 20550 : if (nbatch > 1 && hashtable->parallel_state == NULL)
570 : {
571 : MemoryContext oldctx;
572 :
573 : /*
574 : * allocate and initialize the file arrays in hashCxt (not needed for
575 : * parallel case which uses shared tuplestores instead of raw files)
576 : */
577 104 : oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
578 :
579 104 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
580 104 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
581 :
582 104 : MemoryContextSwitchTo(oldctx);
583 :
584 : /* The files will not be opened until needed... */
585 : /* ... but make sure we have temp tablespaces established for them */
586 104 : PrepareTempTablespaces();
587 : }
588 :
589 20550 : MemoryContextSwitchTo(oldcxt);
590 :
591 20550 : if (hashtable->parallel_state)
592 : {
593 398 : ParallelHashJoinState *pstate = hashtable->parallel_state;
594 : Barrier *build_barrier;
595 :
596 : /*
597 : * Attach to the build barrier. The corresponding detach operation is
598 : * in ExecHashTableDetach. Note that we won't attach to the
599 : * batch_barrier for batch 0 yet. We'll attach later and start it out
600 : * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
601 : * then loaded while hashing (the standard hybrid hash join
602 : * algorithm), and we'll coordinate that using build_barrier.
603 : */
604 398 : build_barrier = &pstate->build_barrier;
605 398 : BarrierAttach(build_barrier);
606 :
607 : /*
608 : * So far we have no idea whether there are any other participants,
609 : * and if so, what phase they are working on. The only thing we care
610 : * about at this point is whether someone has already created the
611 : * SharedHashJoinBatch objects and the hash table for batch 0. One
612 : * backend will be elected to do that now if necessary.
613 : */
614 566 : if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
615 168 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
616 : {
617 168 : pstate->nbatch = nbatch;
618 168 : pstate->space_allowed = space_allowed;
619 168 : pstate->growth = PHJ_GROWTH_OK;
620 :
621 : /* Set up the shared state for coordinating batches. */
622 168 : ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
623 :
624 : /*
625 : * Allocate batch 0's hash table up front so we can load it
626 : * directly while hashing.
627 : */
628 168 : pstate->nbuckets = nbuckets;
629 168 : ExecParallelHashTableAlloc(hashtable, 0);
630 : }
631 :
632 : /*
633 : * The next Parallel Hash synchronization point is in
634 : * MultiExecParallelHash(), which will progress it all the way to
635 : * PHJ_BUILD_RUN. The caller must not return control from this
636 : * executor node between now and then.
637 : */
638 : }
639 : else
640 : {
641 : /*
642 : * Prepare context for the first-scan space allocations; allocate the
643 : * hashbucket array therein, and set each bucket "empty".
644 : */
645 20152 : MemoryContextSwitchTo(hashtable->batchCxt);
646 :
647 20152 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
648 :
649 : /*
650 : * Set up for skew optimization, if possible and there's a need for
651 : * more than one batch. (In a one-batch join, there's no point in
652 : * it.)
653 : */
654 20152 : if (nbatch > 1)
655 104 : ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
656 :
657 20152 : MemoryContextSwitchTo(oldcxt);
658 : }
659 :
660 20550 : return hashtable;
661 : }
662 :
663 :
664 : /*
665 : * Compute appropriate size for hashtable given the estimated size of the
666 : * relation to be hashed (number of rows and average row width).
667 : *
668 : * This is exported so that the planner's costsize.c can use it.
669 : */
670 :
671 : /* Target bucket loading (tuples per bucket) */
672 : #define NTUP_PER_BUCKET 1
673 :
674 : void
675 558974 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
676 : bool try_combined_hash_mem,
677 : int parallel_workers,
678 : size_t *space_allowed,
679 : int *numbuckets,
680 : int *numbatches,
681 : int *num_skew_mcvs)
682 : {
683 : int tupsize;
684 : double inner_rel_bytes;
685 : size_t hash_table_bytes;
686 : size_t bucket_bytes;
687 : size_t max_pointers;
688 558974 : int nbatch = 1;
689 : int nbuckets;
690 : double dbuckets;
691 :
692 : /* Force a plausible relation size if no info */
693 558974 : if (ntuples <= 0.0)
694 150 : ntuples = 1000.0;
695 :
696 : /*
697 : * Estimate tupsize based on footprint of tuple in hashtable... note this
698 : * does not allow for any palloc overhead. The manipulations of spaceUsed
699 : * don't count palloc overhead either.
700 : */
701 558974 : tupsize = HJTUPLE_OVERHEAD +
702 558974 : MAXALIGN(SizeofMinimalTupleHeader) +
703 558974 : MAXALIGN(tupwidth);
704 558974 : inner_rel_bytes = ntuples * tupsize;
705 :
706 : /*
707 : * Compute in-memory hashtable size limit from GUCs.
708 : */
709 558974 : hash_table_bytes = get_hash_memory_limit();
710 :
711 : /*
712 : * Parallel Hash tries to use the combined hash_mem of all workers to
713 : * avoid the need to batch. If that won't work, it falls back to hash_mem
714 : * per worker and tries to process batches in parallel.
715 : */
716 558974 : if (try_combined_hash_mem)
717 : {
718 : /* Careful, this could overflow size_t */
719 : double newlimit;
720 :
721 11972 : newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
722 11972 : newlimit = Min(newlimit, (double) SIZE_MAX);
723 11972 : hash_table_bytes = (size_t) newlimit;
724 : }
725 :
726 558974 : *space_allowed = hash_table_bytes;
727 :
728 : /*
729 : * If skew optimization is possible, estimate the number of skew buckets
730 : * that will fit in the memory allowed, and decrement the assumed space
731 : * available for the main hash table accordingly.
732 : *
733 : * We make the optimistic assumption that each skew bucket will contain
734 : * one inner-relation tuple. If that turns out to be low, we will recover
735 : * at runtime by reducing the number of skew buckets.
736 : *
737 : * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
738 : * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
739 : * will round up to the next power of 2 and then multiply by 4 to reduce
740 : * collisions.
741 : */
742 558974 : if (useskew)
743 : {
744 : size_t bytes_per_mcv;
745 : size_t skew_mcvs;
746 :
747 : /*----------
748 : * Compute number of MCVs we could hold in hash_table_bytes
749 : *
750 : * Divisor is:
751 : * size of a hash tuple +
752 : * worst-case size of skewBucket[] per MCV +
753 : * size of skewBucketNums[] entry +
754 : * size of skew bucket struct itself
755 : *----------
756 : */
757 554888 : bytes_per_mcv = tupsize +
758 : (8 * sizeof(HashSkewBucket *)) +
759 554888 : sizeof(int) +
760 : SKEW_BUCKET_OVERHEAD;
761 554888 : skew_mcvs = hash_table_bytes / bytes_per_mcv;
762 :
763 : /*
764 : * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
765 : * not to worry about size_t overflow in the multiplication)
766 : */
767 554888 : skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
768 :
769 : /* Now clamp to integer range */
770 554888 : skew_mcvs = Min(skew_mcvs, INT_MAX);
771 :
772 554888 : *num_skew_mcvs = (int) skew_mcvs;
773 :
774 : /* Reduce hash_table_bytes by the amount needed for the skew table */
775 554888 : if (skew_mcvs > 0)
776 554888 : hash_table_bytes -= skew_mcvs * bytes_per_mcv;
777 : }
778 : else
779 4086 : *num_skew_mcvs = 0;
780 :
781 : /*
782 : * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
783 : * memory is filled, assuming a single batch; but limit the value so that
784 : * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
785 : * nor MaxAllocSize.
786 : *
787 : * Note that both nbuckets and nbatch must be powers of 2 to make
788 : * ExecHashGetBucketAndBatch fast.
789 : */
790 558974 : max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
791 558974 : max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
792 : /* If max_pointers isn't a power of 2, must round it down to one */
793 558974 : max_pointers = pg_prevpower2_size_t(max_pointers);
794 :
795 : /* Also ensure we avoid integer overflow in nbatch and nbuckets */
796 : /* (this step is redundant given the current value of MaxAllocSize) */
797 558974 : max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
798 :
799 558974 : dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
800 558974 : dbuckets = Min(dbuckets, max_pointers);
801 558974 : nbuckets = (int) dbuckets;
802 : /* don't let nbuckets be really small, though ... */
803 558974 : nbuckets = Max(nbuckets, 1024);
804 : /* ... and force it to be a power of 2. */
805 558974 : nbuckets = pg_nextpower2_32(nbuckets);
806 :
807 : /*
808 : * If there's not enough space to store the projected number of tuples and
809 : * the required bucket headers, we will need multiple batches.
810 : */
811 558974 : bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
812 558974 : if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
813 : {
814 : /* We'll need multiple batches */
815 : size_t sbuckets;
816 : double dbatch;
817 : int minbatch;
818 : size_t bucket_size;
819 :
820 : /*
821 : * If Parallel Hash with combined hash_mem would still need multiple
822 : * batches, we'll have to fall back to regular hash_mem budget.
823 : */
824 4902 : if (try_combined_hash_mem)
825 : {
826 246 : ExecChooseHashTableSize(ntuples, tupwidth, useskew,
827 : false, parallel_workers,
828 : space_allowed,
829 : numbuckets,
830 : numbatches,
831 : num_skew_mcvs);
832 246 : return;
833 : }
834 :
835 : /*
836 : * Estimate the number of buckets we'll want to have when hash_mem is
837 : * entirely full. Each bucket will contain a bucket pointer plus
838 : * NTUP_PER_BUCKET tuples, whose projected size already includes
839 : * overhead for the hash code, pointer to the next tuple, etc.
840 : */
841 4656 : bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
842 4656 : if (hash_table_bytes <= bucket_size)
843 0 : sbuckets = 1; /* avoid pg_nextpower2_size_t(0) */
844 : else
845 4656 : sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
846 4656 : sbuckets = Min(sbuckets, max_pointers);
847 4656 : nbuckets = (int) sbuckets;
848 4656 : nbuckets = pg_nextpower2_32(nbuckets);
849 4656 : bucket_bytes = nbuckets * sizeof(HashJoinTuple);
850 :
851 : /*
852 : * Buckets are simple pointers to hashjoin tuples, while tupsize
853 : * includes the pointer, hash code, and MinimalTupleData. So buckets
854 : * should never really exceed 25% of hash_mem (even for
855 : * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
856 : * 2^N bytes, where we might get more because of doubling. So let's
857 : * look for 50% here.
858 : */
859 : Assert(bucket_bytes <= hash_table_bytes / 2);
860 :
861 : /* Calculate required number of batches. */
862 4656 : dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
863 4656 : dbatch = Min(dbatch, max_pointers);
864 4656 : minbatch = (int) dbatch;
865 4656 : nbatch = pg_nextpower2_32(Max(2, minbatch));
866 : }
867 :
868 : Assert(nbuckets > 0);
869 : Assert(nbatch > 0);
870 :
871 558728 : *numbuckets = nbuckets;
872 558728 : *numbatches = nbatch;
873 : }
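
/*
 * A minimal standalone sketch of the sizing rule above (hypothetical
 * helper, not part of nodeHash.c): it assumes NTUP_PER_BUCKET = 1,
 * ignores the skew-table and parallel adjustments, and grows nbatch by
 * doubling until one batch's share of the tuples plus the bucket array
 * fits in the budget, instead of computing nbatch directly as the real
 * code does.
 */
#ifdef HASH_SIZING_SKETCH
#include <limits.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t
sketch_next_pow2(uint32_t v)
{
	uint32_t	r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static void
sketch_choose_hash_table_size(double ntuples, int tupsize,
							  size_t hash_table_bytes,
							  int *numbuckets, int *numbatches)
{
	double		inner_rel_bytes = ntuples * tupsize;
	uint32_t	nbuckets;
	int			nbatch = 1;

	/* one bucket per expected tuple, at least 1024, a power of 2 */
	nbuckets = (uint32_t) fmin(ceil(ntuples), (double) (1 << 30));
	if (nbuckets < 1024)
		nbuckets = 1024;
	nbuckets = sketch_next_pow2(nbuckets);

	/* double nbatch until a single batch fits in the memory budget */
	while (nbatch < INT_MAX / 2 &&
		   inner_rel_bytes / nbatch +
		   nbuckets * sizeof(void *) > hash_table_bytes)
		nbatch *= 2;

	*numbuckets = (int) nbuckets;
	*numbatches = nbatch;
}
#endif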
874 :
875 :
876 : /* ----------------------------------------------------------------
877 : * ExecHashTableDestroy
878 : *
879 : * destroy a hash table
880 : * ----------------------------------------------------------------
881 : */
882 : void
883 20446 : ExecHashTableDestroy(HashJoinTable hashtable)
884 : {
885 : int i;
886 :
887 : /*
888 : * Make sure all the temp files are closed. We skip batch 0, since it
889 : * can't have any temp files (and the arrays might not even exist if
890 : * nbatch is only 1). Parallel hash joins don't use these files.
891 : */
892 20446 : if (hashtable->innerBatchFile != NULL)
893 : {
894 1308 : for (i = 1; i < hashtable->nbatch; i++)
895 : {
896 1148 : if (hashtable->innerBatchFile[i])
897 0 : BufFileClose(hashtable->innerBatchFile[i]);
898 1148 : if (hashtable->outerBatchFile[i])
899 0 : BufFileClose(hashtable->outerBatchFile[i]);
900 : }
901 : }
902 :
903 : /* Release working memory (batchCxt is a child, so it goes away too) */
904 20446 : MemoryContextDelete(hashtable->hashCxt);
905 :
906 : /* And drop the control block */
907 20446 : pfree(hashtable);
908 20446 : }
909 :
910 : /*
911 : * ExecHashIncreaseNumBatches
912 : * increase the original number of batches in order to reduce
913 : * current memory consumption
914 : */
915 : static void
916 449142 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
917 : {
918 449142 : int oldnbatch = hashtable->nbatch;
919 449142 : int curbatch = hashtable->curbatch;
920 : int nbatch;
921 : long ninmemory;
922 : long nfreed;
923 : HashMemoryChunk oldchunks;
924 :
925 : /* do nothing if we've decided to shut off growth */
926 449142 : if (!hashtable->growEnabled)
927 449020 : return;
928 :
929 : /* safety check to avoid overflow */
930 122 : if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
931 0 : return;
932 :
933 122 : nbatch = oldnbatch * 2;
934 : Assert(nbatch > 1);
935 :
936 : #ifdef HJDEBUG
937 : printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
938 : hashtable, nbatch, hashtable->spaceUsed);
939 : #endif
940 :
941 122 : if (hashtable->innerBatchFile == NULL)
942 : {
943 56 : MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
944 :
945 : /* we had no file arrays before */
946 56 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
947 56 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
948 :
949 56 : MemoryContextSwitchTo(oldcxt);
950 :
951 : /* time to establish the temp tablespaces, too */
952 56 : PrepareTempTablespaces();
953 : }
954 : else
955 : {
956 : /* enlarge arrays and zero out added entries */
957 66 : hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
958 66 : hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
959 : }
960 :
961 122 : hashtable->nbatch = nbatch;
962 :
963 : /*
964 : * Scan through the existing hash table entries and dump out any that are
965 : * no longer of the current batch.
966 : */
967 122 : ninmemory = nfreed = 0;
968 :
969 : /* If we know we need to resize nbuckets, we can do it while rebatching. */
970 122 : if (hashtable->nbuckets_optimal != hashtable->nbuckets)
971 : {
972 : /* we never decrease the number of buckets */
973 : Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
974 :
975 56 : hashtable->nbuckets = hashtable->nbuckets_optimal;
976 56 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
977 :
978 56 : hashtable->buckets.unshared =
979 56 : repalloc_array(hashtable->buckets.unshared,
980 : HashJoinTuple, hashtable->nbuckets);
981 : }
982 :
983 : /*
984 : * We will scan through the chunks directly, so that we can reset the
985 : * buckets now and not have to keep track which tuples in the buckets have
986 : * already been processed. We will free the old chunks as we go.
987 : */
988 122 : memset(hashtable->buckets.unshared, 0,
989 122 : sizeof(HashJoinTuple) * hashtable->nbuckets);
990 122 : oldchunks = hashtable->chunks;
991 122 : hashtable->chunks = NULL;
992 :
993 : /* so, let's scan through the old chunks, and all tuples in each chunk */
994 610 : while (oldchunks != NULL)
995 : {
996 488 : HashMemoryChunk nextchunk = oldchunks->next.unshared;
997 :
998 : /* position within the buffer (up to oldchunks->used) */
999 488 : size_t idx = 0;
1000 :
1001 : /* process all tuples stored in this chunk (and then free it) */
1002 333238 : while (idx < oldchunks->used)
1003 : {
1004 332750 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
1005 332750 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1006 332750 : int hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
1007 : int bucketno;
1008 : int batchno;
1009 :
1010 332750 : ninmemory++;
1011 332750 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1012 : &bucketno, &batchno);
1013 :
1014 332750 : if (batchno == curbatch)
1015 : {
1016 : /* keep tuple in memory - copy it into the new chunk */
1017 : HashJoinTuple copyTuple;
1018 :
1019 129894 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1020 129894 : memcpy(copyTuple, hashTuple, hashTupleSize);
1021 :
1022 : /* and add it back to the appropriate bucket */
1023 129894 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1024 129894 : hashtable->buckets.unshared[bucketno] = copyTuple;
1025 : }
1026 : else
1027 : {
1028 : /* dump it out */
1029 : Assert(batchno > curbatch);
1030 202856 : ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
1031 : hashTuple->hashvalue,
1032 202856 : &hashtable->innerBatchFile[batchno],
1033 : hashtable);
1034 :
1035 202856 : hashtable->spaceUsed -= hashTupleSize;
1036 202856 : nfreed++;
1037 : }
1038 :
1039 : /* next tuple in this chunk */
1040 332750 : idx += MAXALIGN(hashTupleSize);
1041 :
1042 : /* allow this loop to be cancellable */
1043 332750 : CHECK_FOR_INTERRUPTS();
1044 : }
1045 :
1046 : /* we're done with this chunk - free it and proceed to the next one */
1047 488 : pfree(oldchunks);
1048 488 : oldchunks = nextchunk;
1049 : }
1050 :
1051 : #ifdef HJDEBUG
1052 : printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
1053 : hashtable, nfreed, ninmemory, hashtable->spaceUsed);
1054 : #endif
1055 :
1056 : /*
1057 : * If we dumped out either all or none of the tuples in the table, disable
1058 : * further expansion of nbatch. This situation implies that we have
1059 : * enough tuples of identical hashvalues to overflow spaceAllowed.
1060 : * Increasing nbatch will not fix it since there's no way to subdivide the
1061 : * group any more finely. We have to just gut it out and hope the server
1062 : * has enough RAM.
1063 : */
1064 122 : if (nfreed == 0 || nfreed == ninmemory)
1065 : {
1066 26 : hashtable->growEnabled = false;
1067 : #ifdef HJDEBUG
1068 : printf("Hashjoin %p: disabling further increase of nbatch\n",
1069 : hashtable);
1070 : #endif
1071 : }
1072 : }
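
/*
 * A sketch of why doubling nbatch is cheap to apply.  This is a
 * simplified variant: the real ExecHashGetBucketAndBatch uses a rotate
 * rather than a plain shift, but the power-of-2 masking idea is the
 * same.  Because the batch number is hash bits masked with (nbatch - 1),
 * doubling nbatch exposes exactly one more bit: a tuple in batch b
 * either stays in batch b or moves to batch b + oldnbatch, never
 * anywhere else, which is what lets ExecHashIncreaseNumBatches keep or
 * dump each tuple after a single recomputation.  (Hypothetical helper.)
 */
#ifdef HASH_BUCKET_BATCH_SKETCH
static void
sketch_get_bucket_and_batch(uint32 hashvalue, int log2_nbuckets,
							uint32 nbuckets, uint32 nbatch,
							int *bucketno, int *batchno)
{
	/* low bits pick the bucket within one batch's hash table */
	*bucketno = (int) (hashvalue & (nbuckets - 1));
	/* the next log2(nbatch) bits pick the batch */
	*batchno = (int) ((hashvalue >> log2_nbuckets) & (nbatch - 1));
}
#endif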
1073 :
1074 : /*
1075 : * ExecParallelHashIncreaseNumBatches
1076 : * Every participant attached to grow_batches_barrier must run this
1077 : * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
1078 : */
1079 : static void
1080 48 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
1081 : {
1082 48 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1083 :
1084 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1085 :
1086 : /*
1087 : * It's unlikely, but we need to be prepared for new participants to show
1088 : * up while we're in the middle of this operation, so we need to switch on
1089 : * barrier phase here.
1090 : */
1091 48 : switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
1092 : {
1093 48 : case PHJ_GROW_BATCHES_ELECT:
1094 :
1095 : /*
1096 : * Elect one participant to prepare to grow the number of batches.
1097 : * This involves reallocating or resetting the buckets of batch 0
1098 : * in preparation for all participants to begin repartitioning the
1099 : * tuples.
1100 : */
1101 48 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1102 : WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
1103 : {
1104 : dsa_pointer_atomic *buckets;
1105 : ParallelHashJoinBatch *old_batch0;
1106 : int new_nbatch;
1107 : int i;
1108 :
1109 : /* Move the old batch out of the way. */
1110 48 : old_batch0 = hashtable->batches[0].shared;
1111 48 : pstate->old_batches = pstate->batches;
1112 48 : pstate->old_nbatch = hashtable->nbatch;
1113 48 : pstate->batches = InvalidDsaPointer;
1114 :
1115 : /* Free this backend's old accessors. */
1116 48 : ExecParallelHashCloseBatchAccessors(hashtable);
1117 :
1118 : /* Figure out how many batches to use. */
1119 48 : if (hashtable->nbatch == 1)
1120 : {
1121 : /*
1122 : * We are going from single-batch to multi-batch. We need
1123 : * to switch from one large combined memory budget to the
1124 : * regular hash_mem budget.
1125 : */
1126 36 : pstate->space_allowed = get_hash_memory_limit();
1127 :
1128 : /*
1129 : * The combined hash_mem of all participants wasn't
1130 : * enough. Therefore one batch per participant would be
1131 : * approximately equivalent and would probably also be
1132 : * insufficient. So try two batches per participant,
1133 : * rounded up to a power of two.
1134 : */
1135 36 : new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
1136 : }
1137 : else
1138 : {
1139 : /*
1140 : * We were already multi-batched. Try doubling the number
1141 : * of batches.
1142 : */
1143 12 : new_nbatch = hashtable->nbatch * 2;
1144 : }
1145 :
1146 : /* Allocate new larger generation of batches. */
1147 : Assert(hashtable->nbatch == pstate->nbatch);
1148 48 : ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
1149 : Assert(hashtable->nbatch == pstate->nbatch);
1150 :
1151 : /* Replace or recycle batch 0's bucket array. */
1152 48 : if (pstate->old_nbatch == 1)
1153 : {
1154 : double dtuples;
1155 : double dbuckets;
1156 : int new_nbuckets;
1157 : uint32 max_buckets;
1158 :
1159 : /*
1160 : * We probably also need a smaller bucket array. How many
1161 : * tuples do we expect per batch, assuming we have only
1162 : * half of them so far? Normally we don't need to change
1163 : * the bucket array's size, because the size of each batch
1164 : * stays the same as we add more batches, but in this
1165 : * special case we move from a large batch to many smaller
1166 : * batches and it would be wasteful to keep the large
1167 : * array.
1168 : */
1169 36 : dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
1170 :
1171 : /*
1172 : * We need to calculate the maximum number of buckets to
1173 : * stay within the MaxAllocSize boundary. Round the
1174 : * maximum number to the previous power of 2 given that
1175 : * later we round the number to the next power of 2.
1176 : */
1177 36 : max_buckets = pg_prevpower2_32((uint32)
1178 : (MaxAllocSize / sizeof(dsa_pointer_atomic)));
1179 36 : dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
1180 36 : dbuckets = Min(dbuckets, max_buckets);
1181 36 : new_nbuckets = (int) dbuckets;
1182 36 : new_nbuckets = Max(new_nbuckets, 1024);
1183 36 : new_nbuckets = pg_nextpower2_32(new_nbuckets);
1184 36 : dsa_free(hashtable->area, old_batch0->buckets);
1185 72 : hashtable->batches[0].shared->buckets =
1186 36 : dsa_allocate(hashtable->area,
1187 : sizeof(dsa_pointer_atomic) * new_nbuckets);
1188 : buckets = (dsa_pointer_atomic *)
1189 36 : dsa_get_address(hashtable->area,
1190 36 : hashtable->batches[0].shared->buckets);
1191 110628 : for (i = 0; i < new_nbuckets; ++i)
1192 110592 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1193 36 : pstate->nbuckets = new_nbuckets;
1194 : }
1195 : else
1196 : {
1197 : /* Recycle the existing bucket array. */
1198 12 : hashtable->batches[0].shared->buckets = old_batch0->buckets;
1199 : buckets = (dsa_pointer_atomic *)
1200 12 : dsa_get_address(hashtable->area, old_batch0->buckets);
1201 49164 : for (i = 0; i < hashtable->nbuckets; ++i)
1202 49152 : dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
1203 : }
1204 :
1205 : /* Move all chunks to the work queue for parallel processing. */
1206 48 : pstate->chunk_work_queue = old_batch0->chunks;
1207 :
1208 : /* Disable further growth temporarily while we're growing. */
1209 48 : pstate->growth = PHJ_GROWTH_DISABLED;
1210 : }
1211 : else
1212 : {
1213 : /* All other participants just flush their tuples to disk. */
1214 0 : ExecParallelHashCloseBatchAccessors(hashtable);
1215 : }
1216 : /* Fall through. */
1217 :
1218 : case PHJ_GROW_BATCHES_REALLOCATE:
1219 : /* Wait for the above to be finished. */
1220 48 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1221 : WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
1222 : /* Fall through. */
1223 :
1224 48 : case PHJ_GROW_BATCHES_REPARTITION:
1225 : /* Make sure that we have the current dimensions and buckets. */
1226 48 : ExecParallelHashEnsureBatchAccessors(hashtable);
1227 48 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1228 : /* Then partition, flush counters. */
1229 48 : ExecParallelHashRepartitionFirst(hashtable);
1230 48 : ExecParallelHashRepartitionRest(hashtable);
1231 48 : ExecParallelHashMergeCounters(hashtable);
1232 : /* Wait for the above to be finished. */
1233 48 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1234 : WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
1235 : /* Fall through. */
1236 :
1237 48 : case PHJ_GROW_BATCHES_DECIDE:
1238 :
1239 : /*
1240 : * Elect one participant to clean up and decide whether further
1241 : * repartitioning is needed, or should be disabled because it's
1242 : * not helping.
1243 : */
1244 48 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1245 : WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
1246 : {
1247 48 : bool space_exhausted = false;
1248 48 : bool extreme_skew_detected = false;
1249 :
1250 : /* Make sure that we have the current dimensions and buckets. */
1251 48 : ExecParallelHashEnsureBatchAccessors(hashtable);
1252 48 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1253 :
1254 : /* Are any of the new generation of batches exhausted? */
1255 336 : for (int i = 0; i < hashtable->nbatch; ++i)
1256 : {
1257 288 : ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
1258 :
1259 288 : if (batch->space_exhausted ||
1260 288 : batch->estimated_size > pstate->space_allowed)
1261 : {
1262 : int parent;
1263 :
1264 24 : space_exhausted = true;
1265 :
1266 : /*
1267 : * Did this batch receive ALL of the tuples from its
1268 : * parent batch? That would indicate that further
1269 : * repartitioning isn't going to help (the hash values
1270 : * are probably all the same).
1271 : */
1272 24 : parent = i % pstate->old_nbatch;
1273 24 : if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
1274 24 : extreme_skew_detected = true;
1275 : }
1276 : }
1277 :
1278 : /* Don't keep growing if it's not helping or we'd overflow. */
1279 48 : if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
1280 24 : pstate->growth = PHJ_GROWTH_DISABLED;
1281 24 : else if (space_exhausted)
1282 0 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
1283 : else
1284 24 : pstate->growth = PHJ_GROWTH_OK;
1285 :
1286 : /* Free the old batches in shared memory. */
1287 48 : dsa_free(hashtable->area, pstate->old_batches);
1288 48 : pstate->old_batches = InvalidDsaPointer;
1289 : }
1290 : /* Fall through. */
1291 :
1292 : case PHJ_GROW_BATCHES_FINISH:
1293 : /* Wait for the above to complete. */
1294 48 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1295 : WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
1296 : }
1297 48 : }
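
/*
 * Worked example for the PHJ_GROW_BATCHES_DECIDE skew test above: when
 * nbatch grows, new batch i receives tuples only from old batch
 * i % old_nbatch, because the old batch number is the low bits of the
 * new one.  With old_nbatch = 4 and nbatch = 8, batch 6 can only be
 * filled from old batch 2; and if it received ALL of old batch 2's
 * tuples, those tuples also agree on the newly added hash bit, so
 * splitting again cannot separate them -- that is the extreme skew that
 * disables further growth.
 */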
1298 :
1299 : /*
1300 : * Repartition the tuples currently loaded into memory for inner batch 0
1301 : * because the number of batches has been increased. Some tuples are retained
1302 : * in memory and some are written out to a later batch.
1303 : */
1304 : static void
1305 48 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
1306 : {
1307 : dsa_pointer chunk_shared;
1308 : HashMemoryChunk chunk;
1309 :
1310 : Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
1311 :
1312 336 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
1313 : {
1314 288 : size_t idx = 0;
1315 :
1316 : /* Repartition all tuples in this chunk. */
1317 220860 : while (idx < chunk->used)
1318 : {
1319 220572 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1320 220572 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1321 : HashJoinTuple copyTuple;
1322 : dsa_pointer shared;
1323 : int bucketno;
1324 : int batchno;
1325 :
1326 220572 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1327 : &bucketno, &batchno);
1328 :
1329 : Assert(batchno < hashtable->nbatch);
1330 220572 : if (batchno == 0)
1331 : {
1332 : /* It still belongs in batch 0. Copy to a new chunk. */
1333 : copyTuple =
1334 50748 : ExecParallelHashTupleAlloc(hashtable,
1335 50748 : HJTUPLE_OVERHEAD + tuple->t_len,
1336 : &shared);
1337 50748 : copyTuple->hashvalue = hashTuple->hashvalue;
1338 50748 : memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
1339 50748 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1340 : copyTuple, shared);
1341 : }
1342 : else
1343 : {
1344 169824 : size_t tuple_size =
1345 169824 : MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1346 :
1347 : /* It belongs in a later batch. */
1348 169824 : hashtable->batches[batchno].estimated_size += tuple_size;
1349 169824 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1350 169824 : &hashTuple->hashvalue, tuple);
1351 : }
1352 :
1353 : /* Count this tuple. */
1354 220572 : ++hashtable->batches[0].old_ntuples;
1355 220572 : ++hashtable->batches[batchno].ntuples;
1356 :
1357 220572 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1358 : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1359 : }
1360 :
1361 : /* Free this chunk. */
1362 288 : dsa_free(hashtable->area, chunk_shared);
1363 :
1364 288 : CHECK_FOR_INTERRUPTS();
1365 : }
1366 48 : }
1367 :
1368 : /*
1369 : * Help repartition inner batches 1..n.
1370 : */
1371 : static void
1372 48 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
1373 : {
1374 48 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1375 48 : int old_nbatch = pstate->old_nbatch;
1376 : SharedTuplestoreAccessor **old_inner_tuples;
1377 : ParallelHashJoinBatch *old_batches;
1378 : int i;
1379 :
1380 : /* Get our hands on the previous generation of batches. */
1381 : old_batches = (ParallelHashJoinBatch *)
1382 48 : dsa_get_address(hashtable->area, pstate->old_batches);
1383 48 : old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
1384 84 : for (i = 1; i < old_nbatch; ++i)
1385 : {
1386 36 : ParallelHashJoinBatch *shared =
1387 36 : NthParallelHashJoinBatch(old_batches, i);
1388 :
1389 36 : old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
1390 : ParallelWorkerNumber + 1,
1391 : &pstate->fileset);
1392 : }
1393 :
1394 : /* Join in the effort to repartition them. */
1395 84 : for (i = 1; i < old_nbatch; ++i)
1396 : {
1397 : MinimalTuple tuple;
1398 : uint32 hashvalue;
1399 :
1400 : /* Scan one partition from the previous generation. */
1401 36 : sts_begin_parallel_scan(old_inner_tuples[i]);
1402 161400 : while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
1403 : {
1404 161364 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1405 : int bucketno;
1406 : int batchno;
1407 :
1408 : /* Decide which partition it goes to in the new generation. */
1409 161364 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
1410 : &batchno);
1411 :
1412 161364 : hashtable->batches[batchno].estimated_size += tuple_size;
1413 161364 : ++hashtable->batches[batchno].ntuples;
1414 161364 : ++hashtable->batches[i].old_ntuples;
1415 :
1416 : /* Store the tuple in its new batch. */
1417 161364 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1418 : &hashvalue, tuple);
1419 :
1420 161364 : CHECK_FOR_INTERRUPTS();
1421 : }
1422 36 : sts_end_parallel_scan(old_inner_tuples[i]);
1423 : }
1424 :
1425 48 : pfree(old_inner_tuples);
1426 48 : }
1427 :
1428 : /*
1429 : * Transfer the backend-local per-batch counters to the shared totals.
1430 : */
1431 : static void
1432 288 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
1433 : {
1434 288 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1435 : int i;
1436 :
1437 288 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
1438 288 : pstate->total_tuples = 0;
1439 1650 : for (i = 0; i < hashtable->nbatch; ++i)
1440 : {
1441 1362 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
1442 :
1443 1362 : batch->shared->size += batch->size;
1444 1362 : batch->shared->estimated_size += batch->estimated_size;
1445 1362 : batch->shared->ntuples += batch->ntuples;
1446 1362 : batch->shared->old_ntuples += batch->old_ntuples;
1447 1362 : batch->size = 0;
1448 1362 : batch->estimated_size = 0;
1449 1362 : batch->ntuples = 0;
1450 1362 : batch->old_ntuples = 0;
1451 1362 : pstate->total_tuples += batch->shared->ntuples;
1452 : }
1453 288 : LWLockRelease(&pstate->lock);
1454 288 : }
1455 :
1456 : /*
1457 : * ExecHashIncreaseNumBuckets
1458 : * increase the original number of buckets in order to reduce
1459 : * number of tuples per bucket
1460 : */
1461 : static void
1462 72 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
1463 : {
1464 : HashMemoryChunk chunk;
1465 :
1466 : /* do nothing if not an increase (it's called increase for a reason) */
1467 72 : if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
1468 0 : return;
1469 :
1470 : #ifdef HJDEBUG
1471 : printf("Hashjoin %p: increasing nbuckets %d => %d\n",
1472 : hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
1473 : #endif
1474 :
1475 72 : hashtable->nbuckets = hashtable->nbuckets_optimal;
1476 72 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1477 :
1478 : Assert(hashtable->nbuckets > 1);
1479 : Assert(hashtable->nbuckets <= (INT_MAX / 2));
1480 : Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
1481 :
1482 : /*
1483 : * Just reallocate the proper number of buckets - we don't need to walk
1484 : * through them - we can walk the dense-allocated chunks (just like in
1485 : * ExecHashIncreaseNumBatches, but without all the copying into new
1486 : * chunks)
1487 : */
1488 72 : hashtable->buckets.unshared =
1489 72 : repalloc_array(hashtable->buckets.unshared,
1490 : HashJoinTuple, hashtable->nbuckets);
1491 :
1492 72 : memset(hashtable->buckets.unshared, 0,
1493 72 : hashtable->nbuckets * sizeof(HashJoinTuple));
1494 :
1495 : /* scan through all tuples in all chunks to rebuild the hash table */
1496 1008 : for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
1497 : {
1498 : /* process all tuples stored in this chunk */
1499 936 : size_t idx = 0;
1500 :
1501 720936 : while (idx < chunk->used)
1502 : {
1503 720000 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1504 : int bucketno;
1505 : int batchno;
1506 :
1507 720000 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1508 : &bucketno, &batchno);
1509 :
1510 : /* add the tuple to the proper bucket */
1511 720000 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1512 720000 : hashtable->buckets.unshared[bucketno] = hashTuple;
1513 :
1514 : /* advance index past the tuple */
1515 720000 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1516 : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1517 : }
1518 :
1519 : /* allow this loop to be cancellable */
1520 936 : CHECK_FOR_INTERRUPTS();
1521 : }
1522 : }
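
/*
 * A minimal sketch of the dense-chunk walk used above and in
 * ExecHashIncreaseNumBatches: tuples are packed back to back inside
 * each chunk, so a scan just advances by each tuple's MAXALIGN'd size
 * until it reaches chunk->used, with no per-tuple bookkeeping.
 * (Illustrative only; types and macros as declared in
 * executor/hashjoin.h, and the hypothetical visit() callback stands in
 * for whatever per-tuple work a caller would do.)
 */
#ifdef HASH_CHUNK_WALK_SKETCH
static void
sketch_walk_chunks(HashJoinTable hashtable,
				   void (*visit) (HashJoinTuple tuple))
{
	HashMemoryChunk chunk;

	for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
	{
		size_t		idx = 0;

		while (idx < chunk->used)
		{
			HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);

			visit(hashTuple);

			/* advance index past the tuple */
			idx += MAXALIGN(HJTUPLE_OVERHEAD +
							HJTUPLE_MINTUPLE(hashTuple)->t_len);
		}
	}
}
#endif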
1523 :
1524 : static void
1525 138 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
1526 : {
1527 138 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1528 : int i;
1529 : HashMemoryChunk chunk;
1530 : dsa_pointer chunk_s;
1531 :
1532 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1533 :
1534 : /*
1535 : * It's unlikely, but we need to be prepared for new participants to show
1536 : * up while we're in the middle of this operation, so we need to switch on
1537 : * barrier phase here.
1538 : */
1539 138 : switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
1540 : {
1541 138 : case PHJ_GROW_BUCKETS_ELECT:
1542 : /* Elect one participant to prepare to increase nbuckets. */
1543 138 : if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1544 : WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
1545 : {
1546 : size_t size;
1547 : dsa_pointer_atomic *buckets;
1548 :
1549 : /* Double the size of the bucket array. */
1550 108 : pstate->nbuckets *= 2;
1551 108 : size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
1552 108 : hashtable->batches[0].shared->size += size / 2;
1553 108 : dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
1554 216 : hashtable->batches[0].shared->buckets =
1555 108 : dsa_allocate(hashtable->area, size);
1556 : buckets = (dsa_pointer_atomic *)
1557 108 : dsa_get_address(hashtable->area,
1558 108 : hashtable->batches[0].shared->buckets);
1559 933996 : for (i = 0; i < pstate->nbuckets; ++i)
1560 933888 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1561 :
1562 : /* Put the chunk list onto the work queue. */
1563 108 : pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
1564 :
1565 : /* Clear the flag. */
1566 108 : pstate->growth = PHJ_GROWTH_OK;
1567 : }
1568 : /* Fall through. */
1569 :
1570 : case PHJ_GROW_BUCKETS_REALLOCATE:
1571 : /* Wait for the above to complete. */
1572 138 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1573 : WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
1574 : /* Fall through. */
1575 :
1576 138 : case PHJ_GROW_BUCKETS_REINSERT:
1577 : /* Reinsert all tuples into the hash table. */
1578 138 : ExecParallelHashEnsureBatchAccessors(hashtable);
1579 138 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1580 800 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
1581 : {
1582 662 : size_t idx = 0;
1583 :
1584 540384 : while (idx < chunk->used)
1585 : {
1586 539722 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1587 539722 : dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
1588 : int bucketno;
1589 : int batchno;
1590 :
1591 539722 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1592 : &bucketno, &batchno);
1593 : Assert(batchno == 0);
1594 :
1595 : /* add the tuple to the proper bucket */
1596 539722 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1597 : hashTuple, shared);
1598 :
1599 : /* advance index past the tuple */
1600 539722 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1601 : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1602 : }
1603 :
1604 : /* allow this loop to be cancellable */
1605 662 : CHECK_FOR_INTERRUPTS();
1606 : }
1607 138 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1608 : WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
1609 : }
1610 138 : }
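
/*
 * The switch above acts as a small phase machine: a participant that
 * attaches late enters at whatever phase the grow_buckets_barrier reports
 * and performs only the remaining steps. Schematically (a sketch of the
 * control flow above, not additional logic):
 *
 *   switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
 *   {
 *       case PHJ_GROW_BUCKETS_ELECT:       one elected backend reallocates
 *                                          the bucket array; fall through
 *       case PHJ_GROW_BUCKETS_REALLOCATE:  everyone waits for that to
 *                                          finish; fall through
 *       case PHJ_GROW_BUCKETS_REINSERT:    all participants pop chunks off
 *                                          the work queue and reinsert
 *   }
 */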
1611 :
1612 : /*
1613 : * ExecHashTableInsert
1614 : * insert a tuple into the hash table; depending on the hash value,
1615 : * it may instead go to a temp file to be loaded in a later batch
1616 : *
1617 : * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
1618 : * tuple; the minimal case in particular is certain to happen while reloading
1619 : * tuples from batch files. We could save some cycles in the regular-tuple
1620 : * case by not forcing the slot contents into minimal form; not clear if it's
1621 : * worth the messiness required.
1622 : */
1623 : void
1624 9562514 : ExecHashTableInsert(HashJoinTable hashtable,
1625 : TupleTableSlot *slot,
1626 : uint32 hashvalue)
1627 : {
1628 : bool shouldFree;
1629 9562514 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1630 : int bucketno;
1631 : int batchno;
1632 :
1633 9562514 : ExecHashGetBucketAndBatch(hashtable, hashvalue,
1634 : &bucketno, &batchno);
1635 :
1636 : /*
1637 : * decide whether to put the tuple in the hash table or a temp file
1638 : */
1639 9562514 : if (batchno == hashtable->curbatch)
1640 : {
1641 : /*
1642 : * put the tuple in hash table
1643 : */
1644 : HashJoinTuple hashTuple;
1645 : int hashTupleSize;
1646 7400966 : double ntuples = (hashtable->totalTuples - hashtable->skewTuples);
1647 :
1648 : /* Create the HashJoinTuple */
1649 7400966 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
1650 7400966 : hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1651 :
1652 7400966 : hashTuple->hashvalue = hashvalue;
1653 7400966 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1654 :
1655 : /*
1656 : * We always reset the tuple-matched flag on insertion. This is okay
1657 : * even when reloading a tuple from a batch file, since the tuple
1658 : * could not possibly have been matched to an outer tuple before it
1659 : * went into the batch file.
1660 : */
1661 7400966 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1662 :
1663 : /* Push it onto the front of the bucket's list */
1664 7400966 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1665 7400966 : hashtable->buckets.unshared[bucketno] = hashTuple;
1666 :
1667 : /*
1668 : * Increase the (optimal) number of buckets if we just exceeded the
1669 : * NTUP_PER_BUCKET threshold, but only when there's still a single
1670 : * batch.
1671 : */
1672 7400966 : if (hashtable->nbatch == 1 &&
1673 4931424 : ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
1674 : {
1675 : /* Guard against integer overflow and alloc size overflow */
1676 184 : if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
1677 184 : hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
1678 : {
1679 184 : hashtable->nbuckets_optimal *= 2;
1680 184 : hashtable->log2_nbuckets_optimal += 1;
1681 : }
1682 : }
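
/*
 * For example (illustrative numbers; NTUP_PER_BUCKET is a small constant
 * defined earlier in this file): with NTUP_PER_BUCKET = 1 and
 * nbuckets_optimal = 1024, inserting the 1025th tuple of a single-batch
 * build doubles nbuckets_optimal to 2048 and bumps log2_nbuckets_optimal
 * from 10 to 11, keeping the expected bucket chain length near one tuple.
 */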
1683 :
1684 : /* Account for space used, and back off if we've used too much */
1685 7400966 : hashtable->spaceUsed += hashTupleSize;
1686 7400966 : if (hashtable->spaceUsed > hashtable->spacePeak)
1687 5514476 : hashtable->spacePeak = hashtable->spaceUsed;
1688 7400966 : if (hashtable->spaceUsed +
1689 7400966 : hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
1690 7400966 : > hashtable->spaceAllowed)
1691 449142 : ExecHashIncreaseNumBatches(hashtable);
1692 : }
1693 : else
1694 : {
1695 : /*
1696 : * put the tuple into a temp file for later batches
1697 : */
1698 : Assert(batchno > hashtable->curbatch);
1699 2161548 : ExecHashJoinSaveTuple(tuple,
1700 : hashvalue,
1701 2161548 : &hashtable->innerBatchFile[batchno],
1702 : hashtable);
1703 : }
1704 :
1705 9562514 : if (shouldFree)
1706 7158592 : heap_free_minimal_tuple(tuple);
1707 9562514 : }
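
/*
 * Routing example (hypothetical values): with nbatch = 4 and curbatch = 0,
 * a tuple whose hash value yields batchno = 2 is not inserted into memory;
 * it is appended to innerBatchFile[2] by ExecHashJoinSaveTuple and later
 * reloaded through this same function when batch 2 becomes current, at
 * which point batchno == curbatch and it lands in a bucket.
 */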
1708 :
1709 : /*
1710 : * ExecParallelHashTableInsert
1711 : * insert a tuple into a shared hash table or shared batch tuplestore
1712 : */
1713 : void
1714 2160094 : ExecParallelHashTableInsert(HashJoinTable hashtable,
1715 : TupleTableSlot *slot,
1716 : uint32 hashvalue)
1717 : {
1718 : bool shouldFree;
1719 2160094 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1720 : dsa_pointer shared;
1721 : int bucketno;
1722 : int batchno;
1723 :
1724 2160436 : retry:
1725 2160436 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1726 :
1727 2160436 : if (batchno == 0)
1728 : {
1729 : HashJoinTuple hashTuple;
1730 :
1731 : /* Try to load it into memory. */
1732 : Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
1733 : PHJ_BUILD_HASH_INNER);
1734 1247308 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1735 1247308 : HJTUPLE_OVERHEAD + tuple->t_len,
1736 : &shared);
1737 1247308 : if (hashTuple == NULL)
1738 318 : goto retry;
1739 :
1740 : /* Store the hash value in the HashJoinTuple header. */
1741 1246990 : hashTuple->hashvalue = hashvalue;
1742 1246990 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1743 1246990 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1744 :
1745 : /* Push it onto the front of the bucket's list */
1746 1246990 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1747 : hashTuple, shared);
1748 : }
1749 : else
1750 : {
1751 913128 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1752 :
1753 : Assert(batchno > 0);
1754 :
1755 : /* Try to preallocate space in the batch if necessary. */
1756 913128 : if (hashtable->batches[batchno].preallocated < tuple_size)
1757 : {
1758 1704 : if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
1759 24 : goto retry;
1760 : }
1761 :
1762 : Assert(hashtable->batches[batchno].preallocated >= tuple_size);
1763 913104 : hashtable->batches[batchno].preallocated -= tuple_size;
1764 913104 : sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
1765 : tuple);
1766 : }
1767 2160094 : ++hashtable->batches[batchno].ntuples;
1768 :
1769 2160094 : if (shouldFree)
1770 2160094 : heap_free_minimal_tuple(tuple);
1771 2160094 : }
1772 :
1773 : /*
1774 : * Insert a tuple into the current hash table. Unlike
1775 : * ExecParallelHashTableInsert, this version is not prepared to send the tuple
1776 : * to other batches or to run out of memory, and should only be called with
1777 : * tuples that belong in the current batch once growth has been disabled.
1778 : */
1779 : void
1780 1082928 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
1781 : TupleTableSlot *slot,
1782 : uint32 hashvalue)
1783 : {
1784 : bool shouldFree;
1785 1082928 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1786 : HashJoinTuple hashTuple;
1787 : dsa_pointer shared;
1788 : int batchno;
1789 : int bucketno;
1790 :
1791 1082928 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1792 : Assert(batchno == hashtable->curbatch);
1793 1082928 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1794 1082928 : HJTUPLE_OVERHEAD + tuple->t_len,
1795 : &shared);
1796 1082928 : hashTuple->hashvalue = hashvalue;
1797 1082928 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1798 1082928 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1799 1082928 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1800 : hashTuple, shared);
1801 :
1802 1082928 : if (shouldFree)
1803 0 : heap_free_minimal_tuple(tuple);
1804 1082928 : }
1805 :
1806 : /*
1807 : * ExecHashGetHashValue
1808 : * Compute the hash value for a tuple
1809 : *
1810 : * The tuple to be tested must be in econtext->ecxt_outertuple (thus Vars in
1811 : * the hashkeys expressions need to have OUTER_VAR as varno). If outer_tuple
1812 : * is false (meaning it's the HashJoin's inner node, Hash), econtext,
1813 : * hashkeys, and slot need to be from Hash, with hashkeys/slot referencing and
1814 : * being suitable for tuples from the node below the Hash. Conversely, if
1815 : * outer_tuple is true, econtext is from HashJoin, and hashkeys/slot need to
1816 : * be appropriate for tuples from HashJoin's outer node.
1817 : *
1818 : * A true result means the tuple's hash value has been successfully computed
1819 : * and stored at *hashvalue. A false result means the tuple cannot match
1820 : * because it contains a null attribute, and hence it should be discarded
1821 : * immediately. (If keep_nulls is true then false is never returned.)
1822 : */
1823 : bool
1824 24741334 : ExecHashGetHashValue(HashJoinTable hashtable,
1825 : ExprContext *econtext,
1826 : List *hashkeys,
1827 : bool outer_tuple,
1828 : bool keep_nulls,
1829 : uint32 *hashvalue)
1830 : {
1831 24741334 : uint32 hashkey = 0;
1832 : FmgrInfo *hashfunctions;
1833 : ListCell *hk;
1834 24741334 : int i = 0;
1835 : MemoryContext oldContext;
1836 :
1837 : /*
1838 : * We reset the eval context each time to reclaim any memory leaked in the
1839 : * hashkey expressions.
1840 : */
1841 24741334 : ResetExprContext(econtext);
1842 :
1843 24741334 : oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
1844 :
1845 24741334 : if (outer_tuple)
1846 15382710 : hashfunctions = hashtable->outer_hashfunctions;
1847 : else
1848 9358624 : hashfunctions = hashtable->inner_hashfunctions;
1849 :
1850 51177518 : foreach(hk, hashkeys)
1851 : {
1852 26437040 : ExprState *keyexpr = (ExprState *) lfirst(hk);
1853 : Datum keyval;
1854 : bool isNull;
1855 :
1856 : /* combine successive hashkeys by rotating */
1857 26437040 : hashkey = pg_rotate_left32(hashkey, 1);
1858 :
1859 : /*
1860 : * Get the join attribute value of the tuple
1861 : */
1862 26437040 : keyval = ExecEvalExpr(keyexpr, econtext, &isNull);
1863 :
1864 : /*
1865 : * If the attribute is NULL, and the join operator is strict, then
1866 : * this tuple cannot pass the join qual so we can reject it
1867 : * immediately (unless we're scanning the outside of an outer join, in
1868 : * which case we must not reject it). Otherwise we act like the
1869 : * hashcode of NULL is zero (this will support operators that act like
1870 : * IS NOT DISTINCT, though not any more-random behavior). We treat
1871 : * the hash support function as strict even if the operator is not.
1872 : *
1873 : * Note: currently, all hashjoinable operators must be strict since
1874 : * the hash index AM assumes that. However, it takes so little extra
1875 : * code here to allow non-strict that we may as well do it.
1876 : */
1877 26437040 : if (isNull)
1878 : {
1879 1088 : if (hashtable->hashStrict[i] && !keep_nulls)
1880 : {
1881 856 : MemoryContextSwitchTo(oldContext);
1882 856 : return false; /* cannot match */
1883 : }
1884 : /* else, leave hashkey unmodified, equivalent to hashcode 0 */
1885 : }
1886 : else
1887 : {
1888 : /* Compute the hash function */
1889 : uint32 hkey;
1890 :
1891 26435952 : hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i], hashtable->collations[i], keyval));
1892 26435952 : hashkey ^= hkey;
1893 : }
1894 :
1895 26436184 : i++;
1896 : }
1897 :
1898 24740478 : MemoryContextSwitchTo(oldContext);
1899 :
1900 24740478 : *hashvalue = hashkey;
1901 24740478 : return true;
1902 : }
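
/*
 * Sketch of the combining scheme above for a two-column key (hypothetical
 * datums k1 and k2; hash() stands for the per-column hash support
 * function invoked via FunctionCall1Coll):
 *
 *   uint32 hashkey = 0;
 *   hashkey = pg_rotate_left32(hashkey, 1);    still 0
 *   hashkey ^= hash(k1);
 *   hashkey = pg_rotate_left32(hashkey, 1);    shifts k1's bits over
 *   hashkey ^= hash(k2);
 *
 * The rotation makes the combination order-sensitive, so (k1, k2) and
 * (k2, k1) generally hash differently, while a NULL column simply skips
 * the XOR step (equivalent to hashcode 0, per the comment above).
 */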
1903 :
1904 : /*
1905 : * ExecHashGetBucketAndBatch
1906 : * Determine the bucket number and batch number for a hash value
1907 : *
1908 : * Note: on-the-fly increases of nbatch must not change the bucket number
1909 : * for a given hash code (since we don't move tuples to different hash
1910 : * chains), and must only cause the batch number to remain the same or
1911 : * increase. Our algorithm is
1912 : * bucketno = hashvalue MOD nbuckets
1913 : * batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
1914 : * where nbuckets and nbatch are both expected to be powers of 2, so we can
1915 : * do the computations by shifting and masking. (This assumes that all hash
1916 : * functions are good about randomizing all their output bits, else we are
1917 : * likely to have very skewed bucket or batch occupancy.)
1918 : *
1919 : * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
1920 : * bucket count growth. Once we start batching, these values are fixed and do
1921 : * not change over the course of the join (making it possible to compute batch
1922 : * number the way we do here).
1923 : *
1924 : * nbatch is always a power of 2; we increase it only by doubling it. This
1925 : * effectively adds one more bit to the top of the batchno. In very large
1926 : * joins, we might run out of bits to add, so we do this by rotating the hash
1927 : * value. This causes batchno to steal bits from bucketno when the number of
1928 : * virtual buckets exceeds 2^32. It's better to have longer bucket chains
1929 : * than to lose the ability to divide batches.
1930 : */
1931 : void
1932 32832500 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
1933 : uint32 hashvalue,
1934 : int *bucketno,
1935 : int *batchno)
1936 : {
1937 32832500 : uint32 nbuckets = (uint32) hashtable->nbuckets;
1938 32832500 : uint32 nbatch = (uint32) hashtable->nbatch;
1939 :
1940 32832500 : if (nbatch > 1)
1941 : {
1942 13192210 : *bucketno = hashvalue & (nbuckets - 1);
1943 13192210 : *batchno = pg_rotate_right32(hashvalue,
1944 13192210 : hashtable->log2_nbuckets) & (nbatch - 1);
1945 : }
1946 : else
1947 : {
1948 19640290 : *bucketno = hashvalue & (nbuckets - 1);
1949 19640290 : *batchno = 0;
1950 : }
1951 32832500 : }
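
/*
 * Worked example (illustrative values): with nbuckets = 1024
 * (log2_nbuckets = 10) and nbatch = 32, the masks are 0x3FF and 0x1F:
 *
 *   uint32 hashvalue = 0xDEADBEEF;
 *   bucketno = hashvalue & 0x3FF;                        yields 751
 *   batchno = pg_rotate_right32(hashvalue, 10) & 0x1F;   yields 15
 *
 * Doubling nbatch to 64 widens the batch mask to 0x3F, so a tuple's
 * batchno either stays the same or gains the next-higher bit, which is
 * exactly the monotonic behavior the note above requires.
 */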
1952 :
1953 : /*
1954 : * ExecScanHashBucket
1955 : * scan a hash bucket for matches to the current outer tuple
1956 : *
1957 : * The current outer tuple must be stored in econtext->ecxt_outertuple.
1958 : *
1959 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
1960 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
1961 : * for the latter.
1962 : */
1963 : bool
1964 17641886 : ExecScanHashBucket(HashJoinState *hjstate,
1965 : ExprContext *econtext)
1966 : {
1967 17641886 : ExprState *hjclauses = hjstate->hashclauses;
1968 17641886 : HashJoinTable hashtable = hjstate->hj_HashTable;
1969 17641886 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
1970 17641886 : uint32 hashvalue = hjstate->hj_CurHashValue;
1971 :
1972 : /*
1973 : * hj_CurTuple is the address of the tuple last returned from the current
1974 : * bucket, or NULL if it's time to start scanning a new bucket.
1975 : *
1976 : * If the tuple hashed to a skew bucket then scan the skew bucket
1977 : * otherwise scan the standard hashtable bucket.
1978 : */
1979 17641886 : if (hashTuple != NULL)
1980 4420020 : hashTuple = hashTuple->next.unshared;
1981 13221866 : else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
1982 2400 : hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
1983 : else
1984 13219466 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
1985 :
1986 21665152 : while (hashTuple != NULL)
1987 : {
1988 11812002 : if (hashTuple->hashvalue == hashvalue)
1989 : {
1990 : TupleTableSlot *inntuple;
1991 :
1992 : /* insert hashtable's tuple into exec slot so ExecQual sees it */
1993 7788742 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
1994 : hjstate->hj_HashTupleSlot,
1995 : false); /* do not pfree */
1996 7788742 : econtext->ecxt_innertuple = inntuple;
1997 :
1998 7788742 : if (ExecQualAndReset(hjclauses, econtext))
1999 : {
2000 7788736 : hjstate->hj_CurTuple = hashTuple;
2001 7788736 : return true;
2002 : }
2003 : }
2004 :
2005 4023266 : hashTuple = hashTuple->next.unshared;
2006 : }
2007 :
2008 : /*
2009 : * no match
2010 : */
2011 9853150 : return false;
2012 : }
2013 :
2014 : /*
2015 : * ExecParallelScanHashBucket
2016 : * scan a hash bucket for matches to the current outer tuple
2017 : *
2018 : * The current outer tuple must be stored in econtext->ecxt_outertuple.
2019 : *
2020 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2021 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2022 : * for the latter.
2023 : */
2024 : bool
2025 4200054 : ExecParallelScanHashBucket(HashJoinState *hjstate,
2026 : ExprContext *econtext)
2027 : {
2028 4200054 : ExprState *hjclauses = hjstate->hashclauses;
2029 4200054 : HashJoinTable hashtable = hjstate->hj_HashTable;
2030 4200054 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2031 4200054 : uint32 hashvalue = hjstate->hj_CurHashValue;
2032 :
2033 : /*
2034 : * hj_CurTuple is the address of the tuple last returned from the current
2035 : * bucket, or NULL if it's time to start scanning a new bucket.
2036 : */
2037 4200054 : if (hashTuple != NULL)
2038 2040024 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2039 : else
2040 2160030 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2041 : hjstate->hj_CurBucketNo);
2042 :
2043 5607210 : while (hashTuple != NULL)
2044 : {
2045 3447180 : if (hashTuple->hashvalue == hashvalue)
2046 : {
2047 : TupleTableSlot *inntuple;
2048 :
2049 : /* insert hashtable's tuple into exec slot so ExecQual sees it */
2050 2040024 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2051 : hjstate->hj_HashTupleSlot,
2052 : false); /* do not pfree */
2053 2040024 : econtext->ecxt_innertuple = inntuple;
2054 :
2055 2040024 : if (ExecQualAndReset(hjclauses, econtext))
2056 : {
2057 2040024 : hjstate->hj_CurTuple = hashTuple;
2058 2040024 : return true;
2059 : }
2060 : }
2061 :
2062 1407156 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2063 : }
2064 :
2065 : /*
2066 : * no match
2067 : */
2068 2160030 : return false;
2069 : }
2070 :
2071 : /*
2072 : * ExecPrepHashTableForUnmatched
2073 : * set up for a series of ExecScanHashTableForUnmatched calls
2074 : */
2075 : void
2076 5182 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
2077 : {
2078 : /*----------
2079 : * During this scan we use the HashJoinState fields as follows:
2080 : *
2081 : * hj_CurBucketNo: next regular bucket to scan
2082 : * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
2083 : * hj_CurTuple: last tuple returned, or NULL to start next bucket
2084 : *----------
2085 : */
2086 5182 : hjstate->hj_CurBucketNo = 0;
2087 5182 : hjstate->hj_CurSkewBucketNo = 0;
2088 5182 : hjstate->hj_CurTuple = NULL;
2089 5182 : }
2090 :
2091 : /*
2092 : * Decide if this process is allowed to run the unmatched scan. If so, the
2093 : * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
2094 : * Otherwise the batch is detached and false is returned.
2095 : */
2096 : bool
2097 74 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
2098 : {
2099 74 : HashJoinTable hashtable = hjstate->hj_HashTable;
2100 74 : int curbatch = hashtable->curbatch;
2101 74 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
2102 :
2103 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
2104 :
2105 : /*
2106 : * It would not be deadlock-free to wait on the batch barrier, because it
2107 : * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
2108 : * already emitted tuples. Therefore, we'll hold a wait-free election:
2109 : * only one process can continue to the next phase, and all others detach
2110 : * from this batch. They can still do any work on other batches, if there
2111 : * are any.
2112 : */
2113 74 : if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
2114 : {
2115 : /* This process considers the batch to be done. */
2116 8 : hashtable->batches[hashtable->curbatch].done = true;
2117 :
2118 : /* Make sure any temporary files are closed. */
2119 8 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
2120 8 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
2121 :
2122 : /*
2123 : * Track largest batch we've seen, which would normally happen in
2124 : * ExecHashTableDetachBatch().
2125 : */
2126 8 : hashtable->spacePeak =
2127 8 : Max(hashtable->spacePeak,
2128 : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
2129 8 : hashtable->curbatch = -1;
2130 8 : return false;
2131 : }
2132 :
2133 : /* Now we are alone with this batch. */
2134 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
2135 :
2136 : /*
2137 : * Has another process decided to give up early and command all processes
2138 : * to skip the unmatched scan?
2139 : */
2140 66 : if (batch->skip_unmatched)
2141 : {
2142 0 : hashtable->batches[hashtable->curbatch].done = true;
2143 0 : ExecHashTableDetachBatch(hashtable);
2144 0 : return false;
2145 : }
2146 :
2147 : /* Now prepare the process local state, just as for non-parallel join. */
2148 66 : ExecPrepHashTableForUnmatched(hjstate);
2149 :
2150 66 : return true;
2151 : }
2152 :
2153 : /*
2154 : * ExecScanHashTableForUnmatched
2155 : * scan the hash table for unmatched inner tuples
2156 : *
2157 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2158 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2159 : * for the latter.
2160 : */
2161 : bool
2162 326076 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
2163 : {
2164 326076 : HashJoinTable hashtable = hjstate->hj_HashTable;
2165 326076 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2166 :
2167 : for (;;)
2168 : {
2169 : /*
2170 : * hj_CurTuple is the address of the tuple last returned from the
2171 : * current bucket, or NULL if it's time to start scanning a new
2172 : * bucket.
2173 : */
2174 6572960 : if (hashTuple != NULL)
2175 320960 : hashTuple = hashTuple->next.unshared;
2176 6252000 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2177 : {
2178 6246896 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
2179 6246896 : hjstate->hj_CurBucketNo++;
2180 : }
2181 5104 : else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
2182 : {
2183 0 : int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
2184 :
2185 0 : hashTuple = hashtable->skewBucket[j]->tuples;
2186 0 : hjstate->hj_CurSkewBucketNo++;
2187 : }
2188 : else
2189 5104 : break; /* finished all buckets */
2190 :
2191 6939124 : while (hashTuple != NULL)
2192 : {
2193 692240 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2194 : {
2195 : TupleTableSlot *inntuple;
2196 :
2197 : /* insert hashtable's tuple into exec slot */
2198 320972 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2199 : hjstate->hj_HashTupleSlot,
2200 : false); /* do not pfree */
2201 320972 : econtext->ecxt_innertuple = inntuple;
2202 :
2203 : /*
2204 : * Reset temp memory each time; although this function doesn't
2205 : * do any qual eval, the caller will, so let's keep it
2206 : * parallel to ExecScanHashBucket.
2207 : */
2208 320972 : ResetExprContext(econtext);
2209 :
2210 320972 : hjstate->hj_CurTuple = hashTuple;
2211 320972 : return true;
2212 : }
2213 :
2214 371268 : hashTuple = hashTuple->next.unshared;
2215 : }
2216 :
2217 : /* allow this loop to be cancellable */
2218 6246884 : CHECK_FOR_INTERRUPTS();
2219 : }
2220 :
2221 : /*
2222 : * no more unmatched tuples
2223 : */
2224 5104 : return false;
2225 : }
2226 :
2227 : /*
2228 : * ExecParallelScanHashTableForUnmatched
2229 : * scan the hash table for unmatched inner tuples, in parallel join
2230 : *
2231 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2232 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2233 : * for the latter.
2234 : */
2235 : bool
2236 120072 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
2237 : ExprContext *econtext)
2238 : {
2239 120072 : HashJoinTable hashtable = hjstate->hj_HashTable;
2240 120072 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2241 :
2242 : for (;;)
2243 : {
2244 : /*
2245 : * hj_CurTuple is the address of the tuple last returned from the
2246 : * current bucket, or NULL if it's time to start scanning a new
2247 : * bucket.
2248 : */
2249 734472 : if (hashTuple != NULL)
2250 120006 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2251 614466 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2252 614400 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2253 614400 : hjstate->hj_CurBucketNo++);
2254 : else
2255 66 : break; /* finished all buckets */
2256 :
2257 974406 : while (hashTuple != NULL)
2258 : {
2259 360006 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2260 : {
2261 : TupleTableSlot *inntuple;
2262 :
2263 : /* insert hashtable's tuple into exec slot */
2264 120006 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2265 : hjstate->hj_HashTupleSlot,
2266 : false); /* do not pfree */
2267 120006 : econtext->ecxt_innertuple = inntuple;
2268 :
2269 : /*
2270 : * Reset temp memory each time; although this function doesn't
2271 : * do any qual eval, the caller will, so let's keep it
2272 : * parallel to ExecScanHashBucket.
2273 : */
2274 120006 : ResetExprContext(econtext);
2275 :
2276 120006 : hjstate->hj_CurTuple = hashTuple;
2277 120006 : return true;
2278 : }
2279 :
2280 240000 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2281 : }
2282 :
2283 : /* allow this loop to be cancellable */
2284 614400 : CHECK_FOR_INTERRUPTS();
2285 : }
2286 :
2287 : /*
2288 : * no more unmatched tuples
2289 : */
2290 66 : return false;
2291 : }
2292 :
2293 : /*
2294 : * ExecHashTableReset
2295 : *
2296 : * reset hash table header for new batch
2297 : */
2298 : void
2299 1148 : ExecHashTableReset(HashJoinTable hashtable)
2300 : {
2301 : MemoryContext oldcxt;
2302 1148 : int nbuckets = hashtable->nbuckets;
2303 :
2304 : /*
2305 : * Release all the hash buckets and tuples acquired in the prior pass, and
2306 : * reinitialize the context for a new pass.
2307 : */
2308 1148 : MemoryContextReset(hashtable->batchCxt);
2309 1148 : oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
2310 :
2311 : /* Reallocate and reinitialize the hash bucket headers. */
2312 1148 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
2313 :
2314 1148 : hashtable->spaceUsed = 0;
2315 :
2316 1148 : MemoryContextSwitchTo(oldcxt);
2317 :
2318 : /* Forget the chunks (the memory was freed by the context reset above). */
2319 1148 : hashtable->chunks = NULL;
2320 1148 : }
2321 :
2322 : /*
2323 : * ExecHashTableResetMatchFlags
2324 : * Clear all the HeapTupleHeaderHasMatch flags in the table
2325 : */
2326 : void
2327 20 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
2328 : {
2329 : HashJoinTuple tuple;
2330 : int i;
2331 :
2332 : /* Reset all flags in the main table ... */
2333 20500 : for (i = 0; i < hashtable->nbuckets; i++)
2334 : {
2335 20650 : for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
2336 170 : tuple = tuple->next.unshared)
2337 170 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2338 : }
2339 :
2340 : /* ... and the same for the skew buckets, if any */
2341 20 : for (i = 0; i < hashtable->nSkewBuckets; i++)
2342 : {
2343 0 : int j = hashtable->skewBucketNums[i];
2344 0 : HashSkewBucket *skewBucket = hashtable->skewBucket[j];
2345 :
2346 0 : for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
2347 0 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2348 : }
2349 20 : }
2350 :
2351 :
2352 : void
2353 1972 : ExecReScanHash(HashState *node)
2354 : {
2355 1972 : PlanState *outerPlan = outerPlanState(node);
2356 :
2357 : /*
2358 : * if chgParam of subnode is not null then plan will be re-scanned by
2359 : * first ExecProcNode.
2360 : */
2361 1972 : if (outerPlan->chgParam == NULL)
2362 30 : ExecReScan(outerPlan);
2363 1972 : }
2364 :
2365 :
2366 : /*
2367 : * ExecHashBuildSkewHash
2368 : *
2369 : * Set up for skew optimization if we can identify the most common values
2370 : * (MCVs) of the outer relation's join key. We make a skew hash bucket
2371 : * for the hash value of each MCV, up to the number of slots allowed
2372 : * based on available memory.
2373 : */
2374 : static void
2375 104 : ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
2376 : {
2377 : HeapTupleData *statsTuple;
2378 : AttStatsSlot sslot;
2379 :
2380 : /* Do nothing if planner didn't identify the outer relation's join key */
2381 104 : if (!OidIsValid(node->skewTable))
2382 72 : return;
2383 : /* Also, do nothing if we don't have room for at least one skew bucket */
2384 104 : if (mcvsToUse <= 0)
2385 0 : return;
2386 :
2387 : /*
2388 : * Try to find the MCV statistics for the outer relation's join key.
2389 : */
2390 104 : statsTuple = SearchSysCache3(STATRELATTINH,
2391 : ObjectIdGetDatum(node->skewTable),
2392 104 : Int16GetDatum(node->skewColumn),
2393 104 : BoolGetDatum(node->skewInherit));
2394 104 : if (!HeapTupleIsValid(statsTuple))
2395 72 : return;
2396 :
2397 32 : if (get_attstatsslot(&sslot, statsTuple,
2398 : STATISTIC_KIND_MCV, InvalidOid,
2399 : ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
2400 : {
2401 : double frac;
2402 : int nbuckets;
2403 : FmgrInfo *hashfunctions;
2404 : int i;
2405 :
2406 6 : if (mcvsToUse > sslot.nvalues)
2407 0 : mcvsToUse = sslot.nvalues;
2408 :
2409 : /*
2410 : * Calculate the expected fraction of outer relation that will
2411 : * participate in the skew optimization. If this isn't at least
2412 : * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
2413 : */
2414 6 : frac = 0;
2415 132 : for (i = 0; i < mcvsToUse; i++)
2416 126 : frac += sslot.numbers[i];
2417 6 : if (frac < SKEW_MIN_OUTER_FRACTION)
2418 : {
2419 0 : free_attstatsslot(&sslot);
2420 0 : ReleaseSysCache(statsTuple);
2421 0 : return;
2422 : }
2423 :
2424 : /*
2425 : * Okay, set up the skew hashtable.
2426 : *
2427 : * skewBucket[] is an open addressing hashtable with a power of 2 size
2428 : * that is greater than the number of MCV values. (This ensures there
2429 : * will be at least one null entry, so searches will always
2430 : * terminate.)
2431 : *
2432 : * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
2433 : * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
2434 : * since we limit pg_statistic entries to much less than that.
2435 : */
2436 6 : nbuckets = pg_nextpower2_32(mcvsToUse + 1);
2437 : /* use two more bits just to help avoid collisions */
2438 6 : nbuckets <<= 2;
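
 /*
  * For example (hypothetical numbers): mcvsToUse = 100 gives
  * pg_nextpower2_32(101) = 128, and the two extra bits make
  * nbuckets = 512, so the open-addressing table is at most
  * about 20% full.
  */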
2439 :
2440 6 : hashtable->skewEnabled = true;
2441 6 : hashtable->skewBucketLen = nbuckets;
2442 :
2443 : /*
2444 : * We allocate the bucket memory in the hashtable's batch context. It
2445 : * is only needed during the first batch, and this ensures it will be
2446 : * automatically removed once the first batch is done.
2447 : */
2448 6 : hashtable->skewBucket = (HashSkewBucket **)
2449 6 : MemoryContextAllocZero(hashtable->batchCxt,
2450 : nbuckets * sizeof(HashSkewBucket *));
2451 6 : hashtable->skewBucketNums = (int *)
2452 6 : MemoryContextAllocZero(hashtable->batchCxt,
2453 : mcvsToUse * sizeof(int));
2454 :
2455 6 : hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
2456 6 : + mcvsToUse * sizeof(int);
2457 6 : hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
2458 6 : + mcvsToUse * sizeof(int);
2459 6 : if (hashtable->spaceUsed > hashtable->spacePeak)
2460 6 : hashtable->spacePeak = hashtable->spaceUsed;
2461 :
2462 : /*
2463 : * Create a skew bucket for each MCV hash value.
2464 : *
2465 : * Note: it is very important that we create the buckets in order of
2466 : * decreasing MCV frequency. If we have to remove some buckets, they
2467 : * must be removed in reverse order of creation (see notes in
2468 : * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
2469 : * be removed first.
2470 : */
2471 6 : hashfunctions = hashtable->outer_hashfunctions;
2472 :
2473 132 : for (i = 0; i < mcvsToUse; i++)
2474 : {
2475 : uint32 hashvalue;
2476 : int bucket;
2477 :
2478 126 : hashvalue = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[0],
2479 126 : hashtable->collations[0],
2480 126 : sslot.values[i]));
2481 :
2482 : /*
2483 : * While we have not hit a hole in the hashtable and have not hit
2484 : * the desired bucket, we have collided with some previous hash
2485 : * value, so try the next bucket location. NB: this code must
2486 : * match ExecHashGetSkewBucket.
2487 : */
2488 126 : bucket = hashvalue & (nbuckets - 1);
2489 126 : while (hashtable->skewBucket[bucket] != NULL &&
2490 0 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2491 0 : bucket = (bucket + 1) & (nbuckets - 1);
2492 :
2493 : /*
2494 : * If we found an existing bucket with the same hashvalue, leave
2495 : * it alone. It's okay for two MCVs to share a hashvalue.
2496 : */
2497 126 : if (hashtable->skewBucket[bucket] != NULL)
2498 0 : continue;
2499 :
2500 : /* Okay, create a new skew bucket for this hashvalue. */
2501 252 : hashtable->skewBucket[bucket] = (HashSkewBucket *)
2502 126 : MemoryContextAlloc(hashtable->batchCxt,
2503 : sizeof(HashSkewBucket));
2504 126 : hashtable->skewBucket[bucket]->hashvalue = hashvalue;
2505 126 : hashtable->skewBucket[bucket]->tuples = NULL;
2506 126 : hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
2507 126 : hashtable->nSkewBuckets++;
2508 126 : hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
2509 126 : hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
2510 126 : if (hashtable->spaceUsed > hashtable->spacePeak)
2511 126 : hashtable->spacePeak = hashtable->spaceUsed;
2512 : }
2513 :
2514 6 : free_attstatsslot(&sslot);
2515 : }
2516 :
2517 32 : ReleaseSysCache(statsTuple);
2518 : }
2519 :
2520 : /*
2521 : * ExecHashGetSkewBucket
2522 : *
2523 : * Returns the index of the skew bucket for this hashvalue,
2524 : * or INVALID_SKEW_BUCKET_NO if the hashvalue is not
2525 : * associated with any active skew bucket.
2526 : */
2527 : int
2528 24050576 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
2529 : {
2530 : int bucket;
2531 :
2532 : /*
2533 : * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
2534 : * particular, this happens after the initial batch is done).
2535 : */
2536 24050576 : if (!hashtable->skewEnabled)
2537 23930576 : return INVALID_SKEW_BUCKET_NO;
2538 :
2539 : /*
2540 : * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
2541 : */
2542 120000 : bucket = hashvalue & (hashtable->skewBucketLen - 1);
2543 :
2544 : /*
2545 : * While we have not hit a hole in the hashtable and have not hit the
2546 : * desired bucket, we have collided with some other hash value, so try the
2547 : * next bucket location.
2548 : */
2549 127830 : while (hashtable->skewBucket[bucket] != NULL &&
2550 10818 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2551 7830 : bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
2552 :
2553 : /*
2554 : * Found the desired bucket?
2555 : */
2556 120000 : if (hashtable->skewBucket[bucket] != NULL)
2557 2988 : return bucket;
2558 :
2559 : /*
2560 : * There must not be any hashtable entry for this hash value.
2561 : */
2562 117012 : return INVALID_SKEW_BUCKET_NO;
2563 : }
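
/*
 * Probe-sequence example (hypothetical values): with skewBucketLen = 8,
 * a hash value of 0x1D starts at bucket 0x1D & 7 = 5; if slot 5 holds a
 * different hash value, the search tries 6, then 7, then wraps to 0,
 * stopping at the first NULL slot (no match) or at a bucket whose stored
 * hashvalue matches. Because the table is sized with at least one empty
 * slot (see ExecHashBuildSkewHash), the loop always terminates.
 */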
2564 :
2565 : /*
2566 : * ExecHashSkewTableInsert
2567 : *
2568 : * Insert a tuple into the skew hashtable.
2569 : *
2570 : * This should generally match up with the current-batch case in
2571 : * ExecHashTableInsert.
2572 : */
2573 : static void
2574 588 : ExecHashSkewTableInsert(HashJoinTable hashtable,
2575 : TupleTableSlot *slot,
2576 : uint32 hashvalue,
2577 : int bucketNumber)
2578 : {
2579 : bool shouldFree;
2580 588 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
2581 : HashJoinTuple hashTuple;
2582 : int hashTupleSize;
2583 :
2584 : /* Create the HashJoinTuple */
2585 588 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2586 588 : hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
2587 : hashTupleSize);
2588 588 : hashTuple->hashvalue = hashvalue;
2589 588 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
2590 588 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
2591 :
2592 : /* Push it onto the front of the skew bucket's list */
2593 588 : hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
2594 588 : hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
2595 : Assert(hashTuple != hashTuple->next.unshared);
2596 :
2597 : /* Account for space used, and back off if we've used too much */
2598 588 : hashtable->spaceUsed += hashTupleSize;
2599 588 : hashtable->spaceUsedSkew += hashTupleSize;
2600 588 : if (hashtable->spaceUsed > hashtable->spacePeak)
2601 432 : hashtable->spacePeak = hashtable->spaceUsed;
2602 690 : while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
2603 102 : ExecHashRemoveNextSkewBucket(hashtable);
2604 :
2605 : /* Check we are not over the total spaceAllowed, either */
2606 588 : if (hashtable->spaceUsed > hashtable->spaceAllowed)
2607 0 : ExecHashIncreaseNumBatches(hashtable);
2608 :
2609 588 : if (shouldFree)
2610 588 : heap_free_minimal_tuple(tuple);
2611 588 : }
2612 :
2613 : /*
2614 : * ExecHashRemoveNextSkewBucket
2615 : *
2616 : * Remove the least valuable skew bucket by pushing its tuples into
2617 : * the main hash table.
2618 : */
2619 : static void
2620 102 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
2621 : {
2622 : int bucketToRemove;
2623 : HashSkewBucket *bucket;
2624 : uint32 hashvalue;
2625 : int bucketno;
2626 : int batchno;
2627 : HashJoinTuple hashTuple;
2628 :
2629 : /* Locate the bucket to remove */
2630 102 : bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
2631 102 : bucket = hashtable->skewBucket[bucketToRemove];
2632 :
2633 : /*
2634 : * Calculate which bucket and batch the tuples belong to in the main
2635 : * hashtable. They all have the same hash value, so it's the same for all
2636 : * of them. Also note that it's not possible for nbatch to increase while
2637 : * we are processing the tuples.
2638 : */
2639 102 : hashvalue = bucket->hashvalue;
2640 102 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
2641 :
2642 : /* Process all tuples in the bucket */
2643 102 : hashTuple = bucket->tuples;
2644 450 : while (hashTuple != NULL)
2645 : {
2646 348 : HashJoinTuple nextHashTuple = hashTuple->next.unshared;
2647 : MinimalTuple tuple;
2648 : Size tupleSize;
2649 :
2650 : /*
2651 : * This code must agree with ExecHashTableInsert. We do not use
2652 : * ExecHashTableInsert directly as ExecHashTableInsert expects a
2653 : * TupleTableSlot while we already have HashJoinTuples.
2654 : */
2655 348 : tuple = HJTUPLE_MINTUPLE(hashTuple);
2656 348 : tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2657 :
2658 : /* Decide whether to put the tuple in the hash table or a temp file */
2659 348 : if (batchno == hashtable->curbatch)
2660 : {
2661 : /* Move the tuple to the main hash table */
2662 : HashJoinTuple copyTuple;
2663 :
2664 : /*
2665 : * We must copy the tuple into the dense storage, else it will not
2666 : * be found by, eg, ExecHashIncreaseNumBatches.
2667 : */
2668 138 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
2669 138 : memcpy(copyTuple, hashTuple, tupleSize);
2670 138 : pfree(hashTuple);
2671 :
2672 138 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
2673 138 : hashtable->buckets.unshared[bucketno] = copyTuple;
2674 :
2675 : /* We have reduced skew space, but overall space doesn't change */
2676 138 : hashtable->spaceUsedSkew -= tupleSize;
2677 : }
2678 : else
2679 : {
2680 : /* Put the tuple into a temp file for later batches */
2681 : Assert(batchno > hashtable->curbatch);
2682 210 : ExecHashJoinSaveTuple(tuple, hashvalue,
2683 210 : &hashtable->innerBatchFile[batchno],
2684 : hashtable);
2685 210 : pfree(hashTuple);
2686 210 : hashtable->spaceUsed -= tupleSize;
2687 210 : hashtable->spaceUsedSkew -= tupleSize;
2688 : }
2689 :
2690 348 : hashTuple = nextHashTuple;
2691 :
2692 : /* allow this loop to be cancellable */
2693 348 : CHECK_FOR_INTERRUPTS();
2694 : }
2695 :
2696 : /*
2697 : * Free the bucket struct itself and reset the hashtable entry to NULL.
2698 : *
2699 : * NOTE: this is not nearly as simple as it looks on the surface, because
2700 : * of the possibility of collisions in the hashtable. Suppose that hash
2701 : * values A and B collide at a particular hashtable entry, and that A was
2702 : * entered first so B gets shifted to a different table entry. If we were
2703 : * to remove A first then ExecHashGetSkewBucket would mistakenly start
2704 : * reporting that B is not in the hashtable, because it would hit the NULL
2705 : * before finding B. However, we always remove entries in the reverse
2706 : * order of creation, so this failure cannot happen.
2707 : */
2708 102 : hashtable->skewBucket[bucketToRemove] = NULL;
2709 102 : hashtable->nSkewBuckets--;
2710 102 : pfree(bucket);
2711 102 : hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
2712 102 : hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
2713 :
2714 : /*
2715 : * If we have removed all skew buckets then give up on skew optimization.
2716 : * Release the arrays since they aren't useful any more.
2717 : */
2718 102 : if (hashtable->nSkewBuckets == 0)
2719 : {
2720 0 : hashtable->skewEnabled = false;
2721 0 : pfree(hashtable->skewBucket);
2722 0 : pfree(hashtable->skewBucketNums);
2723 0 : hashtable->skewBucket = NULL;
2724 0 : hashtable->skewBucketNums = NULL;
2725 0 : hashtable->spaceUsed -= hashtable->spaceUsedSkew;
2726 0 : hashtable->spaceUsedSkew = 0;
2727 : }
2728 102 : }
2729 :
2730 : /*
2731 : * Reserve space in the DSM segment for instrumentation data.
2732 : */
2733 : void
2734 192 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
2735 : {
2736 : size_t size;
2737 :
2738 : /* don't need this if not instrumenting or no workers */
2739 192 : if (!node->ps.instrument || pcxt->nworkers == 0)
2740 108 : return;
2741 :
2742 84 : size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
2743 84 : size = add_size(size, offsetof(SharedHashInfo, hinstrument));
2744 84 : shm_toc_estimate_chunk(&pcxt->estimator, size);
2745 84 : shm_toc_estimate_keys(&pcxt->estimator, 1);
2746 : }
2747 :
2748 : /*
2749 : * Set up a space in the DSM for all workers to record instrumentation data
2750 : * about their hash table.
2751 : */
2752 : void
2753 192 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
2754 : {
2755 : size_t size;
2756 :
2757 : /* don't need this if not instrumenting or no workers */
2758 192 : if (!node->ps.instrument || pcxt->nworkers == 0)
2759 108 : return;
2760 :
2761 84 : size = offsetof(SharedHashInfo, hinstrument) +
2762 84 : pcxt->nworkers * sizeof(HashInstrumentation);
2763 84 : node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
2764 :
2765 : /* Each per-worker area must start out as zeroes. */
2766 84 : memset(node->shared_info, 0, size);
2767 :
2768 84 : node->shared_info->num_workers = pcxt->nworkers;
2769 84 : shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
2770 84 : node->shared_info);
2771 : }
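
/*
 * The size computed above is the usual flexible-array-member idiom; for
 * example (illustrative only), with pcxt->nworkers = 4 the allocation is
 * offsetof(SharedHashInfo, hinstrument) + 4 * sizeof(HashInstrumentation),
 * and each worker later writes to its own hinstrument[ParallelWorkerNumber]
 * slot, looked up in ExecHashInitializeWorker below.
 */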
2772 :
2773 : /*
2774 : * Locate the DSM space for hash table instrumentation data that we'll write
2775 : * to at shutdown time.
2776 : */
2777 : void
2778 548 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
2779 : {
2780 : SharedHashInfo *shared_info;
2781 :
2782 : /* don't need this if not instrumenting */
2783 548 : if (!node->ps.instrument)
2784 296 : return;
2785 :
2786 : /*
2787 : * Find our entry in the shared area, and set up a pointer to it so that
2788 : * we'll accumulate stats there when shutting down or rebuilding the hash
2789 : * table.
2790 : */
2791 : shared_info = (SharedHashInfo *)
2792 252 : shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
2793 252 : node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
2794 : }
2795 :
2796 : /*
2797 : * Collect EXPLAIN stats if needed, saving them into DSM memory if
2798 : * ExecHashInitializeWorker was called, or local storage if not. In the
2799 : * parallel case, this must be done in ExecShutdownHash() rather than
2800 : * ExecEndHash() because the latter runs after we've detached from the DSM
2801 : * segment.
2802 : */
2803 : void
2804 26354 : ExecShutdownHash(HashState *node)
2805 : {
2806 : /* Allocate save space if EXPLAIN'ing and we didn't do so already */
2807 26354 : if (node->ps.instrument && !node->hinstrument)
2808 108 : node->hinstrument = palloc0_object(HashInstrumentation);
2809 : /* Now accumulate data for the current (final) hash table */
2810 26354 : if (node->hinstrument && node->hashtable)
2811 292 : ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
2812 26354 : }
2813 :
2814 : /*
2815 : * Retrieve instrumentation data from workers before the DSM segment is
2816 : * detached, so that EXPLAIN can access it.
2817 : */
2818 : void
2819 84 : ExecHashRetrieveInstrumentation(HashState *node)
2820 : {
2821 84 : SharedHashInfo *shared_info = node->shared_info;
2822 : size_t size;
2823 :
2824 84 : if (shared_info == NULL)
2825 0 : return;
2826 :
2827 : /* Replace node->shared_info with a copy in backend-local memory. */
2828 84 : size = offsetof(SharedHashInfo, hinstrument) +
2829 84 : shared_info->num_workers * sizeof(HashInstrumentation);
2830 84 : node->shared_info = palloc(size);
2831 84 : memcpy(node->shared_info, shared_info, size);
2832 : }
2833 :
2834 : /*
2835 : * Accumulate instrumentation data from 'hashtable' into an
2836 : * initially-zeroed HashInstrumentation struct.
2837 : *
2838 : * This is used to merge information across successive hash table instances
2839 : * within a single plan node. We take the maximum values of each interesting
2840 : * number. The largest nbuckets and largest nbatch values might have occurred
2841 : * in different instances, so there's some risk of confusion from reporting
2842 : * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
2843 : * issue if we don't report the largest values. Similarly, we want to report
2844 : * the largest spacePeak regardless of whether it happened in the same
2845 : * instance as the largest nbuckets or nbatch. All the instances should have
2846 : * the same nbuckets_original and nbatch_original; but there's little value
2847 : * in depending on that here, so handle them the same way.
2848 : */
2849 : void
2850 292 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
2851 : HashJoinTable hashtable)
2852 : {
2853 292 : instrument->nbuckets = Max(instrument->nbuckets,
2854 : hashtable->nbuckets);
2855 292 : instrument->nbuckets_original = Max(instrument->nbuckets_original,
2856 : hashtable->nbuckets_original);
2857 292 : instrument->nbatch = Max(instrument->nbatch,
2858 : hashtable->nbatch);
2859 292 : instrument->nbatch_original = Max(instrument->nbatch_original,
2860 : hashtable->nbatch_original);
2861 292 : instrument->space_peak = Max(instrument->space_peak,
2862 : hashtable->spacePeak);
2863 292 : }
2864 :
2865 : /*
2866 : * Allocate 'size' bytes from the currently active HashMemoryChunk
2867 : */
2868 : static void *
2869 7530998 : dense_alloc(HashJoinTable hashtable, Size size)
2870 : {
2871 : HashMemoryChunk newChunk;
2872 : char *ptr;
2873 :
2874 : /* just in case the size is not already aligned properly */
2875 7530998 : size = MAXALIGN(size);
2876 :
2877 : /*
2878 : * If tuple size is larger than threshold, allocate a separate chunk.
2879 : */
2880 7530998 : if (size > HASH_CHUNK_THRESHOLD)
2881 : {
2882 : /* allocate new chunk and put it at the beginning of the list */
2883 0 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2884 : HASH_CHUNK_HEADER_SIZE + size);
2885 0 : newChunk->maxlen = size;
2886 0 : newChunk->used = size;
2887 0 : newChunk->ntuples = 1;
2888 :
2889 : /*
2890 : * Add this chunk to the list after the first existing chunk, so that
2891 : * we don't lose the remaining space in the "current" chunk.
2892 : */
2893 0 : if (hashtable->chunks != NULL)
2894 : {
2895 0 : newChunk->next = hashtable->chunks->next;
2896 0 : hashtable->chunks->next.unshared = newChunk;
2897 : }
2898 : else
2899 : {
2900 0 : newChunk->next.unshared = hashtable->chunks;
2901 0 : hashtable->chunks = newChunk;
2902 : }
2903 :
2904 0 : return HASH_CHUNK_DATA(newChunk);
2905 : }
2906 :
2907 : /*
2908 : * See if we have enough space for it in the current chunk (if any). If
2909 : * not, allocate a fresh chunk.
2910 : */
2911 7530998 : if ((hashtable->chunks == NULL) ||
2912 7511798 : (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
2913 : {
2914 : /* allocate new chunk and put it at the beginning of the list */
2915 28774 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2916 : HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
2917 :
2918 28774 : newChunk->maxlen = HASH_CHUNK_SIZE;
2919 28774 : newChunk->used = size;
2920 28774 : newChunk->ntuples = 1;
2921 :
2922 28774 : newChunk->next.unshared = hashtable->chunks;
2923 28774 : hashtable->chunks = newChunk;
2924 :
2925 28774 : return HASH_CHUNK_DATA(newChunk);
2926 : }
2927 :
2928 : /* There is enough space in the current chunk, let's add the tuple */
2929 7502224 : ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
2930 7502224 : hashtable->chunks->used += size;
2931 7502224 : hashtable->chunks->ntuples += 1;
2932 :
2933 : /* return pointer to the start of the tuple memory */
2934 7502224 : return ptr;
2935 : }
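
/*
 * Allocation example (assuming the usual 32kB HASH_CHUNK_SIZE and the
 * quarter-chunk HASH_CHUNK_THRESHOLD from hashjoin.h): a 64-byte request
 * is served from the current chunk, advancing chunks->used by
 * MAXALIGN(64); a 10kB request exceeds the threshold and gets a dedicated
 * chunk, which is linked in behind the head chunk so the head's remaining
 * free space stays usable for subsequent small allocations.
 */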
2936 :
2937 : /*
2938 : * Allocate space for a tuple in shared dense storage. This is equivalent to
2939 : * dense_alloc but for Parallel Hash using shared memory.
2940 : *
2941 : * While loading a tuple into shared memory, we might run out of memory and
2942 : * decide to repartition, or determine that the load factor is too high and
2943 : * decide to expand the bucket array, or discover that another participant has
2944 : * commanded us to help do that. Return NULL if the number of buckets or batches
2945 : * has changed, indicating that the caller must retry (considering the
2946 : * possibility that the tuple no longer belongs in the same batch).
2947 : */
2948 : static HashJoinTuple
2949 2380984 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
2950 : dsa_pointer *shared)
2951 : {
2952 2380984 : ParallelHashJoinState *pstate = hashtable->parallel_state;
2953 : dsa_pointer chunk_shared;
2954 : HashMemoryChunk chunk;
2955 : Size chunk_size;
2956 : HashJoinTuple result;
2957 2380984 : int curbatch = hashtable->curbatch;
2958 :
2959 2380984 : size = MAXALIGN(size);
2960 :
2961 : /*
2962 : * Fast path: if there is enough space in this backend's current chunk,
2963 : * then we can allocate without any locking.
2964 : */
2965 2380984 : chunk = hashtable->current_chunk;
2966 2380984 : if (chunk != NULL &&
2967 2380028 : size <= HASH_CHUNK_THRESHOLD &&
2968 2380028 : chunk->maxlen - chunk->used >= size)
2969 : {
2970 :
2971 2377248 : chunk_shared = hashtable->current_chunk_shared;
2972 : Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
2973 2377248 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
2974 2377248 : result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
2975 2377248 : chunk->used += size;
2976 :
2977 : Assert(chunk->used <= chunk->maxlen);
2978 : Assert(result == dsa_get_address(hashtable->area, *shared));
2979 :
2980 2377248 : return result;
2981 : }
2982 :
2983 : /* Slow path: try to allocate a new chunk. */
2984 3736 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
2985 :
2986 : /*
2987 : * Check if we need to help increase the number of buckets or batches.
2988 : */
2989 3736 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
2990 3700 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
2991 : {
2992 174 : ParallelHashGrowth growth = pstate->growth;
2993 :
2994 174 : hashtable->current_chunk = NULL;
2995 174 : LWLockRelease(&pstate->lock);
2996 :
2997 : /* Another participant has commanded us to help grow. */
2998 174 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
2999 36 : ExecParallelHashIncreaseNumBatches(hashtable);
3000 138 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3001 138 : ExecParallelHashIncreaseNumBuckets(hashtable);
3002 :
3003 : /* The caller must retry. */
3004 174 : return NULL;
3005 : }
3006 :
3007 : /* Oversized tuples get their own chunk. */
3008 3562 : if (size > HASH_CHUNK_THRESHOLD)
3009 48 : chunk_size = size + HASH_CHUNK_HEADER_SIZE;
3010 : else
3011 3514 : chunk_size = HASH_CHUNK_SIZE;
3012 :
3013 : /* Check if it's time to grow batches or buckets. */
3014 3562 : if (pstate->growth != PHJ_GROWTH_DISABLED)
3015 : {
3016 : Assert(curbatch == 0);
3017 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
3018 :
3019 : /*
3020 : * Check if our space limit would be exceeded. To avoid choking on
3021 : * very large tuples or a very low hash_mem setting, we'll always allow
3022 : * each backend to allocate at least one chunk.
3023 : */
3024 1818 : if (hashtable->batches[0].at_least_one_chunk &&
3025 1448 : hashtable->batches[0].shared->size +
3026 1448 : chunk_size > pstate->space_allowed)
3027 : {
3028 36 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3029 36 : hashtable->batches[0].shared->space_exhausted = true;
3030 36 : LWLockRelease(&pstate->lock);
3031 :
3032 36 : return NULL;
3033 : }
3034 :
3035 : /* Check if our load factor limit would be exceeded. */
3036 1782 : if (hashtable->nbatch == 1)
3037 : {
3038 1554 : hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
3039 1554 : hashtable->batches[0].ntuples = 0;
3040 : /* Guard against integer overflow and alloc size overflow */
3041 1554 : if (hashtable->batches[0].shared->ntuples + 1 >
3042 1554 : hashtable->nbuckets * NTUP_PER_BUCKET &&
3043 108 : hashtable->nbuckets < (INT_MAX / 2) &&
3044 108 : hashtable->nbuckets * 2 <=
3045 : MaxAllocSize / sizeof(dsa_pointer_atomic))
3046 : {
3047 108 : pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
3048 108 : LWLockRelease(&pstate->lock);
3049 :
3050 108 : return NULL;
3051 : }
3052 : }
3053 : }
3054 :
3055 : /* We are cleared to allocate a new chunk. */
3056 3418 : chunk_shared = dsa_allocate(hashtable->area, chunk_size);
3057 3418 : hashtable->batches[curbatch].shared->size += chunk_size;
3058 3418 : hashtable->batches[curbatch].at_least_one_chunk = true;
3059 :
3060 : /* Set up the chunk. */
3061 3418 : chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
3062 3418 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
3063 3418 : chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
3064 3418 : chunk->used = size;
3065 :
3066 : /*
3067 : * Push it onto the list of chunks, so that it can be found if we need to
3068 : * increase the number of buckets or batches (batch 0 only) and later for
3069 : * freeing the memory (all batches).
3070 : */
3071 3418 : chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
3072 3418 : hashtable->batches[curbatch].shared->chunks = chunk_shared;
3073 :
3074 3418 : if (size <= HASH_CHUNK_THRESHOLD)
3075 : {
3076 : /*
3077 : * Make this the current chunk so that we can use the fast path to
3078 : * fill the rest of it up in future calls.
3079 : */
3080 3382 : hashtable->current_chunk = chunk;
3081 3382 : hashtable->current_chunk_shared = chunk_shared;
3082 : }
3083 3418 : LWLockRelease(&pstate->lock);
3084 :
3085 : Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
3086 3418 : result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
3087 :
3088 3418 : return result;
3089 : }
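 :
 : /*
 : * A hedged sketch of the calling convention, not code from this file:
 : * since ExecParallelHashTupleAlloc() returns NULL after helping to grow
 : * buckets or batches, a caller such as ExecParallelHashTableInsert()
 : * recomputes the tuple's bucket and batch numbers and retries, roughly:
 : *
 : *     for (;;)
 : *     {
 : *         ... recompute bucketno/batchno from the hash value ...
 : *         hashTuple = ExecParallelHashTupleAlloc(hashtable,
 : *                                                HJTUPLE_OVERHEAD + tuple->t_len,
 : *                                                &shared);
 : *         if (hashTuple != NULL)
 : *             break;
 : *     }
 : */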
3090 :
3091 : /*
3092 : * One backend needs to set up the shared batch state, including tuplestores.
3093 : * Other backends will ensure they have correctly configured accessors by
3094 : * calling ExecParallelHashEnsureBatchAccessors().
3095 : */
3096 : static void
3097 216 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
3098 : {
3099 216 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3100 : ParallelHashJoinBatch *batches;
3101 : MemoryContext oldcxt;
3102 : int i;
3103 :
3104 : Assert(hashtable->batches == NULL);
3105 :
3106 : /* Allocate space. */
3107 216 : pstate->batches =
3108 216 : dsa_allocate0(hashtable->area,
3109 : EstimateParallelHashJoinBatch(hashtable) * nbatch);
3110 216 : pstate->nbatch = nbatch;
3111 216 : batches = dsa_get_address(hashtable->area, pstate->batches);
3112 :
3113 : /*
3114 : * Use the hash join spill memory context to allocate accessors, including
3115 : * buffers for the temporary files.
3116 : */
3117 216 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3118 :
3119 : /* Allocate this backend's accessor array. */
3120 216 : hashtable->nbatch = nbatch;
3121 216 : hashtable->batches =
3122 216 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3123 :
3124 : /* Set up the shared state, tuplestores and backend-local accessors. */
3125 1050 : for (i = 0; i < hashtable->nbatch; ++i)
3126 : {
3127 834 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3128 834 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3129 : char name[MAXPGPATH];
3130 :
3131 : /*
3132 : * All members of shared were zero-initialized. We just need to set
3133 : * up the Barrier.
3134 : */
3135 834 : BarrierInit(&shared->batch_barrier, 0);
3136 834 : if (i == 0)
3137 : {
3138 : /* Batch 0 doesn't need to be loaded. */
3139 216 : BarrierAttach(&shared->batch_barrier);
3140 864 : while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
3141 648 : BarrierArriveAndWait(&shared->batch_barrier, 0);
3142 216 : BarrierDetach(&shared->batch_barrier);
3143 : }
3144 :
3145 : /* Initialize accessor state. All members were zero-initialized. */
3146 834 : accessor->shared = shared;
3147 :
3148 : /* Initialize the shared tuplestores. */
3149 834 : snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
3150 834 : accessor->inner_tuples =
3151 834 : sts_initialize(ParallelHashJoinBatchInner(shared),
3152 : pstate->nparticipants,
3153 : ParallelWorkerNumber + 1,
3154 : sizeof(uint32),
3155 : SHARED_TUPLESTORE_SINGLE_PASS,
3156 : &pstate->fileset,
3157 : name);
3158 834 : snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
3159 834 : accessor->outer_tuples =
3160 834 : sts_initialize(ParallelHashJoinBatchOuter(shared,
3161 : pstate->nparticipants),
3162 : pstate->nparticipants,
3163 : ParallelWorkerNumber + 1,
3164 : sizeof(uint32),
3165 : SHARED_TUPLESTORE_SINGLE_PASS,
3166 : &pstate->fileset,
3167 : name);
3168 : }
3169 :
3170 216 : MemoryContextSwitchTo(oldcxt);
3171 216 : }
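 :
 : /*
 : * For example, with nbatch = 4 the loop above creates shared
 : * tuplestores named "i0of4" .. "i3of4" (inner) and "o0of4" .. "o3of4"
 : * (outer); those names identify the backing temporary files in
 : * pstate->fileset.
 : */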
3172 :
3173 : /*
3174 : * Free the current set of ParallelHashJoinBatchAccessor objects.
3175 : */
3176 : static void
3177 48 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
3178 : {
3179 : int i;
3180 :
3181 132 : for (i = 0; i < hashtable->nbatch; ++i)
3182 : {
3183 : /* Make sure no files are left open. */
3184 84 : sts_end_write(hashtable->batches[i].inner_tuples);
3185 84 : sts_end_write(hashtable->batches[i].outer_tuples);
3186 84 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3187 84 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3188 : }
3189 48 : pfree(hashtable->batches);
3190 48 : hashtable->batches = NULL;
3191 48 : }
3192 :
3193 : /*
3194 : * Make sure this backend has up-to-date accessors for the current set of
3195 : * batches.
3196 : */
3197 : static void
3198 872 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
3199 : {
3200 872 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3201 : ParallelHashJoinBatch *batches;
3202 : MemoryContext oldcxt;
3203 : int i;
3204 :
3205 872 : if (hashtable->batches != NULL)
3206 : {
3207 642 : if (hashtable->nbatch == pstate->nbatch)
3208 642 : return;
3209 0 : ExecParallelHashCloseBatchAccessors(hashtable);
3210 : }
3211 :
3212 : /*
3213 : * We should never see a state where the batch-tracking array is freed,
3214 : * because we should have given up sooner if we join when the build
3215 : * barrier has reached the PHJ_BUILD_FREE phase.
3216 : */
3217 : Assert(DsaPointerIsValid(pstate->batches));
3218 :
3219 : /*
3220 : * Use the hash join spill memory context to allocate accessors, including
3221 : * buffers for the temporary files.
3222 : */
3223 230 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3224 :
3225 : /* Allocate this backend's accessor array. */
3226 230 : hashtable->nbatch = pstate->nbatch;
3227 230 : hashtable->batches =
3228 230 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3229 :
3230 : /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
3231 : batches = (ParallelHashJoinBatch *)
3232 230 : dsa_get_address(hashtable->area, pstate->batches);
3233 :
3234 : /* Set up the accessor array and attach to the tuplestores. */
3235 1252 : for (i = 0; i < hashtable->nbatch; ++i)
3236 : {
3237 1022 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3238 1022 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3239 :
3240 1022 : accessor->shared = shared;
3241 1022 : accessor->preallocated = 0;
3242 1022 : accessor->done = false;
3243 1022 : accessor->outer_eof = false;
3244 1022 : accessor->inner_tuples =
3245 1022 : sts_attach(ParallelHashJoinBatchInner(shared),
3246 : ParallelWorkerNumber + 1,
3247 : &pstate->fileset);
3248 1022 : accessor->outer_tuples =
3249 1022 : sts_attach(ParallelHashJoinBatchOuter(shared,
3250 : pstate->nparticipants),
3251 : ParallelWorkerNumber + 1,
3252 : &pstate->fileset);
3253 : }
3254 :
3255 230 : MemoryContextSwitchTo(oldcxt);
3256 : }
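 :
 : /*
 : * Note the division of labour with ExecParallelHashJoinSetUpBatches()
 : * above: exactly one backend creates each shared tuplestore with
 : * sts_initialize(), and every other backend merely attaches to it here
 : * with sts_attach(), passing the same fileset and participant number.
 : */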
3257 :
3258 : /*
3259 : * Allocate an empty shared memory hash table for a given batch.
3260 : */
3261 : void
3262 750 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
3263 : {
3264 750 : ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
3265 : dsa_pointer_atomic *buckets;
3266 750 : int nbuckets = hashtable->parallel_state->nbuckets;
3267 : int i;
3268 :
3269 750 : batch->buckets =
3270 750 : dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
3271 : buckets = (dsa_pointer_atomic *)
3272 750 : dsa_get_address(hashtable->area, batch->buckets);
3273 3115758 : for (i = 0; i < nbuckets; ++i)
3274 3115008 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
3275 750 : }
3276 :
3277 : /*
3278 : * If we are currently attached to a shared hash join batch, detach. If we
3279 : * are last to detach, clean up.
3280 : */
3281 : void
3282 19518 : ExecHashTableDetachBatch(HashJoinTable hashtable)
3283 : {
3284 19518 : if (hashtable->parallel_state != NULL &&
3285 1262 : hashtable->curbatch >= 0)
3286 : {
3287 864 : int curbatch = hashtable->curbatch;
3288 864 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
3289 864 : bool attached = true;
3290 :
3291 : /* Make sure any temporary files are closed. */
3292 864 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
3293 864 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
3294 :
3295 : /* After attaching we always get at least to PHJ_BATCH_PROBE. */
3296 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
3297 : BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
3298 :
3299 : /*
3300 : * If we're abandoning the PHJ_BATCH_PROBE phase early without having
3301 : * reached the end of it, it means the plan doesn't want any more
3302 : * tuples, and it is happy to abandon any tuples buffered in this
3303 : * process's subplans. For correctness, we can't allow any process to
3304 : * execute the PHJ_BATCH_SCAN phase, because we will never have the
3305 : * complete set of match bits. Therefore we skip emitting unmatched
3306 : * tuples in all backends (if this is a full/right join), as if those
3307 : * tuples were all due to be emitted by this process and it has
3308 : * abandoned them too.
3309 : */
3310 864 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
3311 798 : !hashtable->batches[curbatch].outer_eof)
3312 : {
3313 : /*
3314 : * This flag may be written to by multiple backends during
3315 : * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
3316 : * phase so requires no extra locking.
3317 : */
3318 0 : batch->skip_unmatched = true;
3319 : }
3320 :
3321 : /*
3322 : * Even if we aren't doing a full/right outer join, we'll step through
3323 : * the PHJ_BATCH_SCAN phase just to maintain the invariant that
3324 : * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
3325 : */
3326 864 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
3327 798 : attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
3328 864 : if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
3329 : {
3330 : /*
3331 : * We are no longer attached to the batch barrier, but we're the
3332 : * process that was chosen to free resources and it's safe to
3333 : * assert the current phase. The ParallelHashJoinBatch can't go
3334 : * away underneath us while we are attached to the build barrier,
3335 : * making this access safe.
3336 : */
3337 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
3338 :
3339 : /* Free shared chunks and buckets. */
3340 3878 : while (DsaPointerIsValid(batch->chunks))
3341 : {
3342 : HashMemoryChunk chunk =
3343 3130 : dsa_get_address(hashtable->area, batch->chunks);
3344 3130 : dsa_pointer next = chunk->next.shared;
3345 :
3346 3130 : dsa_free(hashtable->area, batch->chunks);
3347 3130 : batch->chunks = next;
3348 : }
3349 748 : if (DsaPointerIsValid(batch->buckets))
3350 : {
3351 748 : dsa_free(hashtable->area, batch->buckets);
3352 748 : batch->buckets = InvalidDsaPointer;
3353 : }
3354 : }
3355 :
3356 : /*
3357 : * Track the largest batch we've been attached to. Though each
3358 : * backend might see a different subset of batches, explain.c will
3359 : * scan the results from all backends to find the largest value.
3360 : */
3361 864 : hashtable->spacePeak =
3362 864 : Max(hashtable->spacePeak,
3363 : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
3364 :
3365 : /* Remember that we are not attached to a batch. */
3366 864 : hashtable->curbatch = -1;
3367 : }
3368 19518 : }
3369 :
3370 : /*
3371 : * Detach from all shared resources. If we are last to detach, clean up.
3372 : */
3373 : void
3374 18654 : ExecHashTableDetach(HashJoinTable hashtable)
3375 : {
3376 18654 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3377 :
3378 : /*
3379 : * If we're involved in a parallel query, we must either have gotten all
3380 : * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
3381 : */
3382 : Assert(!pstate ||
3383 : BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
3384 :
3385 18654 : if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
3386 : {
3387 : int i;
3388 :
3389 : /* Make sure any temporary files are closed. */
3390 398 : if (hashtable->batches)
3391 : {
3392 2170 : for (i = 0; i < hashtable->nbatch; ++i)
3393 : {
3394 1772 : sts_end_write(hashtable->batches[i].inner_tuples);
3395 1772 : sts_end_write(hashtable->batches[i].outer_tuples);
3396 1772 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3397 1772 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3398 : }
3399 : }
3400 :
3401 : /* If we're last to detach, clean up shared memory. */
3402 398 : if (BarrierArriveAndDetach(&pstate->build_barrier))
3403 : {
3404 : /*
3405 : * Late joining processes will see this state and give up
3406 : * immediately.
3407 : */
3408 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
3409 :
3410 168 : if (DsaPointerIsValid(pstate->batches))
3411 : {
3412 168 : dsa_free(hashtable->area, pstate->batches);
3413 168 : pstate->batches = InvalidDsaPointer;
3414 : }
3415 : }
3416 : }
3417 18654 : hashtable->parallel_state = NULL;
3418 18654 : }
3419 :
3420 : /*
3421 : * Get the first tuple in a given bucket identified by number.
3422 : */
3423 : static inline HashJoinTuple
3424 2774430 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
3425 : {
3426 : HashJoinTuple tuple;
3427 : dsa_pointer p;
3428 :
3429 : Assert(hashtable->parallel_state);
3430 2774430 : p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
3431 2774430 : tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
3432 :
3433 2774430 : return tuple;
3434 : }
3435 :
3436 : /*
3437 : * Get the next tuple in the same bucket as 'tuple'.
3438 : */
3439 : static inline HashJoinTuple
3440 3807186 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
3441 : {
3442 : HashJoinTuple next;
3443 :
3444 : Assert(hashtable->parallel_state);
3445 3807186 : next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
3446 :
3447 3807186 : return next;
3448 : }
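 :
 : /*
 : * Together the two accessors above support the usual bucket walk; as a
 : * sketch (cf. ExecParallelScanHashBucket() earlier in this file):
 : *
 : *     for (tuple = ExecParallelHashFirstTuple(hashtable, bucketno);
 : *          tuple != NULL;
 : *          tuple = ExecParallelHashNextTuple(hashtable, tuple))
 : *         ... compare tuple->hashvalue, then evaluate the join quals ...
 : */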
3449 :
3450 : /*
3451 : * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
3452 : */
3453 : static inline void
3454 2927818 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
3455 : HashJoinTuple tuple,
3456 : dsa_pointer tuple_shared)
3457 : {
3458 : for (;;)
3459 : {
3460 2927818 : tuple->next.shared = dsa_pointer_atomic_read(head);
3461 2927818 : if (dsa_pointer_atomic_compare_exchange(head,
3462 2927818 : &tuple->next.shared,
3463 : tuple_shared))
3464 2920388 : break;
3465 : }
3466 2920388 : }
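 :
 : /*
 : * The loop above is the classic lock-free list push: read the current
 : * head, point the new tuple's next link at it, then compare-and-swap
 : * the head to the new tuple. If another backend pushed concurrently,
 : * the CAS fails and we retry against the freshly observed head.
 : */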
3467 :
3468 : /*
3469 : * Prepare to work on a given batch.
3470 : */
3471 : void
3472 1942 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
3473 : {
3474 : Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
3475 :
3476 1942 : hashtable->curbatch = batchno;
3477 1942 : hashtable->buckets.shared = (dsa_pointer_atomic *)
3478 1942 : dsa_get_address(hashtable->area,
3479 1942 : hashtable->batches[batchno].shared->buckets);
3480 1942 : hashtable->nbuckets = hashtable->parallel_state->nbuckets;
3481 1942 : hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
3482 1942 : hashtable->current_chunk = NULL;
3483 1942 : hashtable->current_chunk_shared = InvalidDsaPointer;
3484 1942 : hashtable->batches[batchno].at_least_one_chunk = false;
3485 1942 : }
3486 :
3487 : /*
3488 : * Take the next available chunk from the queue of chunks being worked on in
3489 : * parallel. Return NULL if there are none left. Otherwise return a pointer
3490 : * to the chunk, and set *shared to the DSA pointer to the chunk.
3491 : */
3492 : static HashMemoryChunk
3493 1136 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
3494 : {
3495 1136 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3496 : HashMemoryChunk chunk;
3497 :
3498 1136 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3499 1136 : if (DsaPointerIsValid(pstate->chunk_work_queue))
3500 : {
3501 950 : *shared = pstate->chunk_work_queue;
3502 : chunk = (HashMemoryChunk)
3503 950 : dsa_get_address(hashtable->area, *shared);
3504 950 : pstate->chunk_work_queue = chunk->next.shared;
3505 : }
3506 : else
3507 186 : chunk = NULL;
3508 1136 : LWLockRelease(&pstate->lock);
3509 :
3510 1136 : return chunk;
3511 : }
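 :
 : /*
 : * A hedged usage sketch: during repartitioning each participant drains
 : * the queue in a loop (cf. ExecParallelHashRepartitionFirst() earlier
 : * in this file):
 : *
 : *     while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &s)) != NULL)
 : *     {
 : *         ... rehash each tuple in the chunk into its new batch ...
 : *         dsa_free(hashtable->area, s);
 : *     }
 : */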
3512 :
3513 : /*
3514 : * Increase the space preallocated in this backend for a given inner batch by
3515 : * at least a given amount. This allows us to track whether a given batch
3516 : * would fit in memory when loaded back in. Also increase the number of
3517 : * batches or buckets if required.
3518 : *
3519 : * This maintains a running estimation of how much space will be taken when we
3520 : * load the batch back into memory by simulating the way chunks will be handed
3521 : * out to workers. It's not perfectly accurate because the tuples will be
3522 : * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
3523 : * it should be pretty close. It tends to overestimate by a fraction of a
3524 : * chunk per worker since all workers gang up to preallocate during hashing,
3525 : * but workers tend to reload batches alone if there are enough to go around,
3526 : * leaving fewer partially filled chunks. This effect is bounded by
3527 : * nparticipants.
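 : *
 : * As a rough illustration, assuming the 32kB HASH_CHUNK_SIZE defined in
 : * hashjoin.h: with four participants the overestimate is bounded by
 : * about four partially filled chunks, on the order of 128kB.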
3528 : *
3529 : * Return false if the number of batches or buckets has changed, and the
3530 : * caller should reconsider which batch a given tuple now belongs in and call
3531 : * again.
3532 : */
3533 : static bool
3534 1704 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
3535 : {
3536 1704 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3537 1704 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
3538 1704 : size_t want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
3539 :
3540 : Assert(batchno > 0);
3541 : Assert(batchno < hashtable->nbatch);
3542 : Assert(size == MAXALIGN(size));
3543 :
3544 1704 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3545 :
3546 : /* Has another participant commanded us to help grow? */
3547 1704 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3548 1692 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3549 : {
3550 12 : ParallelHashGrowth growth = pstate->growth;
3551 :
3552 12 : LWLockRelease(&pstate->lock);
3553 12 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3554 12 : ExecParallelHashIncreaseNumBatches(hashtable);
3555 0 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3556 0 : ExecParallelHashIncreaseNumBuckets(hashtable);
3557 :
3558 12 : return false;
3559 : }
3560 :
3561 1692 : if (pstate->growth != PHJ_GROWTH_DISABLED &&
3562 1464 : batch->at_least_one_chunk &&
3563 714 : (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
3564 714 : > pstate->space_allowed))
3565 : {
3566 : /*
3567 : * We have determined that this batch would exceed the space budget if
3568 : * loaded into memory. Command all participants to help repartition.
3569 : */
3570 12 : batch->shared->space_exhausted = true;
3571 12 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3572 12 : LWLockRelease(&pstate->lock);
3573 :
3574 12 : return false;
3575 : }
3576 :
3577 1680 : batch->at_least_one_chunk = true;
3578 1680 : batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
3579 1680 : batch->preallocated = want;
3580 1680 : LWLockRelease(&pstate->lock);
3581 :
3582 1680 : return true;
3583 : }
3584 :
3585 : /*
3586 : * Calculate the limit on how much memory can be used by Hash and similar
3587 : * plan types. This is work_mem times hash_mem_multiplier, and is
3588 : * expressed in bytes.
3589 : *
3590 : * Exported for use by the planner, as well as other hash-like executor
3591 : * nodes. This is a rather random place for this, but there is no better
3592 : * place.
3593 : */
3594 : size_t
3595 1088432 : get_hash_memory_limit(void)
3596 : {
3597 : double mem_limit;
3598 :
3599 : /* Do initial calculation in double arithmetic */
3600 1088432 : mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
3601 :
3602 : /* Clamp in case it doesn't fit in size_t */
3603 1088432 : mem_limit = Min(mem_limit, (double) SIZE_MAX);
3604 :
3605 1088432 : return (size_t) mem_limit;
3606 : }
|