Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeHash.c
4 : * Routines to hash relations for hashjoin
5 : *
6 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeHash.c
12 : *
13 : * See note on parallelism in nodeHashjoin.c.
14 : *
15 : *-------------------------------------------------------------------------
16 : */
17 : /*
18 : * INTERFACE ROUTINES
19 : * MultiExecHash - generate an in-memory hash table of the relation
20 : * ExecInitHash - initialize node and subnodes
21 : * ExecEndHash - shutdown node and subnodes
22 : */
23 :
24 : #include "postgres.h"
25 :
26 : #include <math.h>
27 : #include <limits.h>
28 :
29 : #include "access/htup_details.h"
30 : #include "access/parallel.h"
31 : #include "catalog/pg_statistic.h"
32 : #include "commands/tablespace.h"
33 : #include "executor/execdebug.h"
34 : #include "executor/hashjoin.h"
35 : #include "executor/nodeHash.h"
36 : #include "executor/nodeHashjoin.h"
37 : #include "miscadmin.h"
38 : #include "pgstat.h"
39 : #include "port/atomics.h"
40 : #include "port/pg_bitutils.h"
41 : #include "utils/dynahash.h"
42 : #include "utils/guc.h"
43 : #include "utils/lsyscache.h"
44 : #include "utils/memutils.h"
45 : #include "utils/syscache.h"
46 :
47 : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
48 : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
49 : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
50 : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
51 : static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
52 : int mcvsToUse);
53 : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
54 : TupleTableSlot *slot,
55 : uint32 hashvalue,
56 : int bucketNumber);
57 : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
58 :
59 : static void *dense_alloc(HashJoinTable hashtable, Size size);
60 : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
61 : size_t size,
62 : dsa_pointer *shared);
63 : static void MultiExecPrivateHash(HashState *node);
64 : static void MultiExecParallelHash(HashState *node);
65 : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
66 : int bucketno);
67 : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
68 : HashJoinTuple tuple);
69 : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
70 : HashJoinTuple tuple,
71 : dsa_pointer tuple_shared);
72 : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
73 : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
74 : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
75 : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
76 : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
77 : dsa_pointer *shared);
78 : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
79 : int batchno,
80 : size_t size);
81 : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
82 : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
83 :
84 :
85 : /* ----------------------------------------------------------------
86 : * ExecHash
87 : *
88 : * stub for pro forma compliance
89 : * ----------------------------------------------------------------
90 : */
91 : static TupleTableSlot *
92 0 : ExecHash(PlanState *pstate)
93 : {
94 0 : elog(ERROR, "Hash node does not support ExecProcNode call convention");
95 : return NULL;
96 : }
97 :
98 : /* ----------------------------------------------------------------
99 : * MultiExecHash
100 : *
101 : * build hash table for hashjoin, doing partitioning if more
102 : * than one batch is required.
103 : * ----------------------------------------------------------------
104 : */
105 : Node *
106 18686 : MultiExecHash(HashState *node)
107 : {
108 : /* must provide our own instrumentation support */
109 18686 : if (node->ps.instrument)
110 296 : InstrStartNode(node->ps.instrument);
111 :
112 18686 : if (node->parallel_state != NULL)
113 396 : MultiExecParallelHash(node);
114 : else
115 18290 : MultiExecPrivateHash(node);
116 :
117 : /* must provide our own instrumentation support */
118 18686 : if (node->ps.instrument)
119 296 : InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
120 :
121 : /*
122 : * We do not return the hash table directly because it's not a subtype of
123 : * Node, and so would violate the MultiExecProcNode API. Instead, our
124 : * parent Hashjoin node is expected to know how to fish it out of our node
125 : * state. Ugly but not really worth cleaning up, since Hashjoin knows
126 : * quite a bit more about Hash besides that.
127 : */
128 18686 : return NULL;
129 : }
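/*
 * Editor's sketch (not part of nodeHash.c): how a caller can "fish out" the
 * hash table mentioned in the comment above.  The real logic lives in
 * nodeHashjoin.c and is more involved; this only illustrates the contract
 * that MultiExecHash() returns NULL while leaving its result in
 * HashState.hashtable.  Assumes the table has already been created
 * (ExecHashTableCreate) and attached to the HashState by the caller, as
 * nodeHashjoin.c does.
 */
static HashJoinTable
fetch_built_hash_table(HashJoinState *hjstate)
{
    HashState  *hashNode = (HashState *) innerPlanState(hjstate);

    /* drive the build; the return value is NULL by convention */
    (void) MultiExecProcNode((PlanState *) hashNode);

    /* the interesting result is stashed in the node state */
    return hashNode->hashtable;
}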
130 :
131 : /* ----------------------------------------------------------------
132 : * MultiExecPrivateHash
133 : *
134 : * parallel-oblivious version, building a backend-private
135 : * hash table and (if necessary) batch files.
136 : * ----------------------------------------------------------------
137 : */
138 : static void
139 18290 : MultiExecPrivateHash(HashState *node)
140 : {
141 : PlanState *outerNode;
142 : List *hashkeys;
143 : HashJoinTable hashtable;
144 : TupleTableSlot *slot;
145 : ExprContext *econtext;
146 : uint32 hashvalue;
147 :
148 : /*
149 : * get state info from node
150 : */
151 18290 : outerNode = outerPlanState(node);
152 18290 : hashtable = node->hashtable;
153 :
154 : /*
155 : * set expression context
156 : */
157 18290 : hashkeys = node->hashkeys;
158 18290 : econtext = node->ps.ps_ExprContext;
159 :
160 : /*
161 : * Get all tuples from the node below the Hash node and insert into the
162 : * hash table (or temp files).
163 : */
164 : for (;;)
165 : {
166 7356606 : slot = ExecProcNode(outerNode);
167 7356606 : if (TupIsNull(slot))
168 : break;
169 : /* We have to compute the hash value */
170 7338316 : econtext->ecxt_outertuple = slot;
171 7338316 : if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
172 7338316 : false, hashtable->keepNulls,
173 : &hashvalue))
174 : {
175 : int bucketNumber;
176 :
177 7338304 : bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
178 7338304 : if (bucketNumber != INVALID_SKEW_BUCKET_NO)
179 : {
180 : /* It's a skew tuple, so put it into that hash table */
181 588 : ExecHashSkewTableInsert(hashtable, slot, hashvalue,
182 : bucketNumber);
183 588 : hashtable->skewTuples += 1;
184 : }
185 : else
186 : {
187 : /* Not subject to skew optimization, so insert normally */
188 7337716 : ExecHashTableInsert(hashtable, slot, hashvalue);
189 : }
190 7338304 : hashtable->totalTuples += 1;
191 : }
192 : }
193 :
194 : /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
195 18290 : if (hashtable->nbuckets != hashtable->nbuckets_optimal)
196 88 : ExecHashIncreaseNumBuckets(hashtable);
197 :
198 : /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
199 18290 : hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
200 18290 : if (hashtable->spaceUsed > hashtable->spacePeak)
201 18262 : hashtable->spacePeak = hashtable->spaceUsed;
202 :
203 18290 : hashtable->partialTuples = hashtable->totalTuples;
204 18290 : }
205 :
206 : /* ----------------------------------------------------------------
207 : * MultiExecParallelHash
208 : *
209 : * parallel-aware version, building a shared hash table and
210 : * (if necessary) batch files using the combined effort of
211 : * a set of co-operating backends.
212 : * ----------------------------------------------------------------
213 : */
214 : static void
215 396 : MultiExecParallelHash(HashState *node)
216 : {
217 : ParallelHashJoinState *pstate;
218 : PlanState *outerNode;
219 : List *hashkeys;
220 : HashJoinTable hashtable;
221 : TupleTableSlot *slot;
222 : ExprContext *econtext;
223 : uint32 hashvalue;
224 : Barrier *build_barrier;
225 : int i;
226 :
227 : /*
228 : * get state info from node
229 : */
230 396 : outerNode = outerPlanState(node);
231 396 : hashtable = node->hashtable;
232 :
233 : /*
234 : * set expression context
235 : */
236 396 : hashkeys = node->hashkeys;
237 396 : econtext = node->ps.ps_ExprContext;
238 :
239 : /*
240 : * Synchronize the parallel hash table build. At this stage we know that
241 : * the shared hash table has been or is being set up by
242 : * ExecHashTableCreate(), but we don't know if our peers have returned
243 : * from there or are here in MultiExecParallelHash(), and if so how far
244 : * through they are. To find out, we check the build_barrier phase then
245 : * and jump to the right step in the build algorithm.
246 : */
247 396 : pstate = hashtable->parallel_state;
248 396 : build_barrier = &pstate->build_barrier;
249 : Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
250 396 : switch (BarrierPhase(build_barrier))
251 : {
252 168 : case PHJ_BUILD_ALLOCATE:
253 :
254 : /*
255 : * Either I just allocated the initial hash table in
256 : * ExecHashTableCreate(), or someone else is doing that. Either
257 : * way, wait for everyone to arrive here so we can proceed.
258 : */
259 168 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
260 : /* Fall through. */
261 :
262 240 : case PHJ_BUILD_HASH_INNER:
263 :
264 : /*
265 : * It's time to begin hashing, or if we just arrived here then
266 : * hashing is already underway, so join in that effort. While
267 : * hashing we have to be prepared to help increase the number of
268 : * batches or buckets at any time, and if we arrived here when
269 : * that was already underway we'll have to help complete that work
270 : * immediately so that it's safe to access batches and buckets
271 : * below.
272 : */
273 240 : if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
274 : PHJ_GROW_BATCHES_ELECT)
275 0 : ExecParallelHashIncreaseNumBatches(hashtable);
276 240 : if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
277 : PHJ_GROW_BUCKETS_ELECT)
278 0 : ExecParallelHashIncreaseNumBuckets(hashtable);
279 240 : ExecParallelHashEnsureBatchAccessors(hashtable);
280 240 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
281 : for (;;)
282 : {
283 2160360 : slot = ExecProcNode(outerNode);
284 2160360 : if (TupIsNull(slot))
285 : break;
286 2160120 : econtext->ecxt_outertuple = slot;
287 2160120 : if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
288 2160120 : false, hashtable->keepNulls,
289 : &hashvalue))
290 2160120 : ExecParallelHashTableInsert(hashtable, slot, hashvalue);
291 2160120 : hashtable->partialTuples++;
292 : }
293 :
294 : /*
295 : * Make sure that any tuples we wrote to disk are visible to
296 : * others before anyone tries to load them.
297 : */
298 1314 : for (i = 0; i < hashtable->nbatch; ++i)
299 1074 : sts_end_write(hashtable->batches[i].inner_tuples);
300 :
301 : /*
302 : * Update shared counters. We need an accurate total tuple count
303 : * to control the empty table optimization.
304 : */
305 240 : ExecParallelHashMergeCounters(hashtable);
306 :
307 240 : BarrierDetach(&pstate->grow_buckets_barrier);
308 240 : BarrierDetach(&pstate->grow_batches_barrier);
309 :
310 : /*
311 : * Wait for everyone to finish building and flushing files and
312 : * counters.
313 : */
314 240 : if (BarrierArriveAndWait(build_barrier,
315 : WAIT_EVENT_HASH_BUILD_HASH_INNER))
316 : {
317 : /*
318 : * Elect one backend to disable any further growth. Batches
319 : * are now fixed. While building them we made sure they'd fit
320 : * in our memory budget when we load them back in later (or we
321 : * tried to do that and gave up because we detected extreme
322 : * skew).
323 : */
324 168 : pstate->growth = PHJ_GROWTH_DISABLED;
325 : }
326 : }
327 :
328 : /*
329 : * We're not yet attached to a batch. We all agree on the dimensions and
330 : * number of inner tuples (for the empty table optimization).
331 : */
332 396 : hashtable->curbatch = -1;
333 396 : hashtable->nbuckets = pstate->nbuckets;
334 396 : hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
335 396 : hashtable->totalTuples = pstate->total_tuples;
336 :
337 : /*
338 : * Unless we're completely done and the batch state has been freed, make
339 : * sure we have accessors.
340 : */
341 396 : if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
342 396 : ExecParallelHashEnsureBatchAccessors(hashtable);
343 :
344 : /*
345 : * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
346 : * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
347 : * there already).
348 : */
349 : Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
350 : BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
351 : BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
352 396 : }
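/*
 * Editor's roadmap (summary added for orientation, not part of the original
 * file): the build_barrier phases referenced above, in the order a Parallel
 * Hash build moves through them.  The outer-side work is driven from
 * nodeHashjoin.c.
 *
 *   PHJ_BUILD_ELECT        one backend is elected to create the shared batch
 *                          state and batch 0's bucket array (see
 *                          ExecHashTableCreate below)
 *   PHJ_BUILD_ALLOCATE     everyone waits for that allocation to finish
 *   PHJ_BUILD_HASH_INNER   all attached backends hash the inner relation,
 *                          helping with any batch or bucket growth
 *   PHJ_BUILD_HASH_OUTER   the outer relation is partitioned into batch
 *                          files (multi-batch case only)
 *   PHJ_BUILD_RUN          probing can begin
 *   PHJ_BUILD_FREE         the shared batch state has been freed
 */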
353 :
354 : /* ----------------------------------------------------------------
355 : * ExecInitHash
356 : *
357 : * Init routine for Hash node
358 : * ----------------------------------------------------------------
359 : */
360 : HashState *
361 27592 : ExecInitHash(Hash *node, EState *estate, int eflags)
362 : {
363 : HashState *hashstate;
364 :
365 : /* check for unsupported flags */
366 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
367 :
368 : /*
369 : * create state structure
370 : */
371 27592 : hashstate = makeNode(HashState);
372 27592 : hashstate->ps.plan = (Plan *) node;
373 27592 : hashstate->ps.state = estate;
374 27592 : hashstate->ps.ExecProcNode = ExecHash;
375 27592 : hashstate->hashtable = NULL;
376 27592 : hashstate->hashkeys = NIL; /* set for real below, from node->hashkeys */
377 :
378 : /*
379 : * Miscellaneous initialization
380 : *
381 : * create expression context for node
382 : */
383 27592 : ExecAssignExprContext(estate, &hashstate->ps);
384 :
385 : /*
386 : * initialize child nodes
387 : */
388 27592 : outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
389 :
390 : /*
391 : * initialize our result slot and type. No need to build projection
392 : * because this node doesn't do projections.
393 : */
394 27592 : ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
395 27592 : hashstate->ps.ps_ProjInfo = NULL;
396 :
397 : /*
398 : * initialize child expressions
399 : */
400 : Assert(node->plan.qual == NIL);
401 27592 : hashstate->hashkeys =
402 27592 : ExecInitExprList(node->hashkeys, (PlanState *) hashstate);
403 :
404 27592 : return hashstate;
405 : }
406 :
407 : /* ---------------------------------------------------------------
408 : * ExecEndHash
409 : *
410 : * clean up routine for Hash node
411 : * ----------------------------------------------------------------
412 : */
413 : void
414 27508 : ExecEndHash(HashState *node)
415 : {
416 : PlanState *outerPlan;
417 :
418 : /*
419 : * shut down the subplan
420 : */
421 27508 : outerPlan = outerPlanState(node);
422 27508 : ExecEndNode(outerPlan);
423 27508 : }
424 :
425 :
426 : /* ----------------------------------------------------------------
427 : * ExecHashTableCreate
428 : *
429 : * create an empty hashtable data structure for hashjoin.
430 : * ----------------------------------------------------------------
431 : */
432 : HashJoinTable
433 18686 : ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, bool keepNulls)
434 : {
435 : Hash *node;
436 : HashJoinTable hashtable;
437 : Plan *outerNode;
438 : size_t space_allowed;
439 : int nbuckets;
440 : int nbatch;
441 : double rows;
442 : int num_skew_mcvs;
443 : int log2_nbuckets;
444 : int nkeys;
445 : int i;
446 : ListCell *ho;
447 : ListCell *hc;
448 : MemoryContext oldcxt;
449 :
450 : /*
451 : * Get information about the size of the relation to be hashed (it's the
452 : * "outer" subtree of this node, but the inner relation of the hashjoin).
453 : * Compute the appropriate size of the hash table.
454 : */
455 18686 : node = (Hash *) state->ps.plan;
456 18686 : outerNode = outerPlan(node);
457 :
458 : /*
459 : * If this is a shared hash table with a partial plan, then we can't use
460 : * outerNode->plan_rows to estimate its size. We need an estimate of the
461 : * total number of rows across all copies of the partial plan.
462 : */
463 18686 : rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
464 :
465 18290 : ExecChooseHashTableSize(rows, outerNode->plan_width,
466 18686 : OidIsValid(node->skewTable),
467 18686 : state->parallel_state != NULL,
468 18686 : state->parallel_state != NULL ?
469 396 : state->parallel_state->nparticipants - 1 : 0,
470 : &space_allowed,
471 : &nbuckets, &nbatch, &num_skew_mcvs);
472 :
473 : /* nbuckets must be a power of 2 */
474 18686 : log2_nbuckets = my_log2(nbuckets);
475 : Assert(nbuckets == (1 << log2_nbuckets));
476 :
477 : /*
478 : * Initialize the hash table control block.
479 : *
480 : * The hashtable control block is just palloc'd from the executor's
481 : * per-query memory context. Everything else should be kept inside the
482 : * subsidiary hashCxt, batchCxt or spillCxt.
483 : */
484 18686 : hashtable = palloc_object(HashJoinTableData);
485 18686 : hashtable->nbuckets = nbuckets;
486 18686 : hashtable->nbuckets_original = nbuckets;
487 18686 : hashtable->nbuckets_optimal = nbuckets;
488 18686 : hashtable->log2_nbuckets = log2_nbuckets;
489 18686 : hashtable->log2_nbuckets_optimal = log2_nbuckets;
490 18686 : hashtable->buckets.unshared = NULL;
491 18686 : hashtable->keepNulls = keepNulls;
492 18686 : hashtable->skewEnabled = false;
493 18686 : hashtable->skewBucket = NULL;
494 18686 : hashtable->skewBucketLen = 0;
495 18686 : hashtable->nSkewBuckets = 0;
496 18686 : hashtable->skewBucketNums = NULL;
497 18686 : hashtable->nbatch = nbatch;
498 18686 : hashtable->curbatch = 0;
499 18686 : hashtable->nbatch_original = nbatch;
500 18686 : hashtable->nbatch_outstart = nbatch;
501 18686 : hashtable->growEnabled = true;
502 18686 : hashtable->totalTuples = 0;
503 18686 : hashtable->partialTuples = 0;
504 18686 : hashtable->skewTuples = 0;
505 18686 : hashtable->innerBatchFile = NULL;
506 18686 : hashtable->outerBatchFile = NULL;
507 18686 : hashtable->spaceUsed = 0;
508 18686 : hashtable->spacePeak = 0;
509 18686 : hashtable->spaceAllowed = space_allowed;
510 18686 : hashtable->spaceUsedSkew = 0;
511 18686 : hashtable->spaceAllowedSkew =
512 18686 : hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
513 18686 : hashtable->chunks = NULL;
514 18686 : hashtable->current_chunk = NULL;
515 18686 : hashtable->parallel_state = state->parallel_state;
516 18686 : hashtable->area = state->ps.state->es_query_dsa;
517 18686 : hashtable->batches = NULL;
518 :
519 : #ifdef HJDEBUG
520 : printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
521 : hashtable, nbatch, nbuckets);
522 : #endif
523 :
524 : /*
525 : * Create temporary memory contexts in which to keep the hashtable working
526 : * storage. See notes in executor/hashjoin.h.
527 : */
528 18686 : hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
529 : "HashTableContext",
530 : ALLOCSET_DEFAULT_SIZES);
531 :
532 18686 : hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
533 : "HashBatchContext",
534 : ALLOCSET_DEFAULT_SIZES);
535 :
536 18686 : hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
537 : "HashSpillContext",
538 : ALLOCSET_DEFAULT_SIZES);
539 :
540 : /* Allocate data that will live for the life of the hashjoin */
541 :
542 18686 : oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
543 :
544 : /*
545 : * Get info about the hash functions to be used for each hash key. Also
546 : * remember whether the join operators are strict.
547 : */
548 18686 : nkeys = list_length(hashOperators);
549 18686 : hashtable->outer_hashfunctions = palloc_array(FmgrInfo, nkeys);
550 18686 : hashtable->inner_hashfunctions = palloc_array(FmgrInfo, nkeys);
551 18686 : hashtable->hashStrict = palloc_array(bool, nkeys);
552 18686 : hashtable->collations = palloc_array(Oid, nkeys);
553 18686 : i = 0;
554 38694 : forboth(ho, hashOperators, hc, hashCollations)
555 : {
556 20008 : Oid hashop = lfirst_oid(ho);
557 : Oid left_hashfn;
558 : Oid right_hashfn;
559 :
560 20008 : if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
561 0 : elog(ERROR, "could not find hash function for hash operator %u",
562 : hashop);
563 20008 : fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
564 20008 : fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
565 20008 : hashtable->hashStrict[i] = op_strict(hashop);
566 20008 : hashtable->collations[i] = lfirst_oid(hc);
567 20008 : i++;
568 : }
569 :
570 18686 : if (nbatch > 1 && hashtable->parallel_state == NULL)
571 : {
572 : MemoryContext oldctx;
573 :
574 : /*
575 : * allocate and initialize the file arrays in hashCxt (not needed for
576 : * parallel case which uses shared tuplestores instead of raw files)
577 : */
578 110 : oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
579 :
580 110 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
581 110 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
582 :
583 110 : MemoryContextSwitchTo(oldctx);
584 :
585 : /* The files will not be opened until needed... */
586 : /* ... but make sure we have temp tablespaces established for them */
587 110 : PrepareTempTablespaces();
588 : }
589 :
590 18686 : MemoryContextSwitchTo(oldcxt);
591 :
592 18686 : if (hashtable->parallel_state)
593 : {
594 396 : ParallelHashJoinState *pstate = hashtable->parallel_state;
595 : Barrier *build_barrier;
596 :
597 : /*
598 : * Attach to the build barrier. The corresponding detach operation is
599 : * in ExecHashTableDetach. Note that we won't attach to the
600 : * batch_barrier for batch 0 yet. We'll attach later and start it out
601 : * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
602 : * then loaded while hashing (the standard hybrid hash join
603 : * algorithm), and we'll coordinate that using build_barrier.
604 : */
605 396 : build_barrier = &pstate->build_barrier;
606 396 : BarrierAttach(build_barrier);
607 :
608 : /*
609 : * So far we have no idea whether there are any other participants,
610 : * and if so, what phase they are working on. The only thing we care
611 : * about at this point is whether someone has already created the
612 : * SharedHashJoinBatch objects and the hash table for batch 0. One
613 : * backend will be elected to do that now if necessary.
614 : */
615 564 : if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
616 168 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
617 : {
618 168 : pstate->nbatch = nbatch;
619 168 : pstate->space_allowed = space_allowed;
620 168 : pstate->growth = PHJ_GROWTH_OK;
621 :
622 : /* Set up the shared state for coordinating batches. */
623 168 : ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
624 :
625 : /*
626 : * Allocate batch 0's hash table up front so we can load it
627 : * directly while hashing.
628 : */
629 168 : pstate->nbuckets = nbuckets;
630 168 : ExecParallelHashTableAlloc(hashtable, 0);
631 : }
632 :
633 : /*
634 : * The next Parallel Hash synchronization point is in
635 : * MultiExecParallelHash(), which will progress it all the way to
636 : * PHJ_BUILD_RUN. The caller must not return control from this
637 : * executor node between now and then.
638 : */
639 : }
640 : else
641 : {
642 : /*
643 : * Prepare context for the first-scan space allocations; allocate the
644 : * hashbucket array therein, and set each bucket "empty".
645 : */
646 18290 : MemoryContextSwitchTo(hashtable->batchCxt);
647 :
648 18290 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
649 :
650 : /*
651 : * Set up for skew optimization, if possible and there's a need for
652 : * more than one batch. (In a one-batch join, there's no point in
653 : * it.)
654 : */
655 18290 : if (nbatch > 1)
656 110 : ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
657 :
658 18290 : MemoryContextSwitchTo(oldcxt);
659 : }
660 :
661 18686 : return hashtable;
662 : }
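/*
 * Editor's worked example for the skew budget set above (a standalone
 * sketch; the 2% figure is an assumption standing in for
 * SKEW_HASH_MEM_PERCENT, and 8 MB stands in for spaceAllowed): the skew
 * hash table only ever gets a sliver of the memory budget, so enabling the
 * skew optimization barely shrinks the main table.
 */
#include <stdio.h>

int
main(void)
{
    size_t  space_allowed = 8u * 1024 * 1024;   /* assumed hash_mem limit */
    int     skew_percent = 2;                   /* assumed SKEW_HASH_MEM_PERCENT */
    size_t  space_allowed_skew = space_allowed * skew_percent / 100;

    /* prints: skew table budget: 167772 of 8388608 bytes */
    printf("skew table budget: %zu of %zu bytes\n",
           space_allowed_skew, space_allowed);
    return 0;
}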
663 :
664 :
665 : /*
666 : * Compute appropriate size for hashtable given the estimated size of the
667 : * relation to be hashed (number of rows and average row width).
668 : *
669 : * This is exported so that the planner's costsize.c can use it.
670 : */
671 :
672 : /* Target bucket loading (tuples per bucket) */
673 : #define NTUP_PER_BUCKET 1
674 :
675 : void
676 516340 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
677 : bool try_combined_hash_mem,
678 : int parallel_workers,
679 : size_t *space_allowed,
680 : int *numbuckets,
681 : int *numbatches,
682 : int *num_skew_mcvs)
683 : {
684 : int tupsize;
685 : double inner_rel_bytes;
686 : size_t hash_table_bytes;
687 : size_t bucket_bytes;
688 : size_t max_pointers;
689 516340 : int nbatch = 1;
690 : int nbuckets;
691 : double dbuckets;
692 :
693 : /* Force a plausible relation size if no info */
694 516340 : if (ntuples <= 0.0)
695 150 : ntuples = 1000.0;
696 :
697 : /*
698 : * Estimate tupsize based on footprint of tuple in hashtable... note this
699 : * does not allow for any palloc overhead. The manipulations of spaceUsed
700 : * don't count palloc overhead either.
701 : */
702 516340 : tupsize = HJTUPLE_OVERHEAD +
703 516340 : MAXALIGN(SizeofMinimalTupleHeader) +
704 516340 : MAXALIGN(tupwidth);
705 516340 : inner_rel_bytes = ntuples * tupsize;
706 :
707 : /*
708 : * Compute in-memory hashtable size limit from GUCs.
709 : */
710 516340 : hash_table_bytes = get_hash_memory_limit();
711 :
712 : /*
713 : * Parallel Hash tries to use the combined hash_mem of all workers to
714 : * avoid the need to batch. If that won't work, it falls back to hash_mem
715 : * per worker and tries to process batches in parallel.
716 : */
717 516340 : if (try_combined_hash_mem)
718 : {
719 : /* Careful, this could overflow size_t */
720 : double newlimit;
721 :
722 11970 : newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
723 11970 : newlimit = Min(newlimit, (double) SIZE_MAX);
724 11970 : hash_table_bytes = (size_t) newlimit;
725 : }
726 :
727 516340 : *space_allowed = hash_table_bytes;
728 :
729 : /*
730 : * If skew optimization is possible, estimate the number of skew buckets
731 : * that will fit in the memory allowed, and decrement the assumed space
732 : * available for the main hash table accordingly.
733 : *
734 : * We make the optimistic assumption that each skew bucket will contain
735 : * one inner-relation tuple. If that turns out to be low, we will recover
736 : * at runtime by reducing the number of skew buckets.
737 : *
738 : * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
739 : * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
740 : * will round up to the next power of 2 and then multiply by 4 to reduce
741 : * collisions.
742 : */
743 516340 : if (useskew)
744 : {
745 : size_t bytes_per_mcv;
746 : size_t skew_mcvs;
747 :
748 : /*----------
749 : * Compute number of MCVs we could hold in hash_table_bytes
750 : *
751 : * Divisor is:
752 : * size of a hash tuple +
753 : * worst-case size of skewBucket[] per MCV +
754 : * size of skewBucketNums[] entry +
755 : * size of skew bucket struct itself
756 : *----------
757 : */
758 512496 : bytes_per_mcv = tupsize +
759 : (8 * sizeof(HashSkewBucket *)) +
760 512496 : sizeof(int) +
761 : SKEW_BUCKET_OVERHEAD;
762 512496 : skew_mcvs = hash_table_bytes / bytes_per_mcv;
763 :
764 : /*
765 : * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
766 : * not to worry about size_t overflow in the multiplication)
767 : */
768 512496 : skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
769 :
770 : /* Now clamp to integer range */
771 512496 : skew_mcvs = Min(skew_mcvs, INT_MAX);
772 :
773 512496 : *num_skew_mcvs = (int) skew_mcvs;
774 :
775 : /* Reduce hash_table_bytes by the amount needed for the skew table */
776 512496 : if (skew_mcvs > 0)
777 512496 : hash_table_bytes -= skew_mcvs * bytes_per_mcv;
778 : }
779 : else
780 3844 : *num_skew_mcvs = 0;
781 :
782 : /*
783 : * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
784 : * memory is filled, assuming a single batch; but limit the value so that
785 : * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
786 : * nor MaxAllocSize.
787 : *
788 : * Note that both nbuckets and nbatch must be powers of 2 to make
789 : * ExecHashGetBucketAndBatch fast.
790 : */
791 516340 : max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
792 516340 : max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
793 : /* If max_pointers isn't a power of 2, must round it down to one */
794 516340 : max_pointers = pg_prevpower2_size_t(max_pointers);
795 :
796 : /* Also ensure we avoid integer overflow in nbatch and nbuckets */
797 : /* (this step is redundant given the current value of MaxAllocSize) */
798 516340 : max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
799 :
800 516340 : dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
801 516340 : dbuckets = Min(dbuckets, max_pointers);
802 516340 : nbuckets = (int) dbuckets;
803 : /* don't let nbuckets be really small, though ... */
804 516340 : nbuckets = Max(nbuckets, 1024);
805 : /* ... and force it to be a power of 2. */
806 516340 : nbuckets = pg_nextpower2_32(nbuckets);
807 :
808 : /*
809 : * If there's not enough space to store the projected number of tuples and
810 : * the required bucket headers, we will need multiple batches.
811 : */
812 516340 : bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
813 516340 : if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
814 : {
815 : /* We'll need multiple batches */
816 : size_t sbuckets;
817 : double dbatch;
818 : int minbatch;
819 : size_t bucket_size;
820 :
821 : /*
822 : * If Parallel Hash with combined hash_mem would still need multiple
823 : * batches, we'll have to fall back to regular hash_mem budget.
824 : */
825 4774 : if (try_combined_hash_mem)
826 : {
827 246 : ExecChooseHashTableSize(ntuples, tupwidth, useskew,
828 : false, parallel_workers,
829 : space_allowed,
830 : numbuckets,
831 : numbatches,
832 : num_skew_mcvs);
833 246 : return;
834 : }
835 :
836 : /*
837 : * Estimate the number of buckets we'll want to have when hash_mem is
838 : * entirely full. Each bucket will contain a bucket pointer plus
839 : * NTUP_PER_BUCKET tuples, whose projected size already includes
840 : * overhead for the hash code, pointer to the next tuple, etc.
841 : */
842 4528 : bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
843 4528 : if (hash_table_bytes <= bucket_size)
844 0 : sbuckets = 1; /* avoid pg_nextpower2_size_t(0) */
845 : else
846 4528 : sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
847 4528 : sbuckets = Min(sbuckets, max_pointers);
848 4528 : nbuckets = (int) sbuckets;
849 4528 : nbuckets = pg_nextpower2_32(nbuckets);
850 4528 : bucket_bytes = nbuckets * sizeof(HashJoinTuple);
851 :
852 : /*
853 : * Buckets are simple pointers to hashjoin tuples, while tupsize
854 : * includes the pointer, hash code, and MinimalTupleData. So buckets
855 : * should never really exceed 25% of hash_mem (even for
856 : * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
857 : * 2^N bytes, where we might get more because of doubling. So let's
858 : * look for 50% here.
859 : */
860 : Assert(bucket_bytes <= hash_table_bytes / 2);
861 :
862 : /* Calculate required number of batches. */
863 4528 : dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
864 4528 : dbatch = Min(dbatch, max_pointers);
865 4528 : minbatch = (int) dbatch;
866 4528 : nbatch = pg_nextpower2_32(Max(2, minbatch));
867 : }
868 :
869 : Assert(nbuckets > 0);
870 : Assert(nbatch > 0);
871 :
872 516094 : *numbuckets = nbuckets;
873 516094 : *numbatches = nbatch;
874 : }
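/*
 * Editor's worked example for the sizing logic above (a standalone sketch
 * using assumed numbers, not values taken from any particular build; the
 * skew-table deduction and the parallel combined-hash_mem retry are
 * ignored): 1 million inner tuples at ~80 bytes of hashtable footprint
 * each, against an 8 MB hash_mem limit.  inner_rel_bytes (~80 MB) exceeds
 * the limit, so the multi-batch path applies: nbuckets is sized for one
 * memory-full batch, and nbatch is the power of two large enough that each
 * batch's tuples fit next to the bucket array.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
next_pow2(uint32_t v)
{
    uint32_t    p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

int
main(void)
{
    double      ntuples = 1000000.0;
    size_t      tupsize = 80;                   /* assumed per-tuple footprint */
    size_t      hash_mem = 8 * 1024 * 1024;     /* assumed hash_table_bytes */
    double      inner_rel_bytes = ntuples * tupsize;
    size_t      bucket_size = tupsize + sizeof(void *); /* tuple + bucket header */
    uint32_t    nbuckets = next_pow2((uint32_t) (hash_mem / bucket_size));
    size_t      bucket_bytes = nbuckets * sizeof(void *);
    int         nbatch = (int) next_pow2((uint32_t)
                    ceil(inner_rel_bytes / (double) (hash_mem - bucket_bytes)));

    /* on a 64-bit build this prints: nbuckets = 131072, nbatch = 16 */
    printf("nbuckets = %u, nbatch = %d\n", (unsigned) nbuckets, nbatch);
    return 0;
}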
875 :
876 :
877 : /* ----------------------------------------------------------------
878 : * ExecHashTableDestroy
879 : *
880 : * destroy a hash table
881 : * ----------------------------------------------------------------
882 : */
883 : void
884 18602 : ExecHashTableDestroy(HashJoinTable hashtable)
885 : {
886 : int i;
887 :
888 : /*
889 : * Make sure all the temp files are closed. We skip batch 0, since it
890 : * can't have any temp files (and the arrays might not even exist if
891 : * nbatch is only 1). Parallel hash joins don't use these files.
892 : */
893 18602 : if (hashtable->innerBatchFile != NULL)
894 : {
895 1424 : for (i = 1; i < hashtable->nbatch; i++)
896 : {
897 1254 : if (hashtable->innerBatchFile[i])
898 0 : BufFileClose(hashtable->innerBatchFile[i]);
899 1254 : if (hashtable->outerBatchFile[i])
900 0 : BufFileClose(hashtable->outerBatchFile[i]);
901 : }
902 : }
903 :
904 : /* Release working memory (batchCxt is a child, so it goes away too) */
905 18602 : MemoryContextDelete(hashtable->hashCxt);
906 :
907 : /* And drop the control block */
908 18602 : pfree(hashtable);
909 18602 : }
910 :
911 : /*
912 : * ExecHashIncreaseNumBatches
913 : * increase the original number of batches in order to reduce
914 : * current memory consumption
915 : */
916 : static void
917 483690 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
918 : {
919 483690 : int oldnbatch = hashtable->nbatch;
920 483690 : int curbatch = hashtable->curbatch;
921 : int nbatch;
922 : long ninmemory;
923 : long nfreed;
924 : HashMemoryChunk oldchunks;
925 :
926 : /* do nothing if we've decided to shut off growth */
927 483690 : if (!hashtable->growEnabled)
928 483560 : return;
929 :
930 : /* safety check to avoid overflow */
931 130 : if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
932 0 : return;
933 :
934 130 : nbatch = oldnbatch * 2;
935 : Assert(nbatch > 1);
936 :
937 : #ifdef HJDEBUG
938 : printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
939 : hashtable, nbatch, hashtable->spaceUsed);
940 : #endif
941 :
942 130 : if (hashtable->innerBatchFile == NULL)
943 : {
944 60 : MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
945 :
946 : /* we had no file arrays before */
947 60 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
948 60 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
949 :
950 60 : MemoryContextSwitchTo(oldcxt);
951 :
952 : /* time to establish the temp tablespaces, too */
953 60 : PrepareTempTablespaces();
954 : }
955 : else
956 : {
957 : /* enlarge arrays and zero out added entries */
958 70 : hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
959 70 : hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
960 : }
961 :
962 130 : hashtable->nbatch = nbatch;
963 :
964 : /*
965 : * Scan through the existing hash table entries and dump out any that are
966 : * no longer of the current batch.
967 : */
968 130 : ninmemory = nfreed = 0;
969 :
970 : /* If we know we need to resize nbuckets, we can do it while rebatching. */
971 130 : if (hashtable->nbuckets_optimal != hashtable->nbuckets)
972 : {
973 : /* we never decrease the number of buckets */
974 : Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
975 :
976 60 : hashtable->nbuckets = hashtable->nbuckets_optimal;
977 60 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
978 :
979 60 : hashtable->buckets.unshared =
980 60 : repalloc_array(hashtable->buckets.unshared,
981 : HashJoinTuple, hashtable->nbuckets);
982 : }
983 :
984 : /*
985 : * We will scan through the chunks directly, so that we can reset the
986 : * buckets now and not have to keep track which tuples in the buckets have
987 : * already been processed. We will free the old chunks as we go.
988 : */
989 130 : memset(hashtable->buckets.unshared, 0,
990 130 : sizeof(HashJoinTuple) * hashtable->nbuckets);
991 130 : oldchunks = hashtable->chunks;
992 130 : hashtable->chunks = NULL;
993 :
994 : /* so, let's scan through the old chunks, and all tuples in each chunk */
995 650 : while (oldchunks != NULL)
996 : {
997 520 : HashMemoryChunk nextchunk = oldchunks->next.unshared;
998 :
999 : /* position within the buffer (up to oldchunks->used) */
1000 520 : size_t idx = 0;
1001 :
1002 : /* process all tuples stored in this chunk (and then free it) */
1003 355118 : while (idx < oldchunks->used)
1004 : {
1005 354598 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
1006 354598 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1007 354598 : int hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
1008 : int bucketno;
1009 : int batchno;
1010 :
1011 354598 : ninmemory++;
1012 354598 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1013 : &bucketno, &batchno);
1014 :
1015 354598 : if (batchno == curbatch)
1016 : {
1017 : /* keep tuple in memory - copy it into the new chunk */
1018 : HashJoinTuple copyTuple;
1019 :
1020 138002 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1021 138002 : memcpy(copyTuple, hashTuple, hashTupleSize);
1022 :
1023 : /* and add it back to the appropriate bucket */
1024 138002 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1025 138002 : hashtable->buckets.unshared[bucketno] = copyTuple;
1026 : }
1027 : else
1028 : {
1029 : /* dump it out */
1030 : Assert(batchno > curbatch);
1031 216596 : ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
1032 : hashTuple->hashvalue,
1033 216596 : &hashtable->innerBatchFile[batchno],
1034 : hashtable);
1035 :
1036 216596 : hashtable->spaceUsed -= hashTupleSize;
1037 216596 : nfreed++;
1038 : }
1039 :
1040 : /* next tuple in this chunk */
1041 354598 : idx += MAXALIGN(hashTupleSize);
1042 :
1043 : /* allow this loop to be cancellable */
1044 354598 : CHECK_FOR_INTERRUPTS();
1045 : }
1046 :
1047 : /* we're done with this chunk - free it and proceed to the next one */
1048 520 : pfree(oldchunks);
1049 520 : oldchunks = nextchunk;
1050 : }
1051 :
1052 : #ifdef HJDEBUG
1053 : printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
1054 : hashtable, nfreed, ninmemory, hashtable->spaceUsed);
1055 : #endif
1056 :
1057 : /*
1058 : * If we dumped out either all or none of the tuples in the table, disable
1059 : * further expansion of nbatch. This situation implies that we have
1060 : * enough tuples of identical hashvalues to overflow spaceAllowed.
1061 : * Increasing nbatch will not fix it since there's no way to subdivide the
1062 : * group any more finely. We have to just gut it out and hope the server
1063 : * has enough RAM.
1064 : */
1065 130 : if (nfreed == 0 || nfreed == ninmemory)
1066 : {
1067 28 : hashtable->growEnabled = false;
1068 : #ifdef HJDEBUG
1069 : printf("Hashjoin %p: disabling further increase of nbatch\n",
1070 : hashtable);
1071 : #endif
1072 : }
1073 : }
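/*
 * Editor's sketch of the bucket/batch split that the repartitioning loop
 * above relies on.  This illustrates the idea only and is not a copy of
 * ExecHashGetBucketAndBatch() (the real function may mix the bits
 * differently): with power-of-two nbuckets and nbatch, the low
 * log2_nbuckets bits select the bucket and higher bits select the batch.
 * Doubling nbatch adds one batch bit, so a tuple either stays in its
 * current batch or moves to exactly one strictly later batch, which is
 * consistent with the Assert(batchno > curbatch) above.
 */
static inline void
sketch_get_bucket_and_batch(uint32 hashvalue, int log2_nbuckets,
                            int nbuckets, int nbatch,
                            int *bucketno, int *batchno)
{
    *bucketno = hashvalue & (nbuckets - 1);
    *batchno = (nbatch > 1) ?
        (int) ((hashvalue >> log2_nbuckets) & (nbatch - 1)) : 0;
}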
1074 :
1075 : /*
1076 : * ExecParallelHashIncreaseNumBatches
1077 : * Every participant attached to grow_batches_barrier must run this
1078 : * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
1079 : */
1080 : static void
1081 48 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
1082 : {
1083 48 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1084 :
1085 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1086 :
1087 : /*
1088 : * It's unlikely, but we need to be prepared for new participants to show
1089 : * up while we're in the middle of this operation, so we need to switch on
1090 : * barrier phase here.
1091 : */
1092 48 : switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
1093 : {
1094 48 : case PHJ_GROW_BATCHES_ELECT:
1095 :
1096 : /*
1097 : * Elect one participant to prepare to grow the number of batches.
1098 : * This involves reallocating or resetting the buckets of batch 0
1099 : * in preparation for all participants to begin repartitioning the
1100 : * tuples.
1101 : */
1102 48 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1103 : WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
1104 : {
1105 : dsa_pointer_atomic *buckets;
1106 : ParallelHashJoinBatch *old_batch0;
1107 : int new_nbatch;
1108 : int i;
1109 :
1110 : /* Move the old batch out of the way. */
1111 48 : old_batch0 = hashtable->batches[0].shared;
1112 48 : pstate->old_batches = pstate->batches;
1113 48 : pstate->old_nbatch = hashtable->nbatch;
1114 48 : pstate->batches = InvalidDsaPointer;
1115 :
1116 : /* Free this backend's old accessors. */
1117 48 : ExecParallelHashCloseBatchAccessors(hashtable);
1118 :
1119 : /* Figure out how many batches to use. */
1120 48 : if (hashtable->nbatch == 1)
1121 : {
1122 : /*
1123 : * We are going from single-batch to multi-batch. We need
1124 : * to switch from one large combined memory budget to the
1125 : * regular hash_mem budget.
1126 : */
1127 36 : pstate->space_allowed = get_hash_memory_limit();
1128 :
1129 : /*
1130 : * The combined hash_mem of all participants wasn't
1131 : * enough. Therefore one batch per participant would be
1132 : * approximately equivalent and would probably also be
1133 : * insufficient. So try two batches per participant,
1134 : * rounded up to a power of two.
1135 : */
1136 36 : new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
1137 : }
1138 : else
1139 : {
1140 : /*
1141 : * We were already multi-batched. Try doubling the number
1142 : * of batches.
1143 : */
1144 12 : new_nbatch = hashtable->nbatch * 2;
1145 : }
1146 :
1147 : /* Allocate new larger generation of batches. */
1148 : Assert(hashtable->nbatch == pstate->nbatch);
1149 48 : ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
1150 : Assert(hashtable->nbatch == pstate->nbatch);
1151 :
1152 : /* Replace or recycle batch 0's bucket array. */
1153 48 : if (pstate->old_nbatch == 1)
1154 : {
1155 : double dtuples;
1156 : double dbuckets;
1157 : int new_nbuckets;
1158 :
1159 : /*
1160 : * We probably also need a smaller bucket array. How many
1161 : * tuples do we expect per batch, assuming we have only
1162 : * half of them so far? Normally we don't need to change
1163 : * the bucket array's size, because the size of each batch
1164 : * stays the same as we add more batches, but in this
1165 : * special case we move from a large batch to many smaller
1166 : * batches and it would be wasteful to keep the large
1167 : * array.
1168 : */
1169 36 : dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
1170 36 : dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
1171 36 : dbuckets = Min(dbuckets,
1172 : MaxAllocSize / sizeof(dsa_pointer_atomic));
1173 36 : new_nbuckets = (int) dbuckets;
1174 36 : new_nbuckets = Max(new_nbuckets, 1024);
1175 36 : new_nbuckets = pg_nextpower2_32(new_nbuckets);
1176 36 : dsa_free(hashtable->area, old_batch0->buckets);
1177 72 : hashtable->batches[0].shared->buckets =
1178 36 : dsa_allocate(hashtable->area,
1179 : sizeof(dsa_pointer_atomic) * new_nbuckets);
1180 : buckets = (dsa_pointer_atomic *)
1181 36 : dsa_get_address(hashtable->area,
1182 36 : hashtable->batches[0].shared->buckets);
1183 110628 : for (i = 0; i < new_nbuckets; ++i)
1184 110592 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1185 36 : pstate->nbuckets = new_nbuckets;
1186 : }
1187 : else
1188 : {
1189 : /* Recycle the existing bucket array. */
1190 12 : hashtable->batches[0].shared->buckets = old_batch0->buckets;
1191 : buckets = (dsa_pointer_atomic *)
1192 12 : dsa_get_address(hashtable->area, old_batch0->buckets);
1193 49164 : for (i = 0; i < hashtable->nbuckets; ++i)
1194 49152 : dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
1195 : }
1196 :
1197 : /* Move all chunks to the work queue for parallel processing. */
1198 48 : pstate->chunk_work_queue = old_batch0->chunks;
1199 :
1200 : /* Disable further growth temporarily while we're growing. */
1201 48 : pstate->growth = PHJ_GROWTH_DISABLED;
1202 : }
1203 : else
1204 : {
1205 : /* All other participants just flush their tuples to disk. */
1206 0 : ExecParallelHashCloseBatchAccessors(hashtable);
1207 : }
1208 : /* Fall through. */
1209 :
1210 : case PHJ_GROW_BATCHES_REALLOCATE:
1211 : /* Wait for the above to be finished. */
1212 48 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1213 : WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
1214 : /* Fall through. */
1215 :
1216 48 : case PHJ_GROW_BATCHES_REPARTITION:
1217 : /* Make sure that we have the current dimensions and buckets. */
1218 48 : ExecParallelHashEnsureBatchAccessors(hashtable);
1219 48 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1220 : /* Then partition, flush counters. */
1221 48 : ExecParallelHashRepartitionFirst(hashtable);
1222 48 : ExecParallelHashRepartitionRest(hashtable);
1223 48 : ExecParallelHashMergeCounters(hashtable);
1224 : /* Wait for the above to be finished. */
1225 48 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1226 : WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
1227 : /* Fall through. */
1228 :
1229 48 : case PHJ_GROW_BATCHES_DECIDE:
1230 :
1231 : /*
1232 : * Elect one participant to clean up and decide whether further
1233 : * repartitioning is needed, or should be disabled because it's
1234 : * not helping.
1235 : */
1236 48 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1237 : WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
1238 : {
1239 48 : bool space_exhausted = false;
1240 48 : bool extreme_skew_detected = false;
1241 :
1242 : /* Make sure that we have the current dimensions and buckets. */
1243 48 : ExecParallelHashEnsureBatchAccessors(hashtable);
1244 48 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1245 :
1246 : /* Are any of the new generation of batches exhausted? */
1247 336 : for (int i = 0; i < hashtable->nbatch; ++i)
1248 : {
1249 288 : ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
1250 :
1251 288 : if (batch->space_exhausted ||
1252 288 : batch->estimated_size > pstate->space_allowed)
1253 : {
1254 : int parent;
1255 :
1256 24 : space_exhausted = true;
1257 :
1258 : /*
1259 : * Did this batch receive ALL of the tuples from its
1260 : * parent batch? That would indicate that further
1261 : * repartitioning isn't going to help (the hash values
1262 : * are probably all the same).
1263 : */
1264 24 : parent = i % pstate->old_nbatch;
1265 24 : if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
1266 24 : extreme_skew_detected = true;
1267 : }
1268 : }
1269 :
1270 : /* Don't keep growing if it's not helping or we'd overflow. */
1271 48 : if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
1272 24 : pstate->growth = PHJ_GROWTH_DISABLED;
1273 24 : else if (space_exhausted)
1274 0 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
1275 : else
1276 24 : pstate->growth = PHJ_GROWTH_OK;
1277 :
1278 : /* Free the old batches in shared memory. */
1279 48 : dsa_free(hashtable->area, pstate->old_batches);
1280 48 : pstate->old_batches = InvalidDsaPointer;
1281 : }
1282 : /* Fall through. */
1283 :
1284 : case PHJ_GROW_BATCHES_FINISH:
1285 : /* Wait for the above to complete. */
1286 48 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1287 : WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
1288 : }
1289 48 : }
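/*
 * Editor's roadmap for the grow_batches_barrier phases used above (a
 * summary of this file's own code, added for orientation):
 *
 *   PHJ_GROW_BATCHES_ELECT        one participant parks the old batches,
 *                                 sets up the next, larger generation and
 *                                 queues batch 0's chunks for repartitioning
 *   PHJ_GROW_BATCHES_REALLOCATE   everyone waits for that setup to finish
 *   PHJ_GROW_BATCHES_REPARTITION  all participants repartition batch 0 and
 *                                 the old batch files, then merge counters
 *   PHJ_GROW_BATCHES_DECIDE       one participant frees the old batches and
 *                                 decides whether to keep growing, stop, or
 *                                 give up because of extreme skew
 *   PHJ_GROW_BATCHES_FINISH       everyone waits, then resumes hashing
 */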
1290 :
1291 : /*
1292 : * Repartition the tuples currently loaded into memory for inner batch 0
1293 : * because the number of batches has been increased. Some tuples are retained
1294 : * in memory and some are written out to a later batch.
1295 : */
1296 : static void
1297 48 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
1298 : {
1299 : dsa_pointer chunk_shared;
1300 : HashMemoryChunk chunk;
1301 :
1302 : Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
1303 :
1304 336 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
1305 : {
1306 288 : size_t idx = 0;
1307 :
1308 : /* Repartition all tuples in this chunk. */
1309 220860 : while (idx < chunk->used)
1310 : {
1311 220572 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1312 220572 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1313 : HashJoinTuple copyTuple;
1314 : dsa_pointer shared;
1315 : int bucketno;
1316 : int batchno;
1317 :
1318 220572 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1319 : &bucketno, &batchno);
1320 :
1321 : Assert(batchno < hashtable->nbatch);
1322 220572 : if (batchno == 0)
1323 : {
1324 : /* It still belongs in batch 0. Copy to a new chunk. */
1325 : copyTuple =
1326 50748 : ExecParallelHashTupleAlloc(hashtable,
1327 50748 : HJTUPLE_OVERHEAD + tuple->t_len,
1328 : &shared);
1329 50748 : copyTuple->hashvalue = hashTuple->hashvalue;
1330 50748 : memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
1331 50748 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1332 : copyTuple, shared);
1333 : }
1334 : else
1335 : {
1336 169824 : size_t tuple_size =
1337 169824 : MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1338 :
1339 : /* It belongs in a later batch. */
1340 169824 : hashtable->batches[batchno].estimated_size += tuple_size;
1341 169824 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1342 169824 : &hashTuple->hashvalue, tuple);
1343 : }
1344 :
1345 : /* Count this tuple. */
1346 220572 : ++hashtable->batches[0].old_ntuples;
1347 220572 : ++hashtable->batches[batchno].ntuples;
1348 :
1349 220572 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1350 : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1351 : }
1352 :
1353 : /* Free this chunk. */
1354 288 : dsa_free(hashtable->area, chunk_shared);
1355 :
1356 288 : CHECK_FOR_INTERRUPTS();
1357 : }
1358 48 : }
1359 :
1360 : /*
1361 : * Help repartition inner batches 1..n.
1362 : */
1363 : static void
1364 48 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
1365 : {
1366 48 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1367 48 : int old_nbatch = pstate->old_nbatch;
1368 : SharedTuplestoreAccessor **old_inner_tuples;
1369 : ParallelHashJoinBatch *old_batches;
1370 : int i;
1371 :
1372 : /* Get our hands on the previous generation of batches. */
1373 : old_batches = (ParallelHashJoinBatch *)
1374 48 : dsa_get_address(hashtable->area, pstate->old_batches);
1375 48 : old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
1376 84 : for (i = 1; i < old_nbatch; ++i)
1377 : {
1378 36 : ParallelHashJoinBatch *shared =
1379 36 : NthParallelHashJoinBatch(old_batches, i);
1380 :
1381 36 : old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
1382 : ParallelWorkerNumber + 1,
1383 : &pstate->fileset);
1384 : }
1385 :
1386 : /* Join in the effort to repartition them. */
1387 84 : for (i = 1; i < old_nbatch; ++i)
1388 : {
1389 : MinimalTuple tuple;
1390 : uint32 hashvalue;
1391 :
1392 : /* Scan one partition from the previous generation. */
1393 36 : sts_begin_parallel_scan(old_inner_tuples[i]);
1394 161400 : while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
1395 : {
1396 161364 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1397 : int bucketno;
1398 : int batchno;
1399 :
1400 : /* Decide which partition it goes to in the new generation. */
1401 161364 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
1402 : &batchno);
1403 :
1404 161364 : hashtable->batches[batchno].estimated_size += tuple_size;
1405 161364 : ++hashtable->batches[batchno].ntuples;
1406 161364 : ++hashtable->batches[i].old_ntuples;
1407 :
1408 : /* Store the tuple its new batch. */
1409 161364 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1410 : &hashvalue, tuple);
1411 :
1412 161364 : CHECK_FOR_INTERRUPTS();
1413 : }
1414 36 : sts_end_parallel_scan(old_inner_tuples[i]);
1415 : }
1416 :
1417 48 : pfree(old_inner_tuples);
1418 48 : }
1419 :
1420 : /*
1421 : * Transfer the backend-local per-batch counters to the shared totals.
1422 : */
1423 : static void
1424 288 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
1425 : {
1426 288 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1427 : int i;
1428 :
1429 288 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
1430 288 : pstate->total_tuples = 0;
1431 1650 : for (i = 0; i < hashtable->nbatch; ++i)
1432 : {
1433 1362 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
1434 :
1435 1362 : batch->shared->size += batch->size;
1436 1362 : batch->shared->estimated_size += batch->estimated_size;
1437 1362 : batch->shared->ntuples += batch->ntuples;
1438 1362 : batch->shared->old_ntuples += batch->old_ntuples;
1439 1362 : batch->size = 0;
1440 1362 : batch->estimated_size = 0;
1441 1362 : batch->ntuples = 0;
1442 1362 : batch->old_ntuples = 0;
1443 1362 : pstate->total_tuples += batch->shared->ntuples;
1444 : }
1445 288 : LWLockRelease(&pstate->lock);
1446 288 : }
1447 :
1448 : /*
1449 : * ExecHashIncreaseNumBuckets
1450 : * increase the original number of buckets in order to reduce
1451 : * the number of tuples per bucket
1452 : */
1453 : static void
1454 88 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
1455 : {
1456 : HashMemoryChunk chunk;
1457 :
1458 : /* do nothing if not an increase (it's called increase for a reason) */
1459 88 : if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
1460 0 : return;
1461 :
1462 : #ifdef HJDEBUG
1463 : printf("Hashjoin %p: increasing nbuckets %d => %d\n",
1464 : hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
1465 : #endif
1466 :
1467 88 : hashtable->nbuckets = hashtable->nbuckets_optimal;
1468 88 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1469 :
1470 : Assert(hashtable->nbuckets > 1);
1471 : Assert(hashtable->nbuckets <= (INT_MAX / 2));
1472 : Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
1473 :
1474 : /*
1475 : * Just reallocate the proper number of buckets - we don't need to walk
1476 : * through them - we can walk the dense-allocated chunks (just like in
1477 : * ExecHashIncreaseNumBatches, but without all the copying into new
1478 : * chunks)
1479 : */
1480 88 : hashtable->buckets.unshared =
1481 88 : repalloc_array(hashtable->buckets.unshared,
1482 : HashJoinTuple, hashtable->nbuckets);
1483 :
1484 88 : memset(hashtable->buckets.unshared, 0,
1485 88 : hashtable->nbuckets * sizeof(HashJoinTuple));
1486 :
1487 : /* scan through all tuples in all chunks to rebuild the hash table */
1488 1072 : for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
1489 : {
1490 : /* process all tuples stored in this chunk */
1491 984 : size_t idx = 0;
1492 :
1493 754240 : while (idx < chunk->used)
1494 : {
1495 753256 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1496 : int bucketno;
1497 : int batchno;
1498 :
1499 753256 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1500 : &bucketno, &batchno);
1501 :
1502 : /* add the tuple to the proper bucket */
1503 753256 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1504 753256 : hashtable->buckets.unshared[bucketno] = hashTuple;
1505 :
1506 : /* advance index past the tuple */
1507 753256 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1508 : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1509 : }
1510 :
1511 : /* allow this loop to be cancellable */
1512 984 : CHECK_FOR_INTERRUPTS();
1513 : }
1514 : }
1515 :
1516 : static void
1517 144 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
1518 : {
1519 144 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1520 : int i;
1521 : HashMemoryChunk chunk;
1522 : dsa_pointer chunk_s;
1523 :
1524 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1525 :
1526 : /*
1527 : * It's unlikely, but we need to be prepared for new participants to show
1528 : * up while we're in the middle of this operation, so we need to switch on
1529 : * barrier phase here.
1530 : */
1531 144 : switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
1532 : {
1533 144 : case PHJ_GROW_BUCKETS_ELECT:
1534 : /* Elect one participant to prepare to increase nbuckets. */
1535 144 : if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1536 : WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
1537 : {
1538 : size_t size;
1539 : dsa_pointer_atomic *buckets;
1540 :
1541 : /* Double the size of the bucket array. */
1542 108 : pstate->nbuckets *= 2;
1543 108 : size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
1544 108 : hashtable->batches[0].shared->size += size / 2;
1545 108 : dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
1546 216 : hashtable->batches[0].shared->buckets =
1547 108 : dsa_allocate(hashtable->area, size);
1548 : buckets = (dsa_pointer_atomic *)
1549 108 : dsa_get_address(hashtable->area,
1550 108 : hashtable->batches[0].shared->buckets);
1551 933996 : for (i = 0; i < pstate->nbuckets; ++i)
1552 933888 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1553 :
1554 : /* Put the chunk list onto the work queue. */
1555 108 : pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
1556 :
1557 : /* Clear the flag. */
1558 108 : pstate->growth = PHJ_GROWTH_OK;
1559 : }
1560 : /* Fall through. */
1561 :
1562 : case PHJ_GROW_BUCKETS_REALLOCATE:
1563 : /* Wait for the above to complete. */
1564 144 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1565 : WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
1566 : /* Fall through. */
1567 :
1568 144 : case PHJ_GROW_BUCKETS_REINSERT:
1569 : /* Reinsert all tuples into the hash table. */
1570 144 : ExecParallelHashEnsureBatchAccessors(hashtable);
1571 144 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1572 806 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
1573 : {
1574 662 : size_t idx = 0;
1575 :
1576 542178 : while (idx < chunk->used)
1577 : {
1578 541516 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1579 541516 : dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
1580 : int bucketno;
1581 : int batchno;
1582 :
1583 541516 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1584 : &bucketno, &batchno);
1585 : Assert(batchno == 0);
1586 :
1587 : /* add the tuple to the proper bucket */
1588 541516 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1589 : hashTuple, shared);
1590 :
1591 : /* advance index past the tuple */
1592 541516 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1593 : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1594 : }
1595 :
1596 : /* allow this loop to be cancellable */
1597 662 : CHECK_FOR_INTERRUPTS();
1598 : }
1599 144 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1600 : WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
1601 : }
1602 144 : }
1603 :
1604 : /*
1605 : * ExecHashTableInsert
1606 : * insert a tuple into the hash table; depending on the hash value,
1607 : * it may instead go to a temp file for later batches
1608 : *
1609 : * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
1610 : * tuple; the minimal case in particular is certain to happen while reloading
1611 : * tuples from batch files. We could save some cycles in the regular-tuple
1612 : * case by not forcing the slot contents into minimal form; not clear if it's
1613 : * worth the messiness required.
1614 : */
1615 : void
1616 9899426 : ExecHashTableInsert(HashJoinTable hashtable,
1617 : TupleTableSlot *slot,
1618 : uint32 hashvalue)
1619 : {
1620 : bool shouldFree;
1621 9899426 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1622 : int bucketno;
1623 : int batchno;
1624 :
1625 9899426 : ExecHashGetBucketAndBatch(hashtable, hashvalue,
1626 : &bucketno, &batchno);
1627 :
1628 : /*
1629 : * decide whether to put the tuple in the hash table or a temp file
1630 : */
1631 9899426 : if (batchno == hashtable->curbatch)
1632 : {
1633 : /*
1634 : * put the tuple in hash table
1635 : */
1636 : HashJoinTuple hashTuple;
1637 : int hashTupleSize;
1638 7554522 : double ntuples = (hashtable->totalTuples - hashtable->skewTuples);
1639 :
1640 : /* Create the HashJoinTuple */
1641 7554522 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
1642 7554522 : hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1643 :
1644 7554522 : hashTuple->hashvalue = hashvalue;
1645 7554522 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1646 :
1647 : /*
1648 : * We always reset the tuple-matched flag on insertion. This is okay
1649 : * even when reloading a tuple from a batch file, since the tuple
1650 : * could not possibly have been matched to an outer tuple before it
1651 : * went into the batch file.
1652 : */
1653 7554522 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1654 :
1655 : /* Push it onto the front of the bucket's list */
1656 7554522 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1657 7554522 : hashtable->buckets.unshared[bucketno] = hashTuple;
1658 :
1659 : /*
1660 : * Increase the (optimal) number of buckets if we just exceeded the
1661 : * NTUP_PER_BUCKET threshold, but only when there's still a single
1662 : * batch.
1663 : */
1664 7554522 : if (hashtable->nbatch == 1 &&
1665 4882164 : ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
1666 : {
1667 : /* Guard against integer overflow and alloc size overflow */
1668 208 : if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
1669 208 : hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
1670 : {
1671 208 : hashtable->nbuckets_optimal *= 2;
1672 208 : hashtable->log2_nbuckets_optimal += 1;
1673 : }
1674 : }
1675 :
1676 : /* Account for space used, and back off if we've used too much */
1677 7554522 : hashtable->spaceUsed += hashTupleSize;
1678 7554522 : if (hashtable->spaceUsed > hashtable->spacePeak)
1679 5507350 : hashtable->spacePeak = hashtable->spaceUsed;
1680 7554522 : if (hashtable->spaceUsed +
1681 7554522 : hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
1682 7554522 : > hashtable->spaceAllowed)
1683 483690 : ExecHashIncreaseNumBatches(hashtable);
1684 : }
1685 : else
1686 : {
1687 : /*
1688 : * put the tuple into a temp file for later batches
1689 : */
1690 : Assert(batchno > hashtable->curbatch);
1691 2344904 : ExecHashJoinSaveTuple(tuple,
1692 : hashvalue,
1693 2344904 : &hashtable->innerBatchFile[batchno],
1694 : hashtable);
1695 : }
1696 :
1697 9899426 : if (shouldFree)
1698 7302092 : heap_free_minimal_tuple(tuple);
1699 9899426 : }
1700 :
1701 : /*
1702 : * ExecParallelHashTableInsert
1703 : * insert a tuple into a shared hash table or shared batch tuplestore
1704 : */
1705 : void
1706 2160120 : ExecParallelHashTableInsert(HashJoinTable hashtable,
1707 : TupleTableSlot *slot,
1708 : uint32 hashvalue)
1709 : {
1710 : bool shouldFree;
1711 2160120 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1712 : dsa_pointer shared;
1713 : int bucketno;
1714 : int batchno;
1715 :
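 : 	/*
 : 	 * Editorial note: a NULL result from ExecParallelHashTupleAlloc below
 : 	 * means the number of buckets or batches changed while we were trying
 : 	 * to allocate, so we jump back here to recompute bucketno/batchno.
 : 	 */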
1716 2160468 : retry:
1717 2160468 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1718 :
1719 2160468 : if (batchno == 0)
1720 : {
1721 : HashJoinTuple hashTuple;
1722 :
1723 : /* Try to load it into memory. */
1724 : Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
1725 : PHJ_BUILD_HASH_INNER);
1726 1247340 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1727 1247340 : HJTUPLE_OVERHEAD + tuple->t_len,
1728 : &shared);
1729 1247340 : if (hashTuple == NULL)
1730 324 : goto retry;
1731 :
1732 : /* Store the hash value in the HashJoinTuple header. */
1733 1247016 : hashTuple->hashvalue = hashvalue;
1734 1247016 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1735 1247016 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1736 :
1737 : /* Push it onto the front of the bucket's list */
1738 1247016 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1739 : hashTuple, shared);
1740 : }
1741 : else
1742 : {
1743 913128 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1744 :
1745 : Assert(batchno > 0);
1746 :
1747 : /* Try to preallocate space in the batch if necessary. */
1748 913128 : if (hashtable->batches[batchno].preallocated < tuple_size)
1749 : {
1750 1690 : if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
1751 24 : goto retry;
1752 : }
1753 :
1754 : Assert(hashtable->batches[batchno].preallocated >= tuple_size);
1755 913104 : hashtable->batches[batchno].preallocated -= tuple_size;
1756 913104 : sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
1757 : tuple);
1758 : }
1759 2160120 : ++hashtable->batches[batchno].ntuples;
1760 :
1761 2160120 : if (shouldFree)
1762 2160120 : heap_free_minimal_tuple(tuple);
1763 2160120 : }
1764 :
1765 : /*
1766 : * Insert a tuple into the current hash table. Unlike
1767 : * ExecParallelHashTableInsert, this version is not prepared to send the tuple
1768 : * to other batches or to handle running out of memory, and should only be
1769 : * called with tuples that belong in the current batch once growth has been disabled.
1770 : */
1771 : void
1772 1082928 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
1773 : TupleTableSlot *slot,
1774 : uint32 hashvalue)
1775 : {
1776 : bool shouldFree;
1777 1082928 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1778 : HashJoinTuple hashTuple;
1779 : dsa_pointer shared;
1780 : int batchno;
1781 : int bucketno;
1782 :
1783 1082928 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1784 : Assert(batchno == hashtable->curbatch);
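 : 	/*
 : 	 * Editorial note: growth is disabled by the time this function is
 : 	 * called, so this allocation cannot return NULL (see
 : 	 * ExecParallelHashTupleAlloc).
 : 	 */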
1785 1082928 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1786 1082928 : HJTUPLE_OVERHEAD + tuple->t_len,
1787 : &shared);
1788 1082928 : hashTuple->hashvalue = hashvalue;
1789 1082928 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1790 1082928 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1791 1082928 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1792 : hashTuple, shared);
1793 :
1794 1082928 : if (shouldFree)
1795 0 : heap_free_minimal_tuple(tuple);
1796 1082928 : }
1797 :
1798 : /*
1799 : * ExecHashGetHashValue
1800 : * Compute the hash value for a tuple
1801 : *
1802 : * The tuple to be tested must be in econtext->ecxt_outertuple (thus Vars in
1803 : * the hashkeys expressions need to have OUTER_VAR as varno). If outer_tuple
1804 : * is false (meaning it's the HashJoin's inner node, Hash), econtext,
1805 : * hashkeys, and slot need to be from Hash, with hashkeys/slot referencing and
1806 : * being suitable for tuples from the node below the Hash. Conversely, if
1807 : * outer_tuple is true, econtext is from HashJoin, and hashkeys/slot need to
1808 : * be appropriate for tuples from HashJoin's outer node.
1809 : *
1810 : * A true result means the tuple's hash value has been successfully computed
1811 : * and stored at *hashvalue. A false result means the tuple cannot match
1812 : * because it contains a null attribute, and hence it should be discarded
1813 : * immediately. (If keep_nulls is true then false is never returned.)
1814 : */
1815 : bool
1816 24060676 : ExecHashGetHashValue(HashJoinTable hashtable,
1817 : ExprContext *econtext,
1818 : List *hashkeys,
1819 : bool outer_tuple,
1820 : bool keep_nulls,
1821 : uint32 *hashvalue)
1822 : {
1823 24060676 : uint32 hashkey = 0;
1824 : FmgrInfo *hashfunctions;
1825 : ListCell *hk;
1826 24060676 : int i = 0;
1827 : MemoryContext oldContext;
1828 :
1829 : /*
1830 : * We reset the eval context each time to reclaim any memory leaked in the
1831 : * hashkey expressions.
1832 : */
1833 24060676 : ResetExprContext(econtext);
1834 :
1835 24060676 : oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
1836 :
1837 24060676 : if (outer_tuple)
1838 14562240 : hashfunctions = hashtable->outer_hashfunctions;
1839 : else
1840 9498436 : hashfunctions = hashtable->inner_hashfunctions;
1841 :
1842 49804666 : foreach(hk, hashkeys)
1843 : {
1844 25744816 : ExprState *keyexpr = (ExprState *) lfirst(hk);
1845 : Datum keyval;
1846 : bool isNull;
1847 :
1848 : /* combine successive hashkeys by rotating */
1849 25744816 : hashkey = pg_rotate_left32(hashkey, 1);
1850 :
1851 : /*
1852 : * Get the join attribute value of the tuple
1853 : */
1854 25744816 : keyval = ExecEvalExpr(keyexpr, econtext, &isNull);
1855 :
1856 : /*
1857 : * If the attribute is NULL, and the join operator is strict, then
1858 : * this tuple cannot pass the join qual so we can reject it
1859 : * immediately (unless we're scanning the outside of an outer join, in
1860 : * which case we must not reject it). Otherwise we act like the
1861 : * hashcode of NULL is zero (this will support operators that act like
1862 : * IS NOT DISTINCT, though not any more-random behavior). We treat
1863 : * the hash support function as strict even if the operator is not.
1864 : *
1865 : * Note: currently, all hashjoinable operators must be strict since
1866 : * the hash index AM assumes that. However, it takes so little extra
1867 : * code here to allow non-strict that we may as well do it.
1868 : */
1869 25744816 : if (isNull)
1870 : {
1871 1058 : if (hashtable->hashStrict[i] && !keep_nulls)
1872 : {
1873 826 : MemoryContextSwitchTo(oldContext);
1874 826 : return false; /* cannot match */
1875 : }
1876 : /* else, leave hashkey unmodified, equivalent to hashcode 0 */
1877 : }
1878 : else
1879 : {
1880 : /* Compute the hash function */
1881 : uint32 hkey;
1882 :
1883 25743758 : hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i], hashtable->collations[i], keyval));
1884 25743758 : hashkey ^= hkey;
1885 : }
1886 :
1887 25743990 : i++;
1888 : }
1889 :
1890 24059850 : MemoryContextSwitchTo(oldContext);
1891 :
1892 24059850 : *hashvalue = hashkey;
1893 24059850 : return true;
1894 : }
1895 :
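 : /*
 :  * Editorial illustration (not part of the original source): for a two-key
 :  * hash clause with per-key hashes h1 and h2, the foreach loop in
 :  * ExecHashGetHashValue above produces
 :  *   hashkey = pg_rotate_left32(h1, 1) ^ h2
 :  * since the initial value 0 contributes nothing.
 :  */
 :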
1896 : /*
1897 : * ExecHashGetBucketAndBatch
1898 : * Determine the bucket number and batch number for a hash value
1899 : *
1900 : * Note: on-the-fly increases of nbatch must not change the bucket number
1901 : * for a given hash code (since we don't move tuples to different hash
1902 : * chains), and must only cause the batch number to remain the same or
1903 : * increase. Our algorithm is
1904 : * bucketno = hashvalue MOD nbuckets
1905 : * batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
1906 : * where nbuckets and nbatch are both expected to be powers of 2, so we can
1907 : * do the computations by shifting and masking. (This assumes that all hash
1908 : * functions are good about randomizing all their output bits, else we are
1909 : * likely to have very skewed bucket or batch occupancy.)
1910 : *
1911 : * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
1912 : * bucket count growth. Once we start batching, these values are fixed and do
1913 : * not change over the course of the join (making it possible to compute batch
1914 : * number the way we do here).
1915 : *
1916 : * nbatch is always a power of 2; we increase it only by doubling it. This
1917 : * effectively adds one more bit to the top of the batchno. In very large
1918 : * joins, we might run out of bits to add, so we do this by rotating the hash
1919 : * value. This causes batchno to steal bits from bucketno when the number of
1920 : * virtual buckets exceeds 2^32. It's better to have longer bucket chains
1921 : * than to lose the ability to divide batches.
1922 : */
1923 : void
1924 32405872 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
1925 : uint32 hashvalue,
1926 : int *bucketno,
1927 : int *batchno)
1928 : {
1929 32405872 : uint32 nbuckets = (uint32) hashtable->nbuckets;
1930 32405872 : uint32 nbatch = (uint32) hashtable->nbatch;
1931 :
1932 32405872 : if (nbatch > 1)
1933 : {
1934 13600230 : *bucketno = hashvalue & (nbuckets - 1);
1935 13600230 : *batchno = pg_rotate_right32(hashvalue,
1936 13600230 : hashtable->log2_nbuckets) & (nbatch - 1);
1937 : }
1938 : else
1939 : {
1940 18805642 : *bucketno = hashvalue & (nbuckets - 1);
1941 18805642 : *batchno = 0;
1942 : }
1943 32405872 : }
1944 :
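 : /*
 :  * Editorial illustration (not part of the original source): with
 :  * nbuckets = 1024 (log2_nbuckets = 10) and nbatch = 4, the hash value
 :  * 0x0002ABCD maps to
 :  *   bucketno = 0x0002ABCD & 0x3FF = 0x3CD (973)
 :  *   batchno  = pg_rotate_right32(0x0002ABCD, 10) & 0x3
 :  *            = 0xF34000AA & 0x3 = 2
 :  */
 :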
1945 : /*
1946 : * ExecScanHashBucket
1947 : * scan a hash bucket for matches to the current outer tuple
1948 : *
1949 : * The current outer tuple must be stored in econtext->ecxt_outertuple.
1950 : *
1951 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
1952 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
1953 : * for the latter.
1954 : */
1955 : bool
1956 16809230 : ExecScanHashBucket(HashJoinState *hjstate,
1957 : ExprContext *econtext)
1958 : {
1959 16809230 : ExprState *hjclauses = hjstate->hashclauses;
1960 16809230 : HashJoinTable hashtable = hjstate->hj_HashTable;
1961 16809230 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
1962 16809230 : uint32 hashvalue = hjstate->hj_CurHashValue;
1963 :
1964 : /*
1965 : * hj_CurTuple is the address of the tuple last returned from the current
1966 : * bucket, or NULL if it's time to start scanning a new bucket.
1967 : *
1968 : * If the tuple hashed to a skew bucket then scan the skew bucket
1969 : * otherwise scan the standard hashtable bucket.
1970 : */
1971 16809230 : if (hashTuple != NULL)
1972 4407834 : hashTuple = hashTuple->next.unshared;
1973 12401396 : else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
1974 2400 : hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
1975 : else
1976 12398996 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
1977 :
1978 20816454 : while (hashTuple != NULL)
1979 : {
1980 11627622 : if (hashTuple->hashvalue == hashvalue)
1981 : {
1982 : TupleTableSlot *inntuple;
1983 :
1984 : /* insert hashtable's tuple into exec slot so ExecQual sees it */
1985 7620404 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
1986 : hjstate->hj_HashTupleSlot,
1987 : false); /* do not pfree */
1988 7620404 : econtext->ecxt_innertuple = inntuple;
1989 :
1990 7620404 : if (ExecQualAndReset(hjclauses, econtext))
1991 : {
1992 7620398 : hjstate->hj_CurTuple = hashTuple;
1993 7620398 : return true;
1994 : }
1995 : }
1996 :
1997 4007224 : hashTuple = hashTuple->next.unshared;
1998 : }
1999 :
2000 : /*
2001 : * no match
2002 : */
2003 9188832 : return false;
2004 : }
2005 :
2006 : /*
2007 : * ExecParallelScanHashBucket
2008 : * scan a hash bucket for matches to the current outer tuple
2009 : *
2010 : * The current outer tuple must be stored in econtext->ecxt_outertuple.
2011 : *
2012 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2013 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2014 : * for the latter.
2015 : */
2016 : bool
2017 4200054 : ExecParallelScanHashBucket(HashJoinState *hjstate,
2018 : ExprContext *econtext)
2019 : {
2020 4200054 : ExprState *hjclauses = hjstate->hashclauses;
2021 4200054 : HashJoinTable hashtable = hjstate->hj_HashTable;
2022 4200054 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2023 4200054 : uint32 hashvalue = hjstate->hj_CurHashValue;
2024 :
2025 : /*
2026 : * hj_CurTuple is the address of the tuple last returned from the current
2027 : * bucket, or NULL if it's time to start scanning a new bucket.
2028 : */
2029 4200054 : if (hashTuple != NULL)
2030 2040024 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2031 : else
2032 2160030 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2033 : hjstate->hj_CurBucketNo);
2034 :
2035 5607210 : while (hashTuple != NULL)
2036 : {
2037 3447180 : if (hashTuple->hashvalue == hashvalue)
2038 : {
2039 : TupleTableSlot *inntuple;
2040 :
2041 : /* insert hashtable's tuple into exec slot so ExecQual sees it */
2042 2040024 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2043 : hjstate->hj_HashTupleSlot,
2044 : false); /* do not pfree */
2045 2040024 : econtext->ecxt_innertuple = inntuple;
2046 :
2047 2040024 : if (ExecQualAndReset(hjclauses, econtext))
2048 : {
2049 2040024 : hjstate->hj_CurTuple = hashTuple;
2050 2040024 : return true;
2051 : }
2052 : }
2053 :
2054 1407156 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2055 : }
2056 :
2057 : /*
2058 : * no match
2059 : */
2060 2160030 : return false;
2061 : }
2062 :
2063 : /*
2064 : * ExecPrepHashTableForUnmatched
2065 : * set up for a series of ExecScanHashTableForUnmatched calls
2066 : */
2067 : void
2068 4714 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
2069 : {
2070 : /*----------
2071 : * During this scan we use the HashJoinState fields as follows:
2072 : *
2073 : * hj_CurBucketNo: next regular bucket to scan
2074 : * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
2075 : * hj_CurTuple: last tuple returned, or NULL to start next bucket
2076 : *----------
2077 : */
2078 4714 : hjstate->hj_CurBucketNo = 0;
2079 4714 : hjstate->hj_CurSkewBucketNo = 0;
2080 4714 : hjstate->hj_CurTuple = NULL;
2081 4714 : }
2082 :
2083 : /*
2084 : * Decide if this process is allowed to run the unmatched scan. If so, the
2085 : * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
2086 : * Otherwise the batch is detached and false is returned.
2087 : */
2088 : bool
2089 68 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
2090 : {
2091 68 : HashJoinTable hashtable = hjstate->hj_HashTable;
2092 68 : int curbatch = hashtable->curbatch;
2093 68 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
2094 :
2095 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
2096 :
2097 : /*
2098 : * It would not be deadlock-free to wait on the batch barrier, because it
2099 : * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
2100 : * already emitted tuples. Therefore, we'll hold a wait-free election:
2101 : * only one process can continue to the next phase, and all others detach
2102 : * from this batch. They can still do any work on other batches, if there
2103 : * are any.
2104 : */
2105 68 : if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
2106 : {
2107 : /* This process considers the batch to be done. */
2108 2 : hashtable->batches[hashtable->curbatch].done = true;
2109 :
2110 : /* Make sure any temporary files are closed. */
2111 2 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
2112 2 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
2113 :
2114 : /*
2115 : * Track largest batch we've seen, which would normally happen in
2116 : * ExecHashTableDetachBatch().
2117 : */
2118 2 : hashtable->spacePeak =
2119 2 : Max(hashtable->spacePeak,
2120 : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
2121 2 : hashtable->curbatch = -1;
2122 2 : return false;
2123 : }
2124 :
2125 : /* Now we are alone with this batch. */
2126 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
2127 :
2128 : /*
2129 : * Has another process decided to give up early and command all processes
2130 : * to skip the unmatched scan?
2131 : */
2132 66 : if (batch->skip_unmatched)
2133 : {
2134 0 : hashtable->batches[hashtable->curbatch].done = true;
2135 0 : ExecHashTableDetachBatch(hashtable);
2136 0 : return false;
2137 : }
2138 :
2139 : /* Now prepare the process local state, just as for non-parallel join. */
2140 66 : ExecPrepHashTableForUnmatched(hjstate);
2141 :
2142 66 : return true;
2143 : }
2144 :
2145 : /*
2146 : * ExecScanHashTableForUnmatched
2147 : * scan the hash table for unmatched inner tuples
2148 : *
2149 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2150 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2151 : * for the latter.
2152 : */
2153 : bool
2154 312106 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
2155 : {
2156 312106 : HashJoinTable hashtable = hjstate->hj_HashTable;
2157 312106 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2158 :
2159 : for (;;)
2160 : {
2161 : /*
2162 : * hj_CurTuple is the address of the tuple last returned from the
2163 : * current bucket, or NULL if it's time to start scanning a new
2164 : * bucket.
2165 : */
2166 6064938 : if (hashTuple != NULL)
2167 307458 : hashTuple = hashTuple->next.unshared;
2168 5757480 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2169 : {
2170 5752838 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
2171 5752838 : hjstate->hj_CurBucketNo++;
2172 : }
2173 4642 : else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
2174 : {
2175 0 : int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
2176 :
2177 0 : hashTuple = hashtable->skewBucket[j]->tuples;
2178 0 : hjstate->hj_CurSkewBucketNo++;
2179 : }
2180 : else
2181 4642 : break; /* finished all buckets */
2182 :
2183 6426898 : while (hashTuple != NULL)
2184 : {
2185 674066 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2186 : {
2187 : TupleTableSlot *inntuple;
2188 :
2189 : /* insert hashtable's tuple into exec slot */
2190 307464 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2191 : hjstate->hj_HashTupleSlot,
2192 : false); /* do not pfree */
2193 307464 : econtext->ecxt_innertuple = inntuple;
2194 :
2195 : /*
2196 : * Reset temp memory each time; although this function doesn't
2197 : * do any qual eval, the caller will, so let's keep it
2198 : * parallel to ExecScanHashBucket.
2199 : */
2200 307464 : ResetExprContext(econtext);
2201 :
2202 307464 : hjstate->hj_CurTuple = hashTuple;
2203 307464 : return true;
2204 : }
2205 :
2206 366602 : hashTuple = hashTuple->next.unshared;
2207 : }
2208 :
2209 : /* allow this loop to be cancellable */
2210 5752832 : CHECK_FOR_INTERRUPTS();
2211 : }
2212 :
2213 : /*
2214 : * no more unmatched tuples
2215 : */
2216 4642 : return false;
2217 : }
2218 :
2219 : /*
2220 : * ExecParallelScanHashTableForUnmatched
2221 : * scan the hash table for unmatched inner tuples, in parallel join
2222 : *
2223 : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2224 : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2225 : * for the latter.
2226 : */
2227 : bool
2228 120072 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
2229 : ExprContext *econtext)
2230 : {
2231 120072 : HashJoinTable hashtable = hjstate->hj_HashTable;
2232 120072 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2233 :
2234 : for (;;)
2235 : {
2236 : /*
2237 : * hj_CurTuple is the address of the tuple last returned from the
2238 : * current bucket, or NULL if it's time to start scanning a new
2239 : * bucket.
2240 : */
2241 734472 : if (hashTuple != NULL)
2242 120006 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2243 614466 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2244 614400 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2245 614400 : hjstate->hj_CurBucketNo++);
2246 : else
2247 66 : break; /* finished all buckets */
2248 :
2249 974406 : while (hashTuple != NULL)
2250 : {
2251 360006 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2252 : {
2253 : TupleTableSlot *inntuple;
2254 :
2255 : /* insert hashtable's tuple into exec slot */
2256 120006 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2257 : hjstate->hj_HashTupleSlot,
2258 : false); /* do not pfree */
2259 120006 : econtext->ecxt_innertuple = inntuple;
2260 :
2261 : /*
2262 : * Reset temp memory each time; although this function doesn't
2263 : * do any qual eval, the caller will, so let's keep it
2264 : * parallel to ExecScanHashBucket.
2265 : */
2266 120006 : ResetExprContext(econtext);
2267 :
2268 120006 : hjstate->hj_CurTuple = hashTuple;
2269 120006 : return true;
2270 : }
2271 :
2272 240000 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2273 : }
2274 :
2275 : /* allow this loop to be cancellable */
2276 614400 : CHECK_FOR_INTERRUPTS();
2277 : }
2278 :
2279 : /*
2280 : * no more unmatched tuples
2281 : */
2282 66 : return false;
2283 : }
2284 :
2285 : /*
2286 : * ExecHashTableReset
2287 : *
2288 : * reset hash table header for new batch
2289 : */
2290 : void
2291 1254 : ExecHashTableReset(HashJoinTable hashtable)
2292 : {
2293 : MemoryContext oldcxt;
2294 1254 : int nbuckets = hashtable->nbuckets;
2295 :
2296 : /*
2297 : * Release all the hash buckets and tuples acquired in the prior pass, and
2298 : * reinitialize the context for a new pass.
2299 : */
2300 1254 : MemoryContextReset(hashtable->batchCxt);
2301 1254 : oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
2302 :
2303 : /* Reallocate and reinitialize the hash bucket headers. */
2304 1254 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
2305 :
2306 1254 : hashtable->spaceUsed = 0;
2307 :
2308 1254 : MemoryContextSwitchTo(oldcxt);
2309 :
2310 : /* Forget the chunks (the memory was freed by the context reset above). */
2311 1254 : hashtable->chunks = NULL;
2312 1254 : }
2313 :
2314 : /*
2315 : * ExecHashTableResetMatchFlags
2316 : * Clear all the HeapTupleHeaderHasMatch flags in the table
2317 : */
2318 : void
2319 18 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
2320 : {
2321 : HashJoinTuple tuple;
2322 : int i;
2323 :
2324 : /* Reset all flags in the main table ... */
2325 18450 : for (i = 0; i < hashtable->nbuckets; i++)
2326 : {
2327 18600 : for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
2328 168 : tuple = tuple->next.unshared)
2329 168 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2330 : }
2331 :
2332 : /* ... and the same for the skew buckets, if any */
2333 18 : for (i = 0; i < hashtable->nSkewBuckets; i++)
2334 : {
2335 0 : int j = hashtable->skewBucketNums[i];
2336 0 : HashSkewBucket *skewBucket = hashtable->skewBucket[j];
2337 :
2338 0 : for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
2339 0 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2340 : }
2341 18 : }
2342 :
2343 :
2344 : void
2345 1148 : ExecReScanHash(HashState *node)
2346 : {
2347 1148 : PlanState *outerPlan = outerPlanState(node);
2348 :
2349 : /*
2350 : * If chgParam of the subnode is not null then the plan will be re-scanned
2351 : * by the first ExecProcNode call.
2352 : */
2353 1148 : if (outerPlan->chgParam == NULL)
2354 30 : ExecReScan(outerPlan);
2355 1148 : }
2356 :
2357 :
2358 : /*
2359 : * ExecHashBuildSkewHash
2360 : *
2361 : * Set up for skew optimization if we can identify the most common values
2362 : * (MCVs) of the outer relation's join key. We make a skew hash bucket
2363 : * for the hash value of each MCV, up to the number of slots allowed
2364 : * based on available memory.
2365 : */
2366 : static void
2367 110 : ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
2368 : {
2369 : HeapTupleData *statsTuple;
2370 : AttStatsSlot sslot;
2371 :
2372 : /* Do nothing if planner didn't identify the outer relation's join key */
2373 110 : if (!OidIsValid(node->skewTable))
2374 72 : return;
2375 : /* Also, do nothing if we don't have room for at least one skew bucket */
2376 110 : if (mcvsToUse <= 0)
2377 0 : return;
2378 :
2379 : /*
2380 : * Try to find the MCV statistics for the outer relation's join key.
2381 : */
2382 110 : statsTuple = SearchSysCache3(STATRELATTINH,
2383 : ObjectIdGetDatum(node->skewTable),
2384 110 : Int16GetDatum(node->skewColumn),
2385 110 : BoolGetDatum(node->skewInherit));
2386 110 : if (!HeapTupleIsValid(statsTuple))
2387 72 : return;
2388 :
2389 38 : if (get_attstatsslot(&sslot, statsTuple,
2390 : STATISTIC_KIND_MCV, InvalidOid,
2391 : ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
2392 : {
2393 : double frac;
2394 : int nbuckets;
2395 : FmgrInfo *hashfunctions;
2396 : int i;
2397 :
2398 6 : if (mcvsToUse > sslot.nvalues)
2399 0 : mcvsToUse = sslot.nvalues;
2400 :
2401 : /*
2402 : * Calculate the expected fraction of outer relation that will
2403 : * participate in the skew optimization. If this isn't at least
2404 : * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
2405 : */
2406 6 : frac = 0;
2407 132 : for (i = 0; i < mcvsToUse; i++)
2408 126 : frac += sslot.numbers[i];
2409 6 : if (frac < SKEW_MIN_OUTER_FRACTION)
2410 : {
2411 0 : free_attstatsslot(&sslot);
2412 0 : ReleaseSysCache(statsTuple);
2413 0 : return;
2414 : }
2415 :
2416 : /*
2417 : * Okay, set up the skew hashtable.
2418 : *
2419 : * skewBucket[] is an open addressing hashtable with a power of 2 size
2420 : * that is greater than the number of MCV values. (This ensures there
2421 : * will be at least one null entry, so searches will always
2422 : * terminate.)
2423 : *
2424 : * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
2425 : * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
2426 : * since we limit pg_statistic entries to much less than that.
2427 : */
2428 6 : nbuckets = pg_nextpower2_32(mcvsToUse + 1);
2429 : /* use two more bits just to help avoid collisions */
2430 6 : nbuckets <<= 2;
2431 :
2432 6 : hashtable->skewEnabled = true;
2433 6 : hashtable->skewBucketLen = nbuckets;
2434 :
2435 : /*
2436 : * We allocate the bucket memory in the hashtable's batch context. It
2437 : * is only needed during the first batch, and this ensures it will be
2438 : * automatically removed once the first batch is done.
2439 : */
2440 6 : hashtable->skewBucket = (HashSkewBucket **)
2441 6 : MemoryContextAllocZero(hashtable->batchCxt,
2442 : nbuckets * sizeof(HashSkewBucket *));
2443 6 : hashtable->skewBucketNums = (int *)
2444 6 : MemoryContextAllocZero(hashtable->batchCxt,
2445 : mcvsToUse * sizeof(int));
2446 :
2447 6 : hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
2448 6 : + mcvsToUse * sizeof(int);
2449 6 : hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
2450 6 : + mcvsToUse * sizeof(int);
2451 6 : if (hashtable->spaceUsed > hashtable->spacePeak)
2452 6 : hashtable->spacePeak = hashtable->spaceUsed;
2453 :
2454 : /*
2455 : * Create a skew bucket for each MCV hash value.
2456 : *
2457 : * Note: it is very important that we create the buckets in order of
2458 : * decreasing MCV frequency. If we have to remove some buckets, they
2459 : * must be removed in reverse order of creation (see notes in
2460 : * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
2461 : * be removed first.
2462 : */
2463 6 : hashfunctions = hashtable->outer_hashfunctions;
2464 :
2465 132 : for (i = 0; i < mcvsToUse; i++)
2466 : {
2467 : uint32 hashvalue;
2468 : int bucket;
2469 :
2470 126 : hashvalue = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[0],
2471 126 : hashtable->collations[0],
2472 126 : sslot.values[i]));
2473 :
2474 : /*
2475 : * While we have not hit a hole in the hashtable and have not hit
2476 : * the desired bucket, we have collided with some previous hash
2477 : * value, so try the next bucket location. NB: this code must
2478 : * match ExecHashGetSkewBucket.
2479 : */
2480 126 : bucket = hashvalue & (nbuckets - 1);
2481 126 : while (hashtable->skewBucket[bucket] != NULL &&
2482 0 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2483 0 : bucket = (bucket + 1) & (nbuckets - 1);
2484 :
2485 : /*
2486 : * If we found an existing bucket with the same hashvalue, leave
2487 : * it alone. It's okay for two MCVs to share a hashvalue.
2488 : */
2489 126 : if (hashtable->skewBucket[bucket] != NULL)
2490 0 : continue;
2491 :
2492 : /* Okay, create a new skew bucket for this hashvalue. */
2493 252 : hashtable->skewBucket[bucket] = (HashSkewBucket *)
2494 126 : MemoryContextAlloc(hashtable->batchCxt,
2495 : sizeof(HashSkewBucket));
2496 126 : hashtable->skewBucket[bucket]->hashvalue = hashvalue;
2497 126 : hashtable->skewBucket[bucket]->tuples = NULL;
2498 126 : hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
2499 126 : hashtable->nSkewBuckets++;
2500 126 : hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
2501 126 : hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
2502 126 : if (hashtable->spaceUsed > hashtable->spacePeak)
2503 126 : hashtable->spacePeak = hashtable->spaceUsed;
2504 : }
2505 :
2506 6 : free_attstatsslot(&sslot);
2507 : }
2508 :
2509 38 : ReleaseSysCache(statsTuple);
2510 : }
2511 :
2512 : /*
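 : /*
 :  * Editorial illustration (not part of the original source): with
 :  * mcvsToUse = 100, pg_nextpower2_32(101) = 128, and the two extra bits
 :  * give nbuckets = 512, leaving plenty of empty slots for open addressing.
 :  */
 :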
2513 : * ExecHashGetSkewBucket
2514 : *
2515 : * Returns the index of the skew bucket for this hashvalue,
2516 : * or INVALID_SKEW_BUCKET_NO if the hashvalue is not
2517 : * associated with any active skew bucket.
2518 : */
2519 : int
2520 23369922 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
2521 : {
2522 : int bucket;
2523 :
2524 : /*
2525 : * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
2526 : * particular, this happens after the initial batch is done).
2527 : */
2528 23369922 : if (!hashtable->skewEnabled)
2529 23249922 : return INVALID_SKEW_BUCKET_NO;
2530 :
2531 : /*
2532 : * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
2533 : */
2534 120000 : bucket = hashvalue & (hashtable->skewBucketLen - 1);
2535 :
2536 : /*
2537 : * While we have not hit a hole in the hashtable and have not hit the
2538 : * desired bucket, we have collided with some other hash value, so try the
2539 : * next bucket location.
2540 : */
2541 127830 : while (hashtable->skewBucket[bucket] != NULL &&
2542 10818 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2543 7830 : bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
2544 :
2545 : /*
2546 : * Found the desired bucket?
2547 : */
2548 120000 : if (hashtable->skewBucket[bucket] != NULL)
2549 2988 : return bucket;
2550 :
2551 : /*
2552 : * There must not be any hashtable entry for this hash value.
2553 : */
2554 117012 : return INVALID_SKEW_BUCKET_NO;
2555 : }
2556 :
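 : /*
 :  * Editorial illustration (not part of the original source): with
 :  * skewBucketLen = 512, the hash value 0x12345678 starts probing at slot
 :  * 0x12345678 & 0x1FF = 120; on a collision with a different hash value
 :  * the probe continues at 121, 122, ... wrapping around at 511.
 :  */
 :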
2557 : /*
2558 : * ExecHashSkewTableInsert
2559 : *
2560 : * Insert a tuple into the skew hashtable.
2561 : *
2562 : * This should generally match up with the current-batch case in
2563 : * ExecHashTableInsert.
2564 : */
2565 : static void
2566 588 : ExecHashSkewTableInsert(HashJoinTable hashtable,
2567 : TupleTableSlot *slot,
2568 : uint32 hashvalue,
2569 : int bucketNumber)
2570 : {
2571 : bool shouldFree;
2572 588 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
2573 : HashJoinTuple hashTuple;
2574 : int hashTupleSize;
2575 :
2576 : /* Create the HashJoinTuple */
2577 588 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2578 588 : hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
2579 : hashTupleSize);
2580 588 : hashTuple->hashvalue = hashvalue;
2581 588 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
2582 588 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
2583 :
2584 : /* Push it onto the front of the skew bucket's list */
2585 588 : hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
2586 588 : hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
2587 : Assert(hashTuple != hashTuple->next.unshared);
2588 :
2589 : /* Account for space used, and back off if we've used too much */
2590 588 : hashtable->spaceUsed += hashTupleSize;
2591 588 : hashtable->spaceUsedSkew += hashTupleSize;
2592 588 : if (hashtable->spaceUsed > hashtable->spacePeak)
2593 432 : hashtable->spacePeak = hashtable->spaceUsed;
2594 690 : while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
2595 102 : ExecHashRemoveNextSkewBucket(hashtable);
2596 :
2597 : /* Check we are not over the total spaceAllowed, either */
2598 588 : if (hashtable->spaceUsed > hashtable->spaceAllowed)
2599 0 : ExecHashIncreaseNumBatches(hashtable);
2600 :
2601 588 : if (shouldFree)
2602 588 : heap_free_minimal_tuple(tuple);
2603 588 : }
2604 :
2605 : /*
2606 : * ExecHashRemoveNextSkewBucket
2607 : *
2608 : * Remove the least valuable skew bucket by pushing its tuples into
2609 : * the main hash table.
2610 : */
2611 : static void
2612 102 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
2613 : {
2614 : int bucketToRemove;
2615 : HashSkewBucket *bucket;
2616 : uint32 hashvalue;
2617 : int bucketno;
2618 : int batchno;
2619 : HashJoinTuple hashTuple;
2620 :
2621 : /* Locate the bucket to remove */
2622 102 : bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
2623 102 : bucket = hashtable->skewBucket[bucketToRemove];
2624 :
2625 : /*
2626 : * Calculate which bucket and batch the tuples belong to in the main
2627 : * hashtable. They all have the same hash value, so it's the same for all
2628 : * of them. Also note that it's not possible for nbatch to increase while
2629 : * we are processing the tuples.
2630 : */
2631 102 : hashvalue = bucket->hashvalue;
2632 102 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
2633 :
2634 : /* Process all tuples in the bucket */
2635 102 : hashTuple = bucket->tuples;
2636 450 : while (hashTuple != NULL)
2637 : {
2638 348 : HashJoinTuple nextHashTuple = hashTuple->next.unshared;
2639 : MinimalTuple tuple;
2640 : Size tupleSize;
2641 :
2642 : /*
2643 : * This code must agree with ExecHashTableInsert. We do not use
2644 : * ExecHashTableInsert directly as ExecHashTableInsert expects a
2645 : * TupleTableSlot while we already have HashJoinTuples.
2646 : */
2647 348 : tuple = HJTUPLE_MINTUPLE(hashTuple);
2648 348 : tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2649 :
2650 : /* Decide whether to put the tuple in the hash table or a temp file */
2651 348 : if (batchno == hashtable->curbatch)
2652 : {
2653 : /* Move the tuple to the main hash table */
2654 : HashJoinTuple copyTuple;
2655 :
2656 : /*
2657 : * We must copy the tuple into the dense storage, else it will not
2658 : * be found by, e.g., ExecHashIncreaseNumBatches.
2659 : */
2660 138 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
2661 138 : memcpy(copyTuple, hashTuple, tupleSize);
2662 138 : pfree(hashTuple);
2663 :
2664 138 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
2665 138 : hashtable->buckets.unshared[bucketno] = copyTuple;
2666 :
2667 : /* We have reduced skew space, but overall space doesn't change */
2668 138 : hashtable->spaceUsedSkew -= tupleSize;
2669 : }
2670 : else
2671 : {
2672 : /* Put the tuple into a temp file for later batches */
2673 : Assert(batchno > hashtable->curbatch);
2674 210 : ExecHashJoinSaveTuple(tuple, hashvalue,
2675 210 : &hashtable->innerBatchFile[batchno],
2676 : hashtable);
2677 210 : pfree(hashTuple);
2678 210 : hashtable->spaceUsed -= tupleSize;
2679 210 : hashtable->spaceUsedSkew -= tupleSize;
2680 : }
2681 :
2682 348 : hashTuple = nextHashTuple;
2683 :
2684 : /* allow this loop to be cancellable */
2685 348 : CHECK_FOR_INTERRUPTS();
2686 : }
2687 :
2688 : /*
2689 : * Free the bucket struct itself and reset the hashtable entry to NULL.
2690 : *
2691 : * NOTE: this is not nearly as simple as it looks on the surface, because
2692 : * of the possibility of collisions in the hashtable. Suppose that hash
2693 : * values A and B collide at a particular hashtable entry, and that A was
2694 : * entered first so B gets shifted to a different table entry. If we were
2695 : * to remove A first then ExecHashGetSkewBucket would mistakenly start
2696 : * reporting that B is not in the hashtable, because it would hit the NULL
2697 : * before finding B. However, we always remove entries in the reverse
2698 : * order of creation, so this failure cannot happen.
2699 : */
2700 102 : hashtable->skewBucket[bucketToRemove] = NULL;
2701 102 : hashtable->nSkewBuckets--;
2702 102 : pfree(bucket);
2703 102 : hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
2704 102 : hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
2705 :
2706 : /*
2707 : * If we have removed all skew buckets then give up on skew optimization.
2708 : * Release the arrays since they aren't useful any more.
2709 : */
2710 102 : if (hashtable->nSkewBuckets == 0)
2711 : {
2712 0 : hashtable->skewEnabled = false;
2713 0 : pfree(hashtable->skewBucket);
2714 0 : pfree(hashtable->skewBucketNums);
2715 0 : hashtable->skewBucket = NULL;
2716 0 : hashtable->skewBucketNums = NULL;
2717 0 : hashtable->spaceUsed -= hashtable->spaceUsedSkew;
2718 0 : hashtable->spaceUsedSkew = 0;
2719 : }
2720 102 : }
2721 :
2722 : /*
2723 : * Reserve space in the DSM segment for instrumentation data.
2724 : */
2725 : void
2726 192 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
2727 : {
2728 : size_t size;
2729 :
2730 : /* don't need this if not instrumenting or no workers */
2731 192 : if (!node->ps.instrument || pcxt->nworkers == 0)
2732 108 : return;
2733 :
2734 84 : size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
2735 84 : size = add_size(size, offsetof(SharedHashInfo, hinstrument));
2736 84 : shm_toc_estimate_chunk(&pcxt->estimator, size);
2737 84 : shm_toc_estimate_keys(&pcxt->estimator, 1);
2738 : }
2739 :
2740 : /*
2741 : * Set up a space in the DSM for all workers to record instrumentation data
2742 : * about their hash table.
2743 : */
2744 : void
2745 192 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
2746 : {
2747 : size_t size;
2748 :
2749 : /* don't need this if not instrumenting or no workers */
2750 192 : if (!node->ps.instrument || pcxt->nworkers == 0)
2751 108 : return;
2752 :
2753 84 : size = offsetof(SharedHashInfo, hinstrument) +
2754 84 : pcxt->nworkers * sizeof(HashInstrumentation);
2755 84 : node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
2756 :
2757 : /* Each per-worker area must start out as zeroes. */
2758 84 : memset(node->shared_info, 0, size);
2759 :
2760 84 : node->shared_info->num_workers = pcxt->nworkers;
2761 84 : shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
2762 84 : node->shared_info);
2763 : }
2764 :
2765 : /*
2766 : * Locate the DSM space for hash table instrumentation data that we'll write
2767 : * to at shutdown time.
2768 : */
2769 : void
2770 546 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
2771 : {
2772 : SharedHashInfo *shared_info;
2773 :
2774 : /* don't need this if not instrumenting */
2775 546 : if (!node->ps.instrument)
2776 294 : return;
2777 :
2778 : /*
2779 : * Find our entry in the shared area, and set up a pointer to it so that
2780 : * we'll accumulate stats there when shutting down or rebuilding the hash
2781 : * table.
2782 : */
2783 : shared_info = (SharedHashInfo *)
2784 252 : shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
2785 252 : node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
2786 : }
2787 :
2788 : /*
2789 : * Collect EXPLAIN stats if needed, saving them into DSM memory if
2790 : * ExecHashInitializeWorker was called, or local storage if not. In the
2791 : * parallel case, this must be done in ExecShutdownHash() rather than
2792 : * ExecEndHash() because the latter runs after we've detached from the DSM
2793 : * segment.
2794 : */
2795 : void
2796 24642 : ExecShutdownHash(HashState *node)
2797 : {
2798 : /* Allocate save space if EXPLAIN'ing and we didn't do so already */
2799 24642 : if (node->ps.instrument && !node->hinstrument)
2800 108 : node->hinstrument = palloc0_object(HashInstrumentation);
2801 : /* Now accumulate data for the current (final) hash table */
2802 24642 : if (node->hinstrument && node->hashtable)
2803 296 : ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
2804 24642 : }
2805 :
2806 : /*
2807 : * Retrieve instrumentation data from workers before the DSM segment is
2808 : * detached, so that EXPLAIN can access it.
2809 : */
2810 : void
2811 84 : ExecHashRetrieveInstrumentation(HashState *node)
2812 : {
2813 84 : SharedHashInfo *shared_info = node->shared_info;
2814 : size_t size;
2815 :
2816 84 : if (shared_info == NULL)
2817 0 : return;
2818 :
2819 : /* Replace node->shared_info with a copy in backend-local memory. */
2820 84 : size = offsetof(SharedHashInfo, hinstrument) +
2821 84 : shared_info->num_workers * sizeof(HashInstrumentation);
2822 84 : node->shared_info = palloc(size);
2823 84 : memcpy(node->shared_info, shared_info, size);
2824 : }
2825 :
2826 : /*
2827 : * Accumulate instrumentation data from 'hashtable' into an
2828 : * initially-zeroed HashInstrumentation struct.
2829 : *
2830 : * This is used to merge information across successive hash table instances
2831 : * within a single plan node. We take the maximum values of each interesting
2832 : * number. The largest nbuckets and largest nbatch values might have occurred
2833 : * in different instances, so there's some risk of confusion from reporting
2834 : * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
2835 : * issue if we don't report the largest values. Similarly, we want to report
2836 : * the largest spacePeak regardless of whether it happened in the same
2837 : * instance as the largest nbuckets or nbatch. All the instances should have
2838 : * the same nbuckets_original and nbatch_original; but there's little value
2839 : * in depending on that here, so handle them the same way.
2840 : */
2841 : void
2842 296 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
2843 : HashJoinTable hashtable)
2844 : {
2845 296 : instrument->nbuckets = Max(instrument->nbuckets,
2846 : hashtable->nbuckets);
2847 296 : instrument->nbuckets_original = Max(instrument->nbuckets_original,
2848 : hashtable->nbuckets_original);
2849 296 : instrument->nbatch = Max(instrument->nbatch,
2850 : hashtable->nbatch);
2851 296 : instrument->nbatch_original = Max(instrument->nbatch_original,
2852 : hashtable->nbatch_original);
2853 296 : instrument->space_peak = Max(instrument->space_peak,
2854 : hashtable->spacePeak);
2855 296 : }
2856 :
2857 : /*
2858 : * Allocate 'size' bytes from the currently active HashMemoryChunk
2859 : */
2860 : static void *
2861 7692662 : dense_alloc(HashJoinTable hashtable, Size size)
2862 : {
2863 : HashMemoryChunk newChunk;
2864 : char *ptr;
2865 :
2866 : /* just in case the size is not already aligned properly */
2867 7692662 : size = MAXALIGN(size);
2868 :
2869 : /*
2870 : * If tuple size is larger than threshold, allocate a separate chunk.
2871 : */
2872 7692662 : if (size > HASH_CHUNK_THRESHOLD)
2873 : {
2874 : /* allocate new chunk and put it at the beginning of the list */
2875 0 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2876 : HASH_CHUNK_HEADER_SIZE + size);
2877 0 : newChunk->maxlen = size;
2878 0 : newChunk->used = size;
2879 0 : newChunk->ntuples = 1;
2880 :
2881 : /*
2882 : * Add this chunk to the list after the first existing chunk, so that
2883 : * we don't lose the remaining space in the "current" chunk.
2884 : */
2885 0 : if (hashtable->chunks != NULL)
2886 : {
2887 0 : newChunk->next = hashtable->chunks->next;
2888 0 : hashtable->chunks->next.unshared = newChunk;
2889 : }
2890 : else
2891 : {
2892 0 : newChunk->next.unshared = hashtable->chunks;
2893 0 : hashtable->chunks = newChunk;
2894 : }
2895 :
2896 0 : return HASH_CHUNK_DATA(newChunk);
2897 : }
2898 :
2899 : /*
2900 : * See if we have enough space for it in the current chunk (if any). If
2901 : * not, allocate a fresh chunk.
2902 : */
2903 7692662 : if ((hashtable->chunks == NULL) ||
2904 7674332 : (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
2905 : {
2906 : /* allocate new chunk and put it at the beginning of the list */
2907 28048 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2908 : HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
2909 :
2910 28048 : newChunk->maxlen = HASH_CHUNK_SIZE;
2911 28048 : newChunk->used = size;
2912 28048 : newChunk->ntuples = 1;
2913 :
2914 28048 : newChunk->next.unshared = hashtable->chunks;
2915 28048 : hashtable->chunks = newChunk;
2916 :
2917 28048 : return HASH_CHUNK_DATA(newChunk);
2918 : }
2919 :
2920 : /* There is enough space in the current chunk, let's add the tuple */
2921 7664614 : ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
2922 7664614 : hashtable->chunks->used += size;
2923 7664614 : hashtable->chunks->ntuples += 1;
2924 :
2925 : /* return pointer to the start of the tuple memory */
2926 7664614 : return ptr;
2927 : }
2928 :
2929 : /*
2930 : * Allocate space for a tuple in shared dense storage. This is equivalent to
2931 : * dense_alloc but for Parallel Hash using shared memory.
2932 : *
2933 : * While loading a tuple into shared memory, we might run out of memory and
2934 : * decide to repartition, or determine that the load factor is too high and
2935 : * decide to expand the bucket array, or discover that another participant has
2936 : * commanded us to help do that. Return NULL if the number of buckets or batches
2937 : * has changed, indicating that the caller must retry (considering the
2938 : * possibility that the tuple no longer belongs in the same batch).
2939 : */
2940 : static HashJoinTuple
2941 2381016 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
2942 : dsa_pointer *shared)
2943 : {
2944 2381016 : ParallelHashJoinState *pstate = hashtable->parallel_state;
2945 : dsa_pointer chunk_shared;
2946 : HashMemoryChunk chunk;
2947 : Size chunk_size;
2948 : HashJoinTuple result;
2949 2381016 : int curbatch = hashtable->curbatch;
2950 :
2951 2381016 : size = MAXALIGN(size);
2952 :
2953 : /*
2954 : * Fast path: if there is enough space in this backend's current chunk,
2955 : * then we can allocate without any locking.
2956 : */
2957 2381016 : chunk = hashtable->current_chunk;
2958 2381016 : if (chunk != NULL &&
2959 2380052 : size <= HASH_CHUNK_THRESHOLD &&
2960 2380052 : chunk->maxlen - chunk->used >= size)
2961 : {
2962 :
2963 2377276 : chunk_shared = hashtable->current_chunk_shared;
2964 : Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
2965 2377276 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
2966 2377276 : result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
2967 2377276 : chunk->used += size;
2968 :
2969 : Assert(chunk->used <= chunk->maxlen);
2970 : Assert(result == dsa_get_address(hashtable->area, *shared));
2971 :
2972 2377276 : return result;
2973 : }
2974 :
2975 : /* Slow path: try to allocate a new chunk. */
2976 3740 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
2977 :
2978 : /*
2979 : * Check if we need to help increase the number of buckets or batches.
2980 : */
2981 3740 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
2982 3704 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
2983 : {
2984 180 : ParallelHashGrowth growth = pstate->growth;
2985 :
2986 180 : hashtable->current_chunk = NULL;
2987 180 : LWLockRelease(&pstate->lock);
2988 :
2989 : /* Another participant has commanded us to help grow. */
2990 180 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
2991 36 : ExecParallelHashIncreaseNumBatches(hashtable);
2992 144 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
2993 144 : ExecParallelHashIncreaseNumBuckets(hashtable);
2994 :
2995 : /* The caller must retry. */
2996 180 : return NULL;
2997 : }
2998 :
2999 : /* Oversized tuples get their own chunk. */
3000 3560 : if (size > HASH_CHUNK_THRESHOLD)
3001 48 : chunk_size = size + HASH_CHUNK_HEADER_SIZE;
3002 : else
3003 3512 : chunk_size = HASH_CHUNK_SIZE;
3004 :
3005 : /* Check if it's time to grow batches or buckets. */
3006 3560 : if (pstate->growth != PHJ_GROWTH_DISABLED)
3007 : {
3008 : Assert(curbatch == 0);
3009 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
3010 :
3011 : /*
3012 : * Check if our space limit would be exceeded. To avoid choking on
3013 : * very large tuples or a very low hash_mem setting, we'll always allow
3014 : * each backend to allocate at least one chunk.
3015 : */
3016 1818 : if (hashtable->batches[0].at_least_one_chunk &&
3017 1444 : hashtable->batches[0].shared->size +
3018 1444 : chunk_size > pstate->space_allowed)
3019 : {
3020 36 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3021 36 : hashtable->batches[0].shared->space_exhausted = true;
3022 36 : LWLockRelease(&pstate->lock);
3023 :
3024 36 : return NULL;
3025 : }
3026 :
3027 : /* Check if our load factor limit would be exceeded. */
3028 1782 : if (hashtable->nbatch == 1)
3029 : {
3030 1556 : hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
3031 1556 : hashtable->batches[0].ntuples = 0;
3032 : /* Guard against integer overflow and alloc size overflow */
3033 1556 : if (hashtable->batches[0].shared->ntuples + 1 >
3034 1556 : hashtable->nbuckets * NTUP_PER_BUCKET &&
3035 108 : hashtable->nbuckets < (INT_MAX / 2) &&
3036 108 : hashtable->nbuckets * 2 <=
3037 : MaxAllocSize / sizeof(dsa_pointer_atomic))
3038 : {
3039 108 : pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
3040 108 : LWLockRelease(&pstate->lock);
3041 :
3042 108 : return NULL;
3043 : }
3044 : }
3045 : }
3046 :
3047 : /* We are cleared to allocate a new chunk. */
3048 3416 : chunk_shared = dsa_allocate(hashtable->area, chunk_size);
3049 3416 : hashtable->batches[curbatch].shared->size += chunk_size;
3050 3416 : hashtable->batches[curbatch].at_least_one_chunk = true;
3051 :
3052 : /* Set up the chunk. */
3053 3416 : chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
3054 3416 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
3055 3416 : chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
3056 3416 : chunk->used = size;
3057 :
3058 : /*
3059 : * Push it onto the list of chunks, so that it can be found if we need to
3060 : * increase the number of buckets or batches (batch 0 only) and later for
3061 : * freeing the memory (all batches).
3062 : */
3063 3416 : chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
3064 3416 : hashtable->batches[curbatch].shared->chunks = chunk_shared;
3065 :
3066 3416 : if (size <= HASH_CHUNK_THRESHOLD)
3067 : {
3068 : /*
3069 : * Make this the current chunk so that we can use the fast path to
3070 : * fill the rest of it up in future calls.
3071 : */
3072 3380 : hashtable->current_chunk = chunk;
3073 3380 : hashtable->current_chunk_shared = chunk_shared;
3074 : }
3075 3416 : LWLockRelease(&pstate->lock);
3076 :
3077 : Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
3078 3416 : result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
3079 :
3080 3416 : return result;
3081 : }
3082 :
3083 : /*
3084 : * One backend needs to set up the shared batch state including tuplestores.
3085 : * Other backends will ensure they have correctly configured accessors by
3086 : * calling ExecParallelHashEnsureBatchAccessors().
3087 : */
3088 : static void
3089 216 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
3090 : {
3091 216 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3092 : ParallelHashJoinBatch *batches;
3093 : MemoryContext oldcxt;
3094 : int i;
3095 :
3096 : Assert(hashtable->batches == NULL);
3097 :
3098 : /* Allocate space. */
3099 216 : pstate->batches =
3100 216 : dsa_allocate0(hashtable->area,
3101 : EstimateParallelHashJoinBatch(hashtable) * nbatch);
3102 216 : pstate->nbatch = nbatch;
3103 216 : batches = dsa_get_address(hashtable->area, pstate->batches);
3104 :
3105 : /*
3106 : * Use hash join spill memory context to allocate accessors, including
3107 : * buffers for the temporary files.
3108 : */
3109 216 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3110 :
3111 : /* Allocate this backend's accessor array. */
3112 216 : hashtable->nbatch = nbatch;
3113 216 : hashtable->batches =
3114 216 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3115 :
3116 : /* Set up the shared state, tuplestores and backend-local accessors. */
3117 1050 : for (i = 0; i < hashtable->nbatch; ++i)
3118 : {
3119 834 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3120 834 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3121 : char name[MAXPGPATH];
3122 :
3123 : /*
3124 : * All members of shared were zero-initialized. We just need to set
3125 : * up the Barrier.
3126 : */
3127 834 : BarrierInit(&shared->batch_barrier, 0);
3128 834 : if (i == 0)
3129 : {
3130 : /* Batch 0 doesn't need to be loaded. */
3131 216 : BarrierAttach(&shared->batch_barrier);
3132 864 : while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
3133 648 : BarrierArriveAndWait(&shared->batch_barrier, 0);
3134 216 : BarrierDetach(&shared->batch_barrier);
3135 : }
3136 :
3137 : /* Initialize accessor state. All members were zero-initialized. */
3138 834 : accessor->shared = shared;
3139 :
3140 : /* Initialize the shared tuplestores. */
3141 834 : snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
3142 834 : accessor->inner_tuples =
3143 834 : sts_initialize(ParallelHashJoinBatchInner(shared),
3144 : pstate->nparticipants,
3145 : ParallelWorkerNumber + 1,
3146 : sizeof(uint32),
3147 : SHARED_TUPLESTORE_SINGLE_PASS,
3148 : &pstate->fileset,
3149 : name);
3150 834 : snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
3151 834 : accessor->outer_tuples =
3152 834 : sts_initialize(ParallelHashJoinBatchOuter(shared,
3153 : pstate->nparticipants),
3154 : pstate->nparticipants,
3155 : ParallelWorkerNumber + 1,
3156 : sizeof(uint32),
3157 : SHARED_TUPLESTORE_SINGLE_PASS,
3158 : &pstate->fileset,
3159 : name);
3160 : }
3161 :
3162 216 : MemoryContextSwitchTo(oldcxt);
3163 216 : }
3164 :
3165 : /*
3166 : * Free the current set of ParallelHashJoinBatchAccessor objects.
3167 : */
3168 : static void
3169 48 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
3170 : {
3171 : int i;
3172 :
3173 132 : for (i = 0; i < hashtable->nbatch; ++i)
3174 : {
3175 : /* Make sure no files are left open. */
3176 84 : sts_end_write(hashtable->batches[i].inner_tuples);
3177 84 : sts_end_write(hashtable->batches[i].outer_tuples);
3178 84 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3179 84 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3180 : }
3181 48 : pfree(hashtable->batches);
3182 48 : hashtable->batches = NULL;
3183 48 : }
3184 :
3185 : /*
3186 : * Make sure this backend has up-to-date accessors for the current set of
3187 : * batches.
3188 : */
3189 : static void
3190 876 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
3191 : {
3192 876 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3193 : ParallelHashJoinBatch *batches;
3194 : MemoryContext oldcxt;
3195 : int i;
3196 :
3197 876 : if (hashtable->batches != NULL)
3198 : {
3199 648 : if (hashtable->nbatch == pstate->nbatch)
3200 648 : return;
3201 0 : ExecParallelHashCloseBatchAccessors(hashtable);
3202 : }
3203 :
3204 : /*
3205 : * We should never see a state where the batch-tracking array is freed,
3206 : * because we should have given up sooner if we joined after the build
3207 : * barrier had already reached the PHJ_BUILD_FREE phase.
3208 : */
3209 : Assert(DsaPointerIsValid(pstate->batches));
3210 :
3211 : /*
3212 : * Use hash join spill memory context to allocate accessors, including
3213 : * buffers for the temporary files.
3214 : */
3215 228 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3216 :
3217 : /* Allocate this backend's accessor array. */
3218 228 : hashtable->nbatch = pstate->nbatch;
3219 228 : hashtable->batches =
3220 228 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3221 :
3222 : /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
3223 : batches = (ParallelHashJoinBatch *)
3224 228 : dsa_get_address(hashtable->area, pstate->batches);
3225 :
3226 : /* Set up the accessor array and attach to the tuplestores. */
3227 1248 : for (i = 0; i < hashtable->nbatch; ++i)
3228 : {
3229 1020 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3230 1020 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3231 :
3232 1020 : accessor->shared = shared;
3233 1020 : accessor->preallocated = 0;
3234 1020 : accessor->done = false;
3235 1020 : accessor->outer_eof = false;
3236 1020 : accessor->inner_tuples =
3237 1020 : sts_attach(ParallelHashJoinBatchInner(shared),
3238 : ParallelWorkerNumber + 1,
3239 : &pstate->fileset);
3240 1020 : accessor->outer_tuples =
3241 1020 : sts_attach(ParallelHashJoinBatchOuter(shared,
3242 : pstate->nparticipants),
3243 : ParallelWorkerNumber + 1,
3244 : &pstate->fileset);
3245 : }
3246 :
3247 228 : MemoryContextSwitchTo(oldcxt);
3248 : }
3249 :
3250 : /*
3251 : * Allocate an empty shared memory hash table for a given batch.
3252 : */
3253 : void
3254 750 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
3255 : {
3256 750 : ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
3257 : dsa_pointer_atomic *buckets;
3258 750 : int nbuckets = hashtable->parallel_state->nbuckets;
3259 : int i;
3260 :
3261 750 : batch->buckets =
3262 750 : dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
3263 : buckets = (dsa_pointer_atomic *)
3264 750 : dsa_get_address(hashtable->area, batch->buckets);
3265 3115758 : for (i = 0; i < nbuckets; ++i)
3266 3115008 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
3267 750 : }
3268 :
3269 : /*
3270 : * If we are currently attached to a shared hash join batch, detach. If we
3271 : * are last to detach, clean up.
3272 : */
3273 : void
3274 18502 : ExecHashTableDetachBatch(HashJoinTable hashtable)
3275 : {
3276 18502 : if (hashtable->parallel_state != NULL &&
3277 1266 : hashtable->curbatch >= 0)
3278 : {
3279 870 : int curbatch = hashtable->curbatch;
3280 870 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
3281 870 : bool attached = true;
3282 :
3283 : /* Make sure any temporary files are closed. */
3284 870 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
3285 870 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
3286 :
3287 : /* After attaching we always get at least to PHJ_BATCH_PROBE. */
3288 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
3289 : BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
3290 :
3291 : /*
3292 : * If we're abandoning the PHJ_BATCH_PROBE phase early without having
3293 : * reached the end of it, it means the plan doesn't want any more
3294 : * tuples, and it is happy to abandon any tuples buffered in this
3295 : * process's subplans. For correctness, we can't allow any process to
3296 : * execute the PHJ_BATCH_SCAN phase, because we will never have the
3297 : * complete set of match bits. Therefore we skip emitting unmatched
3298 : * tuples in all backends (if this is a full/right join), as if those
3299 : * tuples were all due to be emitted by this process and it has
3300 : * abandoned them too.
3301 : */
3302 870 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
3303 802 : !hashtable->batches[curbatch].outer_eof)
3304 : {
3305 : /*
3306 : * This flag may be written to by multiple backends during
3307 : * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
3308 : * phase so requires no extra locking.
3309 : */
3310 0 : batch->skip_unmatched = true;
3311 : }
3312 :
3313 : /*
3314 : * Even if we aren't doing a full/right outer join, we'll step through
3315 : * the PHJ_BATCH_SCAN phase just to maintain the invariant that
3316 : * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
3317 : */
3318 870 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
3319 802 : attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
3320 870 : if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
3321 : {
3322 : /*
3323 : * We are no longer attached to the batch barrier, but we're the
3324 : * process that was chosen to free resources and it's safe to
3325 : * assert the current phase. The ParallelHashJoinBatch can't go
3326 : * away underneath us while we are attached to the build barrier,
3327 : * making this access safe.
3328 : */
3329 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
3330 :
3331 : /* Free shared chunks and buckets. */
3332 3878 : while (DsaPointerIsValid(batch->chunks))
3333 : {
3334 : HashMemoryChunk chunk =
3335 3128 : dsa_get_address(hashtable->area, batch->chunks);
3336 3128 : dsa_pointer next = chunk->next.shared;
3337 :
3338 3128 : dsa_free(hashtable->area, batch->chunks);
3339 3128 : batch->chunks = next;
3340 : }
3341 750 : if (DsaPointerIsValid(batch->buckets))
3342 : {
3343 750 : dsa_free(hashtable->area, batch->buckets);
3344 750 : batch->buckets = InvalidDsaPointer;
3345 : }
3346 : }
3347 :
3348 : /*
3349 : * Track the largest batch we've been attached to. Though each
3350 : * backend might see a different subset of batches, explain.c will
3351 : * scan the results from all backends to find the largest value.
3352 : */
3353 870 : hashtable->spacePeak =
3354 870 : Max(hashtable->spacePeak,
3355 : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
3356 :
3357 : /* Remember that we are not attached to a batch. */
3358 870 : hashtable->curbatch = -1;
3359 : }
3360 18502 : }
3361 :
3362 : /*
3363 : * Detach from all shared resources. If we are last to detach, clean up.
3364 : */
3365 : void
3366 17632 : ExecHashTableDetach(HashJoinTable hashtable)
3367 : {
3368 17632 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3369 :
3370 : /*
3371 : * If we're involved in a parallel query, we must either have gotten all
3372 : * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
3373 : */
3374 : Assert(!pstate ||
3375 : BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
3376 :
3377 17632 : if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
3378 : {
3379 : int i;
3380 :
3381 : /* Make sure any temporary files are closed. */
3382 396 : if (hashtable->batches)
3383 : {
3384 2166 : for (i = 0; i < hashtable->nbatch; ++i)
3385 : {
3386 1770 : sts_end_write(hashtable->batches[i].inner_tuples);
3387 1770 : sts_end_write(hashtable->batches[i].outer_tuples);
3388 1770 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3389 1770 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3390 : }
3391 : }
3392 :
3393 : /* If we're last to detach, clean up shared memory. */
3394 396 : if (BarrierArriveAndDetach(&pstate->build_barrier))
3395 : {
3396 : /*
3397 : * Late joining processes will see this state and give up
3398 : * immediately.
3399 : */
3400 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
3401 :
3402 168 : if (DsaPointerIsValid(pstate->batches))
3403 : {
3404 168 : dsa_free(hashtable->area, pstate->batches);
3405 168 : pstate->batches = InvalidDsaPointer;
3406 : }
3407 : }
3408 : }
3409 17632 : hashtable->parallel_state = NULL;
3410 17632 : }
3411 :
3412 : /*
3413 : * Get the first tuple in a given bucket identified by number.
3414 : */
3415 : static inline HashJoinTuple
3416 2774430 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
3417 : {
3418 : HashJoinTuple tuple;
3419 : dsa_pointer p;
3420 :
3421 : Assert(hashtable->parallel_state);
3422 2774430 : p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
3423 2774430 : tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
3424 :
3425 2774430 : return tuple;
3426 : }
3427 :
3428 : /*
3429 : * Get the next tuple in the same bucket as 'tuple'.
3430 : */
3431 : static inline HashJoinTuple
3432 3807186 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
3433 : {
3434 : HashJoinTuple next;
3435 :
3436 : Assert(hashtable->parallel_state);
3437 3807186 : next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
3438 :
3439 3807186 : return next;
3440 : }
3441 :
3442 : /*
3443 : * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
3444 : */
3445 : static inline void
3446 2932550 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
3447 : HashJoinTuple tuple,
3448 : dsa_pointer tuple_shared)
3449 : {
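/*
 * Classic lock-free push: read the current head into the new tuple's next
 * pointer, then try to swing the head to point at the new tuple with a
 * compare-and-exchange.  If another backend changed the head in the
 * meantime, the exchange fails and we simply loop and retry.
 */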
3450 : for (;;)
3451 : {
3452 2932550 : tuple->next.shared = dsa_pointer_atomic_read(head);
3453 2932550 : if (dsa_pointer_atomic_compare_exchange(head,
3454 2932550 : &tuple->next.shared,
3455 : tuple_shared))
3456 2922208 : break;
3457 : }
3458 2922208 : }
3459 :
3460 : /*
3461 : * Prepare to work on a given batch.
3462 : */
3463 : void
3464 1950 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
3465 : {
3466 : Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
3467 :
3468 1950 : hashtable->curbatch = batchno;
3469 1950 : hashtable->buckets.shared = (dsa_pointer_atomic *)
3470 1950 : dsa_get_address(hashtable->area,
3471 1950 : hashtable->batches[batchno].shared->buckets);
3472 1950 : hashtable->nbuckets = hashtable->parallel_state->nbuckets;
3473 1950 : hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
3474 1950 : hashtable->current_chunk = NULL;
3475 1950 : hashtable->current_chunk_shared = InvalidDsaPointer;
3476 1950 : hashtable->batches[batchno].at_least_one_chunk = false;
3477 1950 : }
3478 :
3479 : /*
3480 : * Take the next available chunk from the queue of chunks being worked on in
3481 : * parallel. Return NULL if there are none left. Otherwise return a pointer
3482 : * to the chunk, and set *shared to the DSA pointer to the chunk.
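 *
 * (The queue is a simple linked list headed by pstate->chunk_work_queue and
 * is popped under pstate->lock, so cooperating workers claim whole chunks
 * rather than individual tuples.)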
3483 : */
3484 : static HashMemoryChunk
3485 1142 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
3486 : {
3487 1142 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3488 : HashMemoryChunk chunk;
3489 :
3490 1142 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3491 1142 : if (DsaPointerIsValid(pstate->chunk_work_queue))
3492 : {
3493 950 : *shared = pstate->chunk_work_queue;
3494 : chunk = (HashMemoryChunk)
3495 950 : dsa_get_address(hashtable->area, *shared);
3496 950 : pstate->chunk_work_queue = chunk->next.shared;
3497 : }
3498 : else
3499 192 : chunk = NULL;
3500 1142 : LWLockRelease(&pstate->lock);
3501 :
3502 1142 : return chunk;
3503 : }
3504 :
3505 : /*
3506 : * Increase the space preallocated in this backend for a given inner batch by
3507 : * at least a given amount. This allows us to track whether a given batch
3508 : * would fit in memory when loaded back in. Also increase the number of
3509 : * batches or buckets if required.
3510 : *
3511 : * This maintains a running estimate of how much space will be taken when we
3512 : * load the batch back into memory by simulating the way chunks will be handed
3513 : * out to workers. It's not perfectly accurate because the tuples will be
3514 : * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
3515 : * it should be pretty close. It tends to overestimate by a fraction of a
3516 : * chunk per worker since all workers gang up to preallocate during hashing,
3517 : * but workers tend to reload batches alone if there are enough to go around,
3518 : * leaving fewer partially filled chunks. This effect is bounded by
3519 : * nparticipants.
3520 : *
3521 : * Return false if the number of batches or buckets has changed, and the
3522 : * caller should reconsider which batch a given tuple now belongs in and call
3523 : * again.
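 *
 * (Roughly: each successful call charges at least one chunk's worth of space
 * -- HASH_CHUNK_SIZE minus its header, about 32 kB with the usual definitions
 * -- to the batch's shared estimated_size, and the caller then draws the
 * tuples it writes to the batch file against the per-backend 'preallocated'
 * balance set here.)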
3524 : */
3525 : static bool
3526 1690 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
3527 : {
3528 1690 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3529 1690 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
3530 1690 : size_t want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
3531 :
3532 : Assert(batchno > 0);
3533 : Assert(batchno < hashtable->nbatch);
3534 : Assert(size == MAXALIGN(size));
3535 :
3536 1690 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3537 :
3538 : /* Has another participant commanded us to help grow? */
3539 1690 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3540 1678 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3541 : {
3542 12 : ParallelHashGrowth growth = pstate->growth;
3543 :
3544 12 : LWLockRelease(&pstate->lock);
3545 12 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3546 12 : ExecParallelHashIncreaseNumBatches(hashtable);
3547 0 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3548 0 : ExecParallelHashIncreaseNumBuckets(hashtable);
3549 :
3550 12 : return false;
3551 : }
3552 :
3553 1678 : if (pstate->growth != PHJ_GROWTH_DISABLED &&
3554 1450 : batch->at_least_one_chunk &&
3555 700 : (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
3556 700 : > pstate->space_allowed))
3557 : {
3558 : /*
3559 : * We have determined that this batch would exceed the space budget if
3560 : * loaded into memory. Command all participants to help repartition.
3561 : */
3562 12 : batch->shared->space_exhausted = true;
3563 12 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3564 12 : LWLockRelease(&pstate->lock);
3565 :
3566 12 : return false;
3567 : }
3568 :
3569 1666 : batch->at_least_one_chunk = true;
3570 1666 : batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
3571 1666 : batch->preallocated = want;
3572 1666 : LWLockRelease(&pstate->lock);
3573 :
3574 1666 : return true;
3575 : }
3576 :
3577 : /*
3578 : * Calculate the limit on how much memory can be used by Hash and similar
3579 : * plan types. This is work_mem times hash_mem_multiplier, and is
3580 : * expressed in bytes.
3581 : *
3582 : * Exported for use by the planner, as well as other hash-like executor
3583 : * nodes. This is a rather random place for this, but there is no better
3584 : * place.
3585 : */
3586 : size_t
3587 1014138 : get_hash_memory_limit(void)
3588 : {
3589 : double mem_limit;
3590 :
3591 : /* Do initial calculation in double arithmetic */
3592 1014138 : mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
3593 :
3594 : /* Clamp in case it doesn't fit in size_t */
3595 1014138 : mem_limit = Min(mem_limit, (double) SIZE_MAX);
3596 :
3597 1014138 : return (size_t) mem_limit;
3598 : }
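/*
 * (Worked example: with work_mem = 4096, i.e. 4 MB expressed in kB, and
 * hash_mem_multiplier = 2.0, the limit is 4096 * 2.0 * 1024 = 8388608 bytes,
 * i.e. 8 MB.)
 */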
|