LCOV - code coverage report
Current view: top level - src/backend/executor - nodeHash.c (source / functions)
Test: PostgreSQL 19devel       Lines:     1053 of 1109 hit   (95.0 %)
Date: 2025-10-30 15:17:48      Functions:   54 of   55 hit   (98.2 %)
Legend: Lines: hit | not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeHash.c
       4             :  *    Routines to hash relations for hashjoin
       5             :  *
       6             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/executor/nodeHash.c
      12             :  *
      13             :  * See note on parallelism in nodeHashjoin.c.
      14             :  *
      15             :  *-------------------------------------------------------------------------
      16             :  */
      17             : /*
      18             :  * INTERFACE ROUTINES
      19             :  *      MultiExecHash   - generate an in-memory hash table of the relation
      20             :  *      ExecInitHash    - initialize node and subnodes
      21             :  *      ExecEndHash     - shutdown node and subnodes
      22             :  */
      23             : 
      24             : #include "postgres.h"
      25             : 
      26             : #include <math.h>
      27             : #include <limits.h>
      28             : 
      29             : #include "access/htup_details.h"
      30             : #include "access/parallel.h"
      31             : #include "catalog/pg_statistic.h"
      32             : #include "commands/tablespace.h"
      33             : #include "executor/executor.h"
      34             : #include "executor/hashjoin.h"
      35             : #include "executor/nodeHash.h"
      36             : #include "executor/nodeHashjoin.h"
      37             : #include "miscadmin.h"
      38             : #include "port/pg_bitutils.h"
      39             : #include "utils/lsyscache.h"
      40             : #include "utils/memutils.h"
      41             : #include "utils/syscache.h"
      42             : #include "utils/wait_event.h"
      43             : 
      44             : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
      45             : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
      46             : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
      47             : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
      48             : static void ExecHashBuildSkewHash(HashState *hashstate,
      49             :                                   HashJoinTable hashtable, Hash *node,
      50             :                                   int mcvsToUse);
      51             : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
      52             :                                     TupleTableSlot *slot,
      53             :                                     uint32 hashvalue,
      54             :                                     int bucketNumber);
      55             : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
      56             : 
      57             : static void *dense_alloc(HashJoinTable hashtable, Size size);
      58             : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
      59             :                                                 size_t size,
      60             :                                                 dsa_pointer *shared);
      61             : static void MultiExecPrivateHash(HashState *node);
      62             : static void MultiExecParallelHash(HashState *node);
      63             : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
      64             :                                                        int bucketno);
      65             : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
      66             :                                                       HashJoinTuple tuple);
      67             : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
      68             :                                              HashJoinTuple tuple,
      69             :                                              dsa_pointer tuple_shared);
      70             : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
      71             : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
      72             : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
      73             : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
      74             : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
      75             :                                                      dsa_pointer *shared);
      76             : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
      77             :                                           int batchno,
      78             :                                           size_t size);
      79             : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
      80             : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
      81             : 
      82             : 
      83             : /* ----------------------------------------------------------------
      84             :  *      ExecHash
      85             :  *
      86             :  *      stub for pro forma compliance
      87             :  * ----------------------------------------------------------------
      88             :  */
      89             : static TupleTableSlot *
      90           0 : ExecHash(PlanState *pstate)
      91             : {
      92           0 :     elog(ERROR, "Hash node does not support ExecProcNode call convention");
      93             :     return NULL;
      94             : }
      95             : 
      96             : /* ----------------------------------------------------------------
      97             :  *      MultiExecHash
      98             :  *
      99             :  *      build hash table for hashjoin, doing partitioning if more
     100             :  *      than one batch is required.
     101             :  * ----------------------------------------------------------------
     102             :  */
     103             : Node *
     104       25488 : MultiExecHash(HashState *node)
     105             : {
     106             :     /* must provide our own instrumentation support */
     107       25488 :     if (node->ps.instrument)
     108         334 :         InstrStartNode(node->ps.instrument);
     109             : 
     110       25488 :     if (node->parallel_state != NULL)
     111         414 :         MultiExecParallelHash(node);
     112             :     else
     113       25074 :         MultiExecPrivateHash(node);
     114             : 
     115             :     /* must provide our own instrumentation support */
     116       25488 :     if (node->ps.instrument)
     117         334 :         InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
     118             : 
     119             :     /*
     120             :      * We do not return the hash table directly because it's not a subtype of
     121             :      * Node, and so would violate the MultiExecProcNode API.  Instead, our
     122             :      * parent Hashjoin node is expected to know how to fish it out of our node
     123             :      * state.  Ugly but not really worth cleaning up, since Hashjoin knows
     124             :      * quite a bit more about Hash besides that.
     125             :      */
     126       25488 :     return NULL;
     127             : }
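
As the comment above notes, the parent Hash Join node "fishes" the hash table out of this node's state rather than receiving it as a return value.  A simplified sketch of that caller-side sequence (loosely based on nodeHashjoin.c, not the verbatim upstream code; hjstate stands for the caller's HashJoinState):

    HashState  *hashNode = (HashState *) innerPlanState(hjstate);
    HashJoinTable hashtable;

    hashtable = ExecHashTableCreate(hashNode);          /* create empty table */
    hashNode->hashtable = hashtable;                    /* publish in node state */
    (void) MultiExecProcNode((PlanState *) hashNode);   /* runs MultiExecHash() */
    /* ... the join later probes hashNode->hashtable directly ... */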
     128             : 
     129             : /* ----------------------------------------------------------------
     130             :  *      MultiExecPrivateHash
     131             :  *
     132             :  *      parallel-oblivious version, building a backend-private
     133             :  *      hash table and (if necessary) batch files.
     134             :  * ----------------------------------------------------------------
     135             :  */
     136             : static void
     137       25074 : MultiExecPrivateHash(HashState *node)
     138             : {
     139             :     PlanState  *outerNode;
     140             :     HashJoinTable hashtable;
     141             :     TupleTableSlot *slot;
     142             :     ExprContext *econtext;
     143             : 
     144             :     /*
     145             :      * get state info from node
     146             :      */
     147       25074 :     outerNode = outerPlanState(node);
     148       25074 :     hashtable = node->hashtable;
     149             : 
     150             :     /*
     151             :      * set expression context
     152             :      */
     153       25074 :     econtext = node->ps.ps_ExprContext;
     154             : 
     155             :     /*
     156             :      * Get all tuples from the node below the Hash node and insert into the
     157             :      * hash table (or temp files).
     158             :      */
     159             :     for (;;)
     160     8904488 :     {
     161             :         bool        isnull;
     162             :         Datum       hashdatum;
     163             : 
     164     8929562 :         slot = ExecProcNode(outerNode);
     165     8929562 :         if (TupIsNull(slot))
     166             :             break;
     167             :         /* We have to compute the hash value */
     168     8904488 :         econtext->ecxt_outertuple = slot;
     169             : 
     170     8904488 :         ResetExprContext(econtext);
     171             : 
     172     8904488 :         hashdatum = ExecEvalExprSwitchContext(node->hash_expr, econtext,
     173             :                                               &isnull);
     174             : 
     175     8904488 :         if (!isnull)
     176             :         {
     177     8904386 :             uint32      hashvalue = DatumGetUInt32(hashdatum);
     178             :             int         bucketNumber;
     179             : 
     180     8904386 :             bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
     181     8904386 :             if (bucketNumber != INVALID_SKEW_BUCKET_NO)
     182             :             {
     183             :                 /* It's a skew tuple, so put it into that hash table */
     184         588 :                 ExecHashSkewTableInsert(hashtable, slot, hashvalue,
     185             :                                         bucketNumber);
     186         588 :                 hashtable->skewTuples += 1;
     187             :             }
     188             :             else
     189             :             {
     190             :                 /* Not subject to skew optimization, so insert normally */
     191     8903798 :                 ExecHashTableInsert(hashtable, slot, hashvalue);
     192             :             }
     193     8904386 :             hashtable->totalTuples += 1;
     194             :         }
     195             :     }
     196             : 
     197             :     /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
     198       25074 :     if (hashtable->nbuckets != hashtable->nbuckets_optimal)
     199          72 :         ExecHashIncreaseNumBuckets(hashtable);
     200             : 
     201             :     /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
     202       25074 :     hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
     203       25074 :     if (hashtable->spaceUsed > hashtable->spacePeak)
     204       25028 :         hashtable->spacePeak = hashtable->spaceUsed;
     205             : 
     206       25074 :     hashtable->partialTuples = hashtable->totalTuples;
     207       25074 : }
     208             : 
     209             : /* ----------------------------------------------------------------
     210             :  *      MultiExecParallelHash
     211             :  *
     212             :  *      parallel-aware version, building a shared hash table and
     213             :  *      (if necessary) batch files using the combined effort of
     214             :  *      a set of co-operating backends.
     215             :  * ----------------------------------------------------------------
     216             :  */
     217             : static void
     218         414 : MultiExecParallelHash(HashState *node)
     219             : {
     220             :     ParallelHashJoinState *pstate;
     221             :     PlanState  *outerNode;
     222             :     HashJoinTable hashtable;
     223             :     TupleTableSlot *slot;
     224             :     ExprContext *econtext;
     225             :     uint32      hashvalue;
     226             :     Barrier    *build_barrier;
     227             :     int         i;
     228             : 
     229             :     /*
     230             :      * get state info from node
     231             :      */
     232         414 :     outerNode = outerPlanState(node);
     233         414 :     hashtable = node->hashtable;
     234             : 
     235             :     /*
     236             :      * set expression context
     237             :      */
     238         414 :     econtext = node->ps.ps_ExprContext;
     239             : 
     240             :     /*
     241             :      * Synchronize the parallel hash table build.  At this stage we know that
     242             :      * the shared hash table has been or is being set up by
     243             :      * ExecHashTableCreate(), but we don't know if our peers have returned
     244             :      * from there or are here in MultiExecParallelHash(), and if so how far
     245             :      * through they are.  To find out, we check the build_barrier phase and
     246             :      * then jump to the right step in the build algorithm.
     247             :      */
     248         414 :     pstate = hashtable->parallel_state;
     249         414 :     build_barrier = &pstate->build_barrier;
     250             :     Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
     251         414 :     switch (BarrierPhase(build_barrier))
     252             :     {
     253         194 :         case PHJ_BUILD_ALLOCATE:
     254             : 
     255             :             /*
     256             :              * Either I just allocated the initial hash table in
     257             :              * ExecHashTableCreate(), or someone else is doing that.  Either
     258             :              * way, wait for everyone to arrive here so we can proceed.
     259             :              */
     260         194 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
     261             :             /* Fall through. */
     262             : 
     263         290 :         case PHJ_BUILD_HASH_INNER:
     264             : 
     265             :             /*
     266             :              * It's time to begin hashing, or if we just arrived here then
     267             :              * hashing is already underway, so join in that effort.  While
     268             :              * hashing we have to be prepared to help increase the number of
     269             :              * batches or buckets at any time, and if we arrived here when
     270             :              * that was already underway we'll have to help complete that work
     271             :              * immediately so that it's safe to access batches and buckets
     272             :              * below.
     273             :              */
     274         290 :             if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
     275             :                 PHJ_GROW_BATCHES_ELECT)
     276           0 :                 ExecParallelHashIncreaseNumBatches(hashtable);
     277         290 :             if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
     278             :                 PHJ_GROW_BUCKETS_ELECT)
     279           0 :                 ExecParallelHashIncreaseNumBuckets(hashtable);
     280         290 :             ExecParallelHashEnsureBatchAccessors(hashtable);
     281         290 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
     282             :             for (;;)
     283     2160192 :             {
     284             :                 bool        isnull;
     285             : 
     286     2160482 :                 slot = ExecProcNode(outerNode);
     287     2160482 :                 if (TupIsNull(slot))
     288             :                     break;
     289     2160192 :                 econtext->ecxt_outertuple = slot;
     290             : 
     291     2160192 :                 ResetExprContext(econtext);
     292             : 
     293     2160192 :                 hashvalue = DatumGetUInt32(ExecEvalExprSwitchContext(node->hash_expr,
     294             :                                                                      econtext,
     295             :                                                                      &isnull));
     296             : 
     297     2160192 :                 if (!isnull)
     298     2160192 :                     ExecParallelHashTableInsert(hashtable, slot, hashvalue);
     299     2160192 :                 hashtable->partialTuples++;
     300             :             }
     301             : 
     302             :             /*
     303             :              * Make sure that any tuples we wrote to disk are visible to
     304             :              * others before anyone tries to load them.
     305             :              */
     306        1296 :             for (i = 0; i < hashtable->nbatch; ++i)
     307        1006 :                 sts_end_write(hashtable->batches[i].inner_tuples);
     308             : 
     309             :             /*
     310             :              * Update shared counters.  We need an accurate total tuple count
     311             :              * to control the empty table optimization.
     312             :              */
     313         290 :             ExecParallelHashMergeCounters(hashtable);
     314             : 
     315         290 :             BarrierDetach(&pstate->grow_buckets_barrier);
     316         290 :             BarrierDetach(&pstate->grow_batches_barrier);
     317             : 
     318             :             /*
     319             :              * Wait for everyone to finish building and flushing files and
     320             :              * counters.
     321             :              */
     322         290 :             if (BarrierArriveAndWait(build_barrier,
     323             :                                      WAIT_EVENT_HASH_BUILD_HASH_INNER))
     324             :             {
     325             :                 /*
     326             :                  * Elect one backend to disable any further growth.  Batches
     327             :                  * are now fixed.  While building them we made sure they'd fit
     328             :                  * in our memory budget when we load them back in later (or we
     329             :                  * tried to do that and gave up because we detected extreme
     330             :                  * skew).
     331             :                  */
     332         174 :                 pstate->growth = PHJ_GROWTH_DISABLED;
     333             :             }
     334             :     }
     335             : 
     336             :     /*
     337             :      * We're not yet attached to a batch.  We all agree on the dimensions and
     338             :      * number of inner tuples (for the empty table optimization).
     339             :      */
     340         414 :     hashtable->curbatch = -1;
     341         414 :     hashtable->nbuckets = pstate->nbuckets;
     342         414 :     hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
     343         414 :     hashtable->totalTuples = pstate->total_tuples;
     344             : 
     345             :     /*
     346             :      * Unless we're completely done and the batch state has been freed, make
     347             :      * sure we have accessors.
     348             :      */
     349         414 :     if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
     350         414 :         ExecParallelHashEnsureBatchAccessors(hashtable);
     351             : 
     352             :     /*
     353             :      * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
     354             :      * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
     355             :      * there already).
     356             :      */
     357             :     Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
     358             :            BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
     359             :            BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
     360         414 : }
     361             : 
     362             : /* ----------------------------------------------------------------
     363             :  *      ExecInitHash
     364             :  *
     365             :  *      Init routine for Hash node
     366             :  * ----------------------------------------------------------------
     367             :  */
     368             : HashState *
     369       35110 : ExecInitHash(Hash *node, EState *estate, int eflags)
     370             : {
     371             :     HashState  *hashstate;
     372             : 
     373             :     /* check for unsupported flags */
     374             :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
     375             : 
     376             :     /*
     377             :      * create state structure
     378             :      */
     379       35110 :     hashstate = makeNode(HashState);
     380       35110 :     hashstate->ps.plan = (Plan *) node;
     381       35110 :     hashstate->ps.state = estate;
     382       35110 :     hashstate->ps.ExecProcNode = ExecHash;
     383             :     /* delay building hashtable until ExecHashTableCreate() in executor run */
     384       35110 :     hashstate->hashtable = NULL;
     385             : 
     386             :     /*
     387             :      * Miscellaneous initialization
     388             :      *
     389             :      * create expression context for node
     390             :      */
     391       35110 :     ExecAssignExprContext(estate, &hashstate->ps);
     392             : 
     393             :     /*
     394             :      * initialize child nodes
     395             :      */
     396       35110 :     outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
     397             : 
     398             :     /*
     399             :      * initialize our result slot and type. No need to build projection
     400             :      * because this node doesn't do projections.
     401             :      */
     402       35110 :     ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
     403       35110 :     hashstate->ps.ps_ProjInfo = NULL;
     404             : 
     405             :     Assert(node->plan.qual == NIL);
     406             : 
     407             :     /*
     408             :      * Delay initialization of hash_expr until ExecInitHashJoin().  We cannot
     409             :      * build the ExprState here as we don't yet know the join type we're going
     410             :      * to be hashing values for and we need to know that before calling
     411             :      * ExecBuildHash32Expr as the keep_nulls parameter depends on the join
     412             :      * type.
     413             :      */
     414       35110 :     hashstate->hash_expr = NULL;
     415             : 
     416       35110 :     return hashstate;
     417             : }
     418             : 
     419             : /* ---------------------------------------------------------------
     420             :  *      ExecEndHash
     421             :  *
     422             :  *      clean up routine for Hash node
     423             :  * ----------------------------------------------------------------
     424             :  */
     425             : void
     426       34998 : ExecEndHash(HashState *node)
     427             : {
     428             :     PlanState  *outerPlan;
     429             : 
     430             :     /*
     431             :      * shut down the subplan
     432             :      */
     433       34998 :     outerPlan = outerPlanState(node);
     434       34998 :     ExecEndNode(outerPlan);
     435       34998 : }
     436             : 
     437             : 
     438             : /* ----------------------------------------------------------------
     439             :  *      ExecHashTableCreate
     440             :  *
     441             :  *      create an empty hashtable data structure for hashjoin.
     442             :  * ----------------------------------------------------------------
     443             :  */
     444             : HashJoinTable
     445       25488 : ExecHashTableCreate(HashState *state)
     446             : {
     447             :     Hash       *node;
     448             :     HashJoinTable hashtable;
     449             :     Plan       *outerNode;
     450             :     size_t      space_allowed;
     451             :     int         nbuckets;
     452             :     int         nbatch;
     453             :     double      rows;
     454             :     int         num_skew_mcvs;
     455             :     int         log2_nbuckets;
     456             :     MemoryContext oldcxt;
     457             : 
     458             :     /*
     459             :      * Get information about the size of the relation to be hashed (it's the
     460             :      * "outer" subtree of this node, but the inner relation of the hashjoin).
     461             :      * Compute the appropriate size of the hash table.
     462             :      */
     463       25488 :     node = (Hash *) state->ps.plan;
     464       25488 :     outerNode = outerPlan(node);
     465             : 
     466             :     /*
     467             :      * If this is a shared hash table with a partial plan, then we can't use
     468             :      * outerNode->plan_rows to estimate its size.  We need an estimate of the
     469             :      * total number of rows across all copies of the partial plan.
     470             :      */
     471       25488 :     rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
     472             : 
     473       25074 :     ExecChooseHashTableSize(rows, outerNode->plan_width,
     474       25488 :                             OidIsValid(node->skewTable),
     475       25488 :                             state->parallel_state != NULL,
     476       25488 :                             state->parallel_state != NULL ?
     477         414 :                             state->parallel_state->nparticipants - 1 : 0,
     478             :                             &space_allowed,
     479             :                             &nbuckets, &nbatch, &num_skew_mcvs);
     480             : 
     481             :     /* nbuckets must be a power of 2 */
     482       25488 :     log2_nbuckets = pg_ceil_log2_32(nbuckets);
     483             :     Assert(nbuckets == (1 << log2_nbuckets));
     484             : 
     485             :     /*
     486             :      * Initialize the hash table control block.
     487             :      *
     488             :      * The hashtable control block is just palloc'd from the executor's
     489             :      * per-query memory context.  Everything else should be kept inside the
     490             :      * subsidiary hashCxt, batchCxt or spillCxt.
     491             :      */
     492       25488 :     hashtable = palloc_object(HashJoinTableData);
     493       25488 :     hashtable->nbuckets = nbuckets;
     494       25488 :     hashtable->nbuckets_original = nbuckets;
     495       25488 :     hashtable->nbuckets_optimal = nbuckets;
     496       25488 :     hashtable->log2_nbuckets = log2_nbuckets;
     497       25488 :     hashtable->log2_nbuckets_optimal = log2_nbuckets;
     498       25488 :     hashtable->buckets.unshared = NULL;
     499       25488 :     hashtable->skewEnabled = false;
     500       25488 :     hashtable->skewBucket = NULL;
     501       25488 :     hashtable->skewBucketLen = 0;
     502       25488 :     hashtable->nSkewBuckets = 0;
     503       25488 :     hashtable->skewBucketNums = NULL;
     504       25488 :     hashtable->nbatch = nbatch;
     505       25488 :     hashtable->curbatch = 0;
     506       25488 :     hashtable->nbatch_original = nbatch;
     507       25488 :     hashtable->nbatch_outstart = nbatch;
     508       25488 :     hashtable->growEnabled = true;
     509       25488 :     hashtable->totalTuples = 0;
     510       25488 :     hashtable->partialTuples = 0;
     511       25488 :     hashtable->skewTuples = 0;
     512       25488 :     hashtable->innerBatchFile = NULL;
     513       25488 :     hashtable->outerBatchFile = NULL;
     514       25488 :     hashtable->spaceUsed = 0;
     515       25488 :     hashtable->spacePeak = 0;
     516       25488 :     hashtable->spaceAllowed = space_allowed;
     517       25488 :     hashtable->spaceUsedSkew = 0;
     518       25488 :     hashtable->spaceAllowedSkew =
     519       25488 :         hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
     520       25488 :     hashtable->chunks = NULL;
     521       25488 :     hashtable->current_chunk = NULL;
     522       25488 :     hashtable->parallel_state = state->parallel_state;
     523       25488 :     hashtable->area = state->ps.state->es_query_dsa;
     524       25488 :     hashtable->batches = NULL;
     525             : 
     526             : #ifdef HJDEBUG
     527             :     printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
     528             :            hashtable, nbatch, nbuckets);
     529             : #endif
     530             : 
     531             :     /*
     532             :      * Create temporary memory contexts in which to keep the hashtable working
     533             :      * storage.  See notes in executor/hashjoin.h.
     534             :      */
     535       25488 :     hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
     536             :                                                "HashTableContext",
     537             :                                                ALLOCSET_DEFAULT_SIZES);
     538             : 
     539       25488 :     hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
     540             :                                                 "HashBatchContext",
     541             :                                                 ALLOCSET_DEFAULT_SIZES);
     542             : 
     543       25488 :     hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
     544             :                                                 "HashSpillContext",
     545             :                                                 ALLOCSET_DEFAULT_SIZES);
     546             : 
     547             :     /* Allocate data that will live for the life of the hashjoin */
     548             : 
     549       25488 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
     550             : 
     551       25488 :     if (nbatch > 1 && hashtable->parallel_state == NULL)
     552             :     {
     553             :         MemoryContext oldctx;
     554             : 
     555             :         /*
     556             :          * allocate and initialize the file arrays in spillCxt (not needed for
     557             :          * parallel case which uses shared tuplestores instead of raw files)
     558             :          */
     559         126 :         oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
     560             : 
     561         126 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
     562         126 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
     563             : 
     564         126 :         MemoryContextSwitchTo(oldctx);
     565             : 
     566             :         /* The files will not be opened until needed... */
     567             :         /* ... but make sure we have temp tablespaces established for them */
     568         126 :         PrepareTempTablespaces();
     569             :     }
     570             : 
     571       25488 :     MemoryContextSwitchTo(oldcxt);
     572             : 
     573       25488 :     if (hashtable->parallel_state)
     574             :     {
     575         414 :         ParallelHashJoinState *pstate = hashtable->parallel_state;
     576             :         Barrier    *build_barrier;
     577             : 
     578             :         /*
     579             :          * Attach to the build barrier.  The corresponding detach operation is
     580             :          * in ExecHashTableDetach.  Note that we won't attach to the
     581             :          * batch_barrier for batch 0 yet.  We'll attach later and start it out
     582             :          * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
     583             :          * then loaded while hashing (the standard hybrid hash join
     584             :          * algorithm), and we'll coordinate that using build_barrier.
     585             :          */
     586         414 :         build_barrier = &pstate->build_barrier;
     587         414 :         BarrierAttach(build_barrier);
     588             : 
     589             :         /*
     590             :          * So far we have no idea whether there are any other participants,
     591             :          * and if so, what phase they are working on.  The only thing we care
     592             :          * about at this point is whether someone has already created the
     593             :          * SharedHashJoinBatch objects and the hash table for batch 0.  One
     594             :          * backend will be elected to do that now if necessary.
     595             :          */
     596         592 :         if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
     597         178 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
     598             :         {
     599         174 :             pstate->nbatch = nbatch;
     600         174 :             pstate->space_allowed = space_allowed;
     601         174 :             pstate->growth = PHJ_GROWTH_OK;
     602             : 
     603             :             /* Set up the shared state for coordinating batches. */
     604         174 :             ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
     605             : 
     606             :             /*
     607             :              * Allocate batch 0's hash table up front so we can load it
     608             :              * directly while hashing.
     609             :              */
     610         174 :             pstate->nbuckets = nbuckets;
     611         174 :             ExecParallelHashTableAlloc(hashtable, 0);
     612             :         }
     613             : 
     614             :         /*
     615             :          * The next Parallel Hash synchronization point is in
     616             :          * MultiExecParallelHash(), which will progress it all the way to
     617             :          * PHJ_BUILD_RUN.  The caller must not return control from this
     618             :          * executor node between now and then.
     619             :          */
     620             :     }
     621             :     else
     622             :     {
     623             :         /*
     624             :          * Prepare context for the first-scan space allocations; allocate the
     625             :          * hashbucket array therein, and set each bucket "empty".
     626             :          */
     627       25074 :         MemoryContextSwitchTo(hashtable->batchCxt);
     628             : 
     629       25074 :         hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
     630             : 
     631             :         /*
     632             :          * Set up for skew optimization, if possible and there's a need for
     633             :          * more than one batch.  (In a one-batch join, there's no point in
     634             :          * it.)
     635             :          */
     636       25074 :         if (nbatch > 1)
     637         126 :             ExecHashBuildSkewHash(state, hashtable, node, num_skew_mcvs);
     638             : 
     639       25074 :         MemoryContextSwitchTo(oldcxt);
     640             :     }
     641             : 
     642       25488 :     return hashtable;
     643             : }
     644             : 
     645             : 
     646             : /*
     647             :  * Compute appropriate size for hashtable given the estimated size of the
     648             :  * relation to be hashed (number of rows and average row width).
     649             :  *
     650             :  * This is exported so that the planner's costsize.c can use it.
     651             :  */
     652             : 
     653             : /* Target bucket loading (tuples per bucket) */
     654             : #define NTUP_PER_BUCKET         1
     655             : 
     656             : void
     657      898818 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
     658             :                         bool try_combined_hash_mem,
     659             :                         int parallel_workers,
     660             :                         size_t *space_allowed,
     661             :                         int *numbuckets,
     662             :                         int *numbatches,
     663             :                         int *num_skew_mcvs)
     664             : {
     665             :     int         tupsize;
     666             :     double      inner_rel_bytes;
     667             :     size_t      hash_table_bytes;
     668             :     size_t      bucket_bytes;
     669             :     size_t      max_pointers;
     670      898818 :     int         nbatch = 1;
     671             :     int         nbuckets;
     672             :     double      dbuckets;
     673             : 
     674             :     /* Force a plausible relation size if no info */
     675      898818 :     if (ntuples <= 0.0)
     676         150 :         ntuples = 1000.0;
     677             : 
     678             :     /*
     679             :      * Estimate tupsize based on footprint of tuple in hashtable... note this
     680             :      * does not allow for any palloc overhead.  The manipulations of spaceUsed
     681             :      * don't count palloc overhead either.
     682             :      */
     683      898818 :     tupsize = HJTUPLE_OVERHEAD +
     684      898818 :         MAXALIGN(SizeofMinimalTupleHeader) +
     685      898818 :         MAXALIGN(tupwidth);
     686      898818 :     inner_rel_bytes = ntuples * tupsize;
     687             : 
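    /*
     * Rough sense of scale (illustrative, not exact): on a typical 64-bit
     * build, HJTUPLE_OVERHEAD and MAXALIGN(SizeofMinimalTupleHeader) come to
     * roughly 16 and 24 bytes, so a tupwidth of 40 gives a tupsize of about
     * 16 + 24 + 40 = 80 bytes, and a million tuples imply inner_rel_bytes of
     * roughly 80 MB (before the palloc overhead that is ignored here).
     */
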
     688             :     /*
     689             :      * Compute in-memory hashtable size limit from GUCs.
     690             :      */
     691      898818 :     hash_table_bytes = get_hash_memory_limit();
     692             : 
     693             :     /*
     694             :      * Parallel Hash tries to use the combined hash_mem of all workers to
     695             :      * avoid the need to batch.  If that won't work, it falls back to hash_mem
     696             :      * per worker and tries to process batches in parallel.
     697             :      */
     698      898818 :     if (try_combined_hash_mem)
     699             :     {
     700             :         /* Careful, this could overflow size_t */
     701             :         double      newlimit;
     702             : 
     703       75570 :         newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
     704       75570 :         newlimit = Min(newlimit, (double) SIZE_MAX);
     705       75570 :         hash_table_bytes = (size_t) newlimit;
     706             :     }
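
    /*
     * For example, with a per-process hash_mem limit of 64 MB and 4 planned
     * workers (5 participants including the leader), the combined budget
     * computed above would be 5 * 64 MB = 320 MB.
     */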
     707             : 
     708      898818 :     *space_allowed = hash_table_bytes;
     709             : 
     710             :     /*
     711             :      * If skew optimization is possible, estimate the number of skew buckets
     712             :      * that will fit in the memory allowed, and decrement the assumed space
     713             :      * available for the main hash table accordingly.
     714             :      *
     715             :      * We make the optimistic assumption that each skew bucket will contain
     716             :      * one inner-relation tuple.  If that turns out to be low, we will recover
     717             :      * at runtime by reducing the number of skew buckets.
     718             :      *
     719             :      * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
     720             :      * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
     721             :      * will round up to the next power of 2 and then multiply by 4 to reduce
     722             :      * collisions.
     723             :      */
     724      898818 :     if (useskew)
     725             :     {
     726             :         size_t      bytes_per_mcv;
     727             :         size_t      skew_mcvs;
     728             : 
     729             :         /*----------
     730             :          * Compute number of MCVs we could hold in hash_table_bytes
     731             :          *
     732             :          * Divisor is:
     733             :          * size of a hash tuple +
     734             :          * worst-case size of skewBucket[] per MCV +
     735             :          * size of skewBucketNums[] entry +
     736             :          * size of skew bucket struct itself
     737             :          *----------
     738             :          */
     739      893050 :         bytes_per_mcv = tupsize +
     740             :             (8 * sizeof(HashSkewBucket *)) +
     741      893050 :             sizeof(int) +
     742             :             SKEW_BUCKET_OVERHEAD;
     743      893050 :         skew_mcvs = hash_table_bytes / bytes_per_mcv;
     744             : 
     745             :         /*
     746             :          * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
     747             :          * not to worry about size_t overflow in the multiplication)
     748             :          */
     749      893050 :         skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
     750             : 
     751             :         /* Now clamp to integer range */
     752      893050 :         skew_mcvs = Min(skew_mcvs, INT_MAX);
     753             : 
     754      893050 :         *num_skew_mcvs = (int) skew_mcvs;
     755             : 
     756             :         /* Reduce hash_table_bytes by the amount needed for the skew table */
     757      893050 :         if (skew_mcvs > 0)
     758      893050 :             hash_table_bytes -= skew_mcvs * bytes_per_mcv;
     759             :     }
     760             :     else
     761        5768 :         *num_skew_mcvs = 0;
     762             : 
     763             :     /*
     764             :      * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
     765             :      * memory is filled, assuming a single batch; but limit the value so that
     766             :      * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
     767             :      * nor MaxAllocSize.
     768             :      *
     769             :      * Note that both nbuckets and nbatch must be powers of 2 to make
     770             :      * ExecHashGetBucketAndBatch fast.
     771             :      */
     772      898818 :     max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
     773      898818 :     max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
     774             :     /* If max_pointers isn't a power of 2, must round it down to one */
     775      898818 :     max_pointers = pg_prevpower2_size_t(max_pointers);
     776             : 
     777             :     /* Also ensure we avoid integer overflow in nbatch and nbuckets */
     778             :     /* (this step is redundant given the current value of MaxAllocSize) */
     779      898818 :     max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
     780             : 
     781      898818 :     dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
     782      898818 :     dbuckets = Min(dbuckets, max_pointers);
     783      898818 :     nbuckets = (int) dbuckets;
     784             :     /* don't let nbuckets be really small, though ... */
     785      898818 :     nbuckets = Max(nbuckets, 1024);
     786             :     /* ... and force it to be a power of 2. */
     787      898818 :     nbuckets = pg_nextpower2_32(nbuckets);
     788             : 
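    /*
     * For example, an estimate of 150,000 tuples with NTUP_PER_BUCKET = 1
     * gives dbuckets = 150000, which (assuming max_pointers permits) rounds
     * up to nbuckets = 262144, i.e. 2^18.
     */
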
     789             :     /*
     790             :      * If there's not enough space to store the projected number of tuples and
     791             :      * the required bucket headers, we will need multiple batches.
     792             :      */
     793      898818 :     bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
     794      898818 :     if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
     795             :     {
     796             :         /* We'll need multiple batches */
     797             :         size_t      sbuckets;
     798             :         double      dbatch;
     799             :         int         minbatch;
     800             :         size_t      bucket_size;
     801             : 
     802             :         /*
     803             :          * If Parallel Hash with combined hash_mem would still need multiple
     804             :          * batches, we'll have to fall back to regular hash_mem budget.
     805             :          */
     806        5166 :         if (try_combined_hash_mem)
     807             :         {
     808         246 :             ExecChooseHashTableSize(ntuples, tupwidth, useskew,
     809             :                                     false, parallel_workers,
     810             :                                     space_allowed,
     811             :                                     numbuckets,
     812             :                                     numbatches,
     813             :                                     num_skew_mcvs);
     814         246 :             return;
     815             :         }
     816             : 
     817             :         /*
     818             :          * Estimate the number of buckets we'll want to have when hash_mem is
     819             :          * entirely full.  Each bucket will contain a bucket pointer plus
     820             :          * NTUP_PER_BUCKET tuples, whose projected size already includes
     821             :          * overhead for the hash code, pointer to the next tuple, etc.
     822             :          */
     823        4920 :         bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
     824        4920 :         if (hash_table_bytes <= bucket_size)
     825           0 :             sbuckets = 1;       /* avoid pg_nextpower2_size_t(0) */
     826             :         else
     827        4920 :             sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
     828        4920 :         sbuckets = Min(sbuckets, max_pointers);
     829        4920 :         nbuckets = (int) sbuckets;
     830        4920 :         nbuckets = pg_nextpower2_32(nbuckets);
     831        4920 :         bucket_bytes = nbuckets * sizeof(HashJoinTuple);
     832             : 
     833             :         /*
     834             :          * Buckets are simple pointers to hashjoin tuples, while tupsize
     835             :          * includes the pointer, hash code, and MinimalTupleData.  So buckets
     836             :          * should never really exceed 25% of hash_mem (even for
     837             :          * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
     838             :          * 2^N bytes, where we might get more because of doubling. So let's
     839             :          * look for 50% here.
     840             :          */
     841             :         Assert(bucket_bytes <= hash_table_bytes / 2);
     842             : 
     843             :         /* Calculate required number of batches. */
     844        4920 :         dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
     845        4920 :         dbatch = Min(dbatch, max_pointers);
     846        4920 :         minbatch = (int) dbatch;
     847        4920 :         nbatch = pg_nextpower2_32(Max(2, minbatch));
     848             :     }
     849             : 
     850             :     /*
     851             :      * Optimize the total amount of memory consumed by the hash node.
     852             :      *
     853             :      * The nbatch calculation above focuses on the in-memory hash table,
     854             :      * assuming no per-batch overhead. But each batch may have two files, each
     855             :      * with a BLCKSZ buffer. For large nbatch values these buffers may use
     856             :      * significantly more memory than the hash table.
     857             :      *
     858             :      * The total memory usage may be expressed by this formula:
     859             :      *
     860             :      * (inner_rel_bytes / nbatch) + (2 * nbatch * BLCKSZ)
     861             :      *
     862             :      * where (inner_rel_bytes / nbatch) is the size of the in-memory hash
     863             :      * table and (2 * nbatch * BLCKSZ) is the amount of memory used by file
     864             :      * buffers.
     865             :      *
     866             :      * The nbatch calculation however ignores the second part. And for very
     867             :      * large inner_rel_bytes, there may be no nbatch that keeps total memory
     868             :      * usage under the budget (work_mem * hash_mem_multiplier). To deal with
     869             :      * that, we will adjust nbatch to minimize total memory consumption across
     870             :      * both the hashtable and file buffers.
     871             :      *
     872             :      * As we increase the size of the hashtable, the number of batches
     873             :      * decreases, and the total memory usage follows a U-shaped curve. We find
     874             :      * the minimum nbatch by "walking back" -- checking if halving nbatch
     875             :      * would lower the total memory usage. We stop when it no longer helps.
     876             :      *
     877             :      * We only reduce the number of batches. Adding batches reduces memory
     878             :      * usage only when most of the memory is used by the hash table, with
     879             :      * total memory usage within the limit or not far from it. We don't want
     880             :      * to start batching when not needed, even if that would reduce memory
     881             :      * usage.
     882             :      *
     883             :      * While growing the hashtable, we also adjust the number of buckets to
     884             :      * maintain a load factor of NTUP_PER_BUCKET while squeezing tuples back
     885             :      * from batches into the hashtable.
     886             :      *
     887             :      * Note that we can only change nbuckets during initial hashtable sizing.
     888             :      * Once we start building the hash, nbuckets is fixed (we may still grow
     889             :      * the hash table).
     890             :      *
     891             :      * We double several parameters (space_allowed, nbuckets, num_skew_mcvs),
     892             :      * which introduces a risk of overflow. We avoid this by exiting the loop.
     893             :      * We could do something smarter (e.g. capping nbuckets and continue), but
     894             :      * the complexity is not worth it. Such cases are extremely rare, and this
     895             :      * is a best-effort attempt to reduce memory usage.
     896             :      */
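    /*
     * An illustrative example (round numbers, not from a real workload):
     * suppose the code above arrived at a 4 MB budget and nbatch = 4096,
     * with BLCKSZ at its default 8 kB.  Total memory is then about 4 MB
     * (hashtable) + 4096 * 2 * 8 kB = 64 MB (file buffers) = 68 MB.
     * Walking back: nbatch = 2048 gives 8 + 32 = 40 MB, nbatch = 1024 gives
     * 16 + 16 = 32 MB, and nbatch = 512 would give 32 + 8 = 40 MB again, so
     * the loop below stops at nbatch = 1024 -- the bottom of the U-shaped
     * curve, where the hashtable budget and the file buffers are about the
     * same size (space_allowed ~ 2 * nbatch * BLCKSZ).
     */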
     897      899406 :     while (nbatch > 1)
     898             :     {
     899             :         /* Check that buckets won't overflow MaxAllocSize */
     900        5754 :         if (nbuckets > (MaxAllocSize / sizeof(HashJoinTuple) / 2))
     901           0 :             break;
     902             : 
     903             :         /* num_skew_mcvs should be less than nbuckets */
     904             :         Assert((*num_skew_mcvs) < (INT_MAX / 2));
     905             : 
     906             :         /*
     907             :          * Check that space_allowed won't overflow SIZE_MAX.
     908             :          *
     909             :          * We don't use hash_table_bytes here, because it does not include the
     910             :          * skew buckets, and we want to cap the overall memory usage.
     911             :          */
     912        5754 :         if ((*space_allowed) > (SIZE_MAX / 2))
     913           0 :             break;
     914             : 
     915             :         /*
     916             :          * Will halving the number of batches and doubling the size of the
     917             :          * hashtable reduce overall memory usage?  If not, stop here.  With
     918             :          * S = space_allowed, halving stops helping once
     919             :          *
     920             :          * (S + 2 * nbatch * BLCKSZ) < (S * 2 + nbatch * BLCKSZ)
     921             :          *
     922             :          * i.e. once (nbatch * BLCKSZ) < S.  The test below is that
     923             :          * condition, rearranged to avoid intermediate overflow.
     924             :          */
     925        5754 :         if (nbatch < (*space_allowed) / BLCKSZ)
     926        4920 :             break;
     927             : 
     928             :         /*
     929             :          * MaxAllocSize is sufficiently small that we are not worried about
     930             :          * overflowing nbuckets.
     931             :          */
     932         834 :         nbuckets *= 2;
     933             : 
     934         834 :         *num_skew_mcvs = (*num_skew_mcvs) * 2;
     935         834 :         *space_allowed = (*space_allowed) * 2;
     936             : 
     937         834 :         nbatch /= 2;
     938             :     }
     939             : 
     940             :     Assert(nbuckets > 0);
     941             :     Assert(nbatch > 0);
     942             : 
     943      898572 :     *numbuckets = nbuckets;
     944      898572 :     *numbatches = nbatch;
     945             : }
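
The function above is exported so the planner's costsize.c can estimate the hash join's shape without building anything.  A hedged sketch of such a call site (the inputs here -- inner_path_rows, inner_path_width, parallel_hash, num_workers -- are hypothetical stand-ins for the planner's own estimates, not upstream code):

    size_t      space_allowed;
    int         numbuckets;
    int         numbatches;
    int         num_skew_mcvs;

    ExecChooseHashTableSize(inner_path_rows, inner_path_width,
                            true,                       /* consider skew */
                            parallel_hash,              /* combined hash_mem? */
                            parallel_hash ? num_workers : 0,
                            &space_allowed,
                            &numbuckets, &numbatches, &num_skew_mcvs);
    /* numbatches > 1 implies the join is expected to spill to batch files */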
     946             : 
     947             : 
     948             : /* ----------------------------------------------------------------
     949             :  *      ExecHashTableDestroy
     950             :  *
     951             :  *      destroy a hash table
     952             :  * ----------------------------------------------------------------
     953             :  */
     954             : void
     955       25378 : ExecHashTableDestroy(HashJoinTable hashtable)
     956             : {
     957             :     int         i;
     958             : 
     959             :     /*
     960             :      * Make sure all the temp files are closed.  We skip batch 0, since it
     961             :      * can't have any temp files (and the arrays might not even exist if
     962             :      * nbatch is only 1).  Parallel hash joins don't use these files.
     963             :      */
     964       25378 :     if (hashtable->innerBatchFile != NULL)
     965             :     {
     966        1172 :         for (i = 1; i < hashtable->nbatch; i++)
     967             :         {
     968         952 :             if (hashtable->innerBatchFile[i])
     969           0 :                 BufFileClose(hashtable->innerBatchFile[i]);
     970         952 :             if (hashtable->outerBatchFile[i])
     971           0 :                 BufFileClose(hashtable->outerBatchFile[i]);
     972             :         }
     973             :     }
     974             : 
     975             :     /* Release working memory (batchCxt is a child, so it goes away too) */
     976       25378 :     MemoryContextDelete(hashtable->hashCxt);
     977             : 
     978             :     /* And drop the control block */
     979       25378 :     pfree(hashtable);
     980       25378 : }
     981             : 
     982             : /*
     983             :  * Consider adjusting the allowed hash table size, depending on the number
     984             :  * of batches, to minimize the overall memory usage (for both the hashtable
     985             :  * and batch files).
     986             :  *
     987             :  * We're adjusting the size of the hash table, not the (optimal) number of
     988             :  * buckets. We can't change that once we start building the hash, due to how
     989             :  * ExecHashGetBucketAndBatch calculates batchno/bucketno from the hash. This
      990             :  * means the load factor may not be optimal, but we're in damage control, so
     991             :  * we accept slower lookups. It's still much better than batch explosion.
     992             :  *
     993             :  * Returns true if we chose to increase the batch size (and thus we don't
     994             :  * need to add batches), and false if we should increase nbatch.
     995             :  */
     996             : static bool
     997         196 : ExecHashIncreaseBatchSize(HashJoinTable hashtable)
     998             : {
     999             :     /*
    1000             :      * How much additional memory would doubling nbatch use? Each batch may
    1001             :      * require two buffered files (inner/outer), with a BLCKSZ buffer.
    1002             :      */
    1003         196 :     size_t      batchSpace = (hashtable->nbatch * 2 * (size_t) BLCKSZ);
    1004             : 
    1005             :     /*
    1006             :      * Compare the new space needed for doubling nbatch and for enlarging the
    1007             :      * in-memory hash table. If doubling the hash table needs less memory,
     1008             :      * just do that. Otherwise, continue with doubling nbatch.
    1009             :      *
     1010             :      * We're either doubling spaceAllowed or doubling batchSpace, so deciding
     1011             :      * which of the two increases memory usage the least comes down to simply
     1012             :      * comparing the two values directly.
    1013             :      */
    1014         196 :     if (hashtable->spaceAllowed <= batchSpace)
    1015             :     {
    1016           0 :         hashtable->spaceAllowed *= 2;
    1017           0 :         return true;
    1018             :     }
    1019             : 
    1020         196 :     return false;
    1021             : }
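
The same comparison with illustrative (made-up) numbers: at nbatch = 512 and BLCKSZ = 8 kB, batchSpace = 512 * 2 * 8 kB = 8 MB.

    spaceAllowed =  4 MB  ->  4 MB <= 8 MB : double spaceAllowed, return true
    spaceAllowed = 16 MB  -> 16 MB >  8 MB : return false; the caller goes on
                                             to double nbatch instead

Either way the cheaper of the two doublings is chosen, so this step never increases peak memory by more than doubling nbatch would have.
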
    1022             : 
    1023             : /*
    1024             :  * ExecHashIncreaseNumBatches
    1025             :  *      increase the original number of batches in order to reduce
    1026             :  *      current memory consumption
    1027             :  */
    1028             : static void
    1029      794616 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
    1030             : {
    1031      794616 :     int         oldnbatch = hashtable->nbatch;
    1032      794616 :     int         curbatch = hashtable->curbatch;
    1033             :     int         nbatch;
    1034             :     long        ninmemory;
    1035             :     long        nfreed;
    1036             :     HashMemoryChunk oldchunks;
    1037             : 
    1038             :     /* do nothing if we've decided to shut off growth */
    1039      794616 :     if (!hashtable->growEnabled)
    1040      794420 :         return;
    1041             : 
    1042             :     /* safety check to avoid overflow */
    1043         196 :     if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
    1044           0 :         return;
    1045             : 
    1046             :     /* consider increasing size of the in-memory hash table instead */
    1047         196 :     if (ExecHashIncreaseBatchSize(hashtable))
    1048           0 :         return;
    1049             : 
    1050         196 :     nbatch = oldnbatch * 2;
    1051             :     Assert(nbatch > 1);
    1052             : 
    1053             : #ifdef HJDEBUG
    1054             :     printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
    1055             :            hashtable, nbatch, hashtable->spaceUsed);
    1056             : #endif
    1057             : 
    1058         196 :     if (hashtable->innerBatchFile == NULL)
    1059             :     {
    1060          94 :         MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    1061             : 
    1062             :         /* we had no file arrays before */
    1063          94 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
    1064          94 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
    1065             : 
    1066          94 :         MemoryContextSwitchTo(oldcxt);
    1067             : 
    1068             :         /* time to establish the temp tablespaces, too */
    1069          94 :         PrepareTempTablespaces();
    1070             :     }
    1071             :     else
    1072             :     {
    1073             :         /* enlarge arrays and zero out added entries */
    1074         102 :         hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
    1075         102 :         hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
    1076             :     }
    1077             : 
    1078         196 :     hashtable->nbatch = nbatch;
    1079             : 
    1080             :     /*
     1081             :      * Scan through the existing hash table entries and dump out any that no
     1082             :      * longer belong to the current batch.
    1083             :      */
    1084         196 :     ninmemory = nfreed = 0;
    1085             : 
     1086             :     /* If we know we need to resize nbuckets, we can do it while rebatching. */
    1087         196 :     if (hashtable->nbuckets_optimal != hashtable->nbuckets)
    1088             :     {
    1089             :         /* we never decrease the number of buckets */
    1090             :         Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
    1091             : 
    1092          94 :         hashtable->nbuckets = hashtable->nbuckets_optimal;
    1093          94 :         hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
    1094             : 
    1095          94 :         hashtable->buckets.unshared =
    1096          94 :             repalloc_array(hashtable->buckets.unshared,
    1097             :                            HashJoinTuple, hashtable->nbuckets);
    1098             :     }
    1099             : 
    1100             :     /*
    1101             :      * We will scan through the chunks directly, so that we can reset the
     1102             :      * buckets now and not have to keep track of which tuples in the buckets have
    1103             :      * already been processed. We will free the old chunks as we go.
    1104             :      */
    1105         196 :     memset(hashtable->buckets.unshared, 0,
    1106         196 :            sizeof(HashJoinTuple) * hashtable->nbuckets);
    1107         196 :     oldchunks = hashtable->chunks;
    1108         196 :     hashtable->chunks = NULL;
    1109             : 
    1110             :     /* so, let's scan through the old chunks, and all tuples in each chunk */
    1111         980 :     while (oldchunks != NULL)
    1112             :     {
    1113         784 :         HashMemoryChunk nextchunk = oldchunks->next.unshared;
    1114             : 
    1115             :         /* position within the buffer (up to oldchunks->used) */
    1116         784 :         size_t      idx = 0;
    1117             : 
    1118             :         /* process all tuples stored in this chunk (and then free it) */
    1119      535628 :         while (idx < oldchunks->used)
    1120             :         {
    1121      534844 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
    1122      534844 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1123      534844 :             int         hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
    1124             :             int         bucketno;
    1125             :             int         batchno;
    1126             : 
    1127      534844 :             ninmemory++;
    1128      534844 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1129             :                                       &bucketno, &batchno);
    1130             : 
    1131      534844 :             if (batchno == curbatch)
    1132             :             {
    1133             :                 /* keep tuple in memory - copy it into the new chunk */
    1134             :                 HashJoinTuple copyTuple;
    1135             : 
    1136      202866 :                 copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1137      202866 :                 memcpy(copyTuple, hashTuple, hashTupleSize);
    1138             : 
    1139             :                 /* and add it back to the appropriate bucket */
    1140      202866 :                 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1141      202866 :                 hashtable->buckets.unshared[bucketno] = copyTuple;
    1142             :             }
    1143             :             else
    1144             :             {
    1145             :                 /* dump it out */
    1146             :                 Assert(batchno > curbatch);
    1147      331978 :                 ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
    1148             :                                       hashTuple->hashvalue,
    1149      331978 :                                       &hashtable->innerBatchFile[batchno],
    1150             :                                       hashtable);
    1151             : 
    1152      331978 :                 hashtable->spaceUsed -= hashTupleSize;
    1153      331978 :                 nfreed++;
    1154             :             }
    1155             : 
    1156             :             /* next tuple in this chunk */
    1157      534844 :             idx += MAXALIGN(hashTupleSize);
    1158             : 
    1159             :             /* allow this loop to be cancellable */
    1160      534844 :             CHECK_FOR_INTERRUPTS();
    1161             :         }
    1162             : 
    1163             :         /* we're done with this chunk - free it and proceed to the next one */
    1164         784 :         pfree(oldchunks);
    1165         784 :         oldchunks = nextchunk;
    1166             :     }
    1167             : 
    1168             : #ifdef HJDEBUG
    1169             :     printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
    1170             :            hashtable, nfreed, ninmemory, hashtable->spaceUsed);
    1171             : #endif
    1172             : 
    1173             :     /*
    1174             :      * If we dumped out either all or none of the tuples in the table, disable
    1175             :      * further expansion of nbatch.  This situation implies that we have
    1176             :      * enough tuples of identical hashvalues to overflow spaceAllowed.
    1177             :      * Increasing nbatch will not fix it since there's no way to subdivide the
    1178             :      * group any more finely. We have to just gut it out and hope the server
    1179             :      * has enough RAM.
    1180             :      */
    1181         196 :     if (nfreed == 0 || nfreed == ninmemory)
    1182             :     {
    1183          46 :         hashtable->growEnabled = false;
    1184             : #ifdef HJDEBUG
    1185             :         printf("Hashjoin %p: disabling further increase of nbatch\n",
    1186             :                hashtable);
    1187             : #endif
    1188             :     }
    1189             : }
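
The repartitioning above leans on how batch numbers are derived from hash values. The following is a simplified sketch, not the exact ExecHashGetBucketAndBatch code (the real routine appears further down in this file and uses a slightly different bit manipulation), but it shows the property being relied on: bucketno comes from the low hash bits and batchno from the bits above them, so doubling nbatch exposes one extra bit and a tuple either keeps its old batch number or moves to batchno + oldnbatch, never to an earlier batch, which is exactly what the Assert(batchno > curbatch) above depends on.

    #include <stdint.h>

    /* simplified sketch; assumes nbuckets and nbatch are powers of two */
    static inline void
    sketch_bucket_and_batch(uint32_t hashvalue, int log2_nbuckets,
                            int nbuckets, int nbatch,
                            int *bucketno, int *batchno)
    {
        *bucketno = (int) (hashvalue & (uint32_t) (nbuckets - 1));
        *batchno = (int) ((hashvalue >> log2_nbuckets) & (uint32_t) (nbatch - 1));
    }
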
    1190             : 
    1191             : /*
    1192             :  * ExecParallelHashIncreaseNumBatches
    1193             :  *      Every participant attached to grow_batches_barrier must run this
    1194             :  *      function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
    1195             :  */
    1196             : static void
    1197          54 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
    1198             : {
    1199          54 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1200             : 
    1201             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1202             : 
    1203             :     /*
    1204             :      * It's unlikely, but we need to be prepared for new participants to show
     1205             :      * up while we're in the middle of this operation, so we need to switch on
     1206             :      * the barrier phase here.
    1207             :      */
    1208          54 :     switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
    1209             :     {
    1210          54 :         case PHJ_GROW_BATCHES_ELECT:
    1211             : 
    1212             :             /*
    1213             :              * Elect one participant to prepare to grow the number of batches.
    1214             :              * This involves reallocating or resetting the buckets of batch 0
    1215             :              * in preparation for all participants to begin repartitioning the
    1216             :              * tuples.
    1217             :              */
    1218          54 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1219             :                                      WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
    1220             :             {
    1221             :                 dsa_pointer_atomic *buckets;
    1222             :                 ParallelHashJoinBatch *old_batch0;
    1223             :                 int         new_nbatch;
    1224             :                 int         i;
    1225             : 
    1226             :                 /* Move the old batch out of the way. */
    1227          52 :                 old_batch0 = hashtable->batches[0].shared;
    1228          52 :                 pstate->old_batches = pstate->batches;
    1229          52 :                 pstate->old_nbatch = hashtable->nbatch;
    1230          52 :                 pstate->batches = InvalidDsaPointer;
    1231             : 
    1232             :                 /* Free this backend's old accessors. */
    1233          52 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1234             : 
    1235             :                 /* Figure out how many batches to use. */
    1236          52 :                 if (hashtable->nbatch == 1)
    1237             :                 {
    1238             :                     /*
    1239             :                      * We are going from single-batch to multi-batch.  We need
    1240             :                      * to switch from one large combined memory budget to the
    1241             :                      * regular hash_mem budget.
    1242             :                      */
    1243          36 :                     pstate->space_allowed = get_hash_memory_limit();
    1244             : 
    1245             :                     /*
    1246             :                      * The combined hash_mem of all participants wasn't
    1247             :                      * enough. Therefore one batch per participant would be
    1248             :                      * approximately equivalent and would probably also be
    1249             :                      * insufficient.  So try two batches per participant,
    1250             :                      * rounded up to a power of two.
    1251             :                      */
    1252          36 :                     new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
    1253             :                 }
    1254             :                 else
    1255             :                 {
    1256             :                     /*
    1257             :                      * We were already multi-batched.  Try doubling the number
    1258             :                      * of batches.
    1259             :                      */
    1260          16 :                     new_nbatch = hashtable->nbatch * 2;
    1261             :                 }
    1262             : 
    1263             :                 /* Allocate new larger generation of batches. */
    1264             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1265          52 :                 ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
    1266             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1267             : 
    1268             :                 /* Replace or recycle batch 0's bucket array. */
    1269          52 :                 if (pstate->old_nbatch == 1)
    1270             :                 {
    1271             :                     double      dtuples;
    1272             :                     double      dbuckets;
    1273             :                     int         new_nbuckets;
    1274             :                     uint32      max_buckets;
    1275             : 
    1276             :                     /*
    1277             :                      * We probably also need a smaller bucket array.  How many
    1278             :                      * tuples do we expect per batch, assuming we have only
    1279             :                      * half of them so far?  Normally we don't need to change
    1280             :                      * the bucket array's size, because the size of each batch
    1281             :                      * stays the same as we add more batches, but in this
    1282             :                      * special case we move from a large batch to many smaller
    1283             :                      * batches and it would be wasteful to keep the large
    1284             :                      * array.
    1285             :                      */
    1286          36 :                     dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
    1287             : 
    1288             :                     /*
    1289             :                      * We need to calculate the maximum number of buckets to
    1290             :                      * stay within the MaxAllocSize boundary.  Round the
    1291             :                      * maximum number to the previous power of 2 given that
    1292             :                      * later we round the number to the next power of 2.
    1293             :                      */
    1294          36 :                     max_buckets = pg_prevpower2_32((uint32)
    1295             :                                                    (MaxAllocSize / sizeof(dsa_pointer_atomic)));
    1296          36 :                     dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
    1297          36 :                     dbuckets = Min(dbuckets, max_buckets);
    1298          36 :                     new_nbuckets = (int) dbuckets;
    1299          36 :                     new_nbuckets = Max(new_nbuckets, 1024);
    1300          36 :                     new_nbuckets = pg_nextpower2_32(new_nbuckets);
    1301          36 :                     dsa_free(hashtable->area, old_batch0->buckets);
    1302          72 :                     hashtable->batches[0].shared->buckets =
    1303          36 :                         dsa_allocate(hashtable->area,
    1304             :                                      sizeof(dsa_pointer_atomic) * new_nbuckets);
    1305             :                     buckets = (dsa_pointer_atomic *)
    1306          36 :                         dsa_get_address(hashtable->area,
    1307          36 :                                         hashtable->batches[0].shared->buckets);
    1308      110628 :                     for (i = 0; i < new_nbuckets; ++i)
    1309      110592 :                         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1310          36 :                     pstate->nbuckets = new_nbuckets;
    1311             :                 }
    1312             :                 else
    1313             :                 {
    1314             :                     /* Recycle the existing bucket array. */
    1315          16 :                     hashtable->batches[0].shared->buckets = old_batch0->buckets;
    1316             :                     buckets = (dsa_pointer_atomic *)
    1317          16 :                         dsa_get_address(hashtable->area, old_batch0->buckets);
    1318       65552 :                     for (i = 0; i < hashtable->nbuckets; ++i)
    1319       65536 :                         dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
    1320             :                 }
    1321             : 
    1322             :                 /* Move all chunks to the work queue for parallel processing. */
    1323          52 :                 pstate->chunk_work_queue = old_batch0->chunks;
    1324             : 
    1325             :                 /* Disable further growth temporarily while we're growing. */
    1326          52 :                 pstate->growth = PHJ_GROWTH_DISABLED;
    1327             :             }
    1328             :             else
    1329             :             {
    1330             :                 /* All other participants just flush their tuples to disk. */
    1331           2 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1332             :             }
    1333             :             /* Fall through. */
    1334             : 
    1335             :         case PHJ_GROW_BATCHES_REALLOCATE:
    1336             :             /* Wait for the above to be finished. */
    1337          54 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1338             :                                  WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
    1339             :             /* Fall through. */
    1340             : 
    1341          54 :         case PHJ_GROW_BATCHES_REPARTITION:
    1342             :             /* Make sure that we have the current dimensions and buckets. */
    1343          54 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1344          54 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1345             :             /* Then partition, flush counters. */
    1346          54 :             ExecParallelHashRepartitionFirst(hashtable);
    1347          54 :             ExecParallelHashRepartitionRest(hashtable);
    1348          54 :             ExecParallelHashMergeCounters(hashtable);
    1349             :             /* Wait for the above to be finished. */
    1350          54 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1351             :                                  WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
    1352             :             /* Fall through. */
    1353             : 
    1354          54 :         case PHJ_GROW_BATCHES_DECIDE:
    1355             : 
    1356             :             /*
    1357             :              * Elect one participant to clean up and decide whether further
    1358             :              * repartitioning is needed, or should be disabled because it's
    1359             :              * not helping.
    1360             :              */
    1361          54 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1362             :                                      WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
    1363             :             {
    1364             :                 ParallelHashJoinBatch *old_batches;
    1365          52 :                 bool        space_exhausted = false;
    1366          52 :                 bool        extreme_skew_detected = false;
    1367             : 
    1368             :                 /* Make sure that we have the current dimensions and buckets. */
    1369          52 :                 ExecParallelHashEnsureBatchAccessors(hashtable);
    1370          52 :                 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1371             : 
    1372          52 :                 old_batches = dsa_get_address(hashtable->area, pstate->old_batches);
    1373             : 
    1374             :                 /* Are any of the new generation of batches exhausted? */
    1375         372 :                 for (int i = 0; i < hashtable->nbatch; ++i)
    1376             :                 {
    1377             :                     ParallelHashJoinBatch *batch;
    1378             :                     ParallelHashJoinBatch *old_batch;
    1379             :                     int         parent;
    1380             : 
    1381         320 :                     batch = hashtable->batches[i].shared;
    1382         320 :                     if (batch->space_exhausted ||
    1383         320 :                         batch->estimated_size > pstate->space_allowed)
    1384          24 :                         space_exhausted = true;
    1385             : 
    1386         320 :                     parent = i % pstate->old_nbatch;
    1387         320 :                     old_batch = NthParallelHashJoinBatch(old_batches, parent);
    1388         320 :                     if (old_batch->space_exhausted ||
    1389          96 :                         batch->estimated_size > pstate->space_allowed)
    1390             :                     {
    1391             :                         /*
    1392             :                          * Did this batch receive ALL of the tuples from its
    1393             :                          * parent batch?  That would indicate that further
    1394             :                          * repartitioning isn't going to help (the hash values
    1395             :                          * are probably all the same).
    1396             :                          */
    1397         224 :                         if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
    1398          24 :                             extreme_skew_detected = true;
    1399             :                     }
    1400             :                 }
    1401             : 
    1402             :                 /* Don't keep growing if it's not helping or we'd overflow. */
    1403          52 :                 if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
    1404          24 :                     pstate->growth = PHJ_GROWTH_DISABLED;
    1405          28 :                 else if (space_exhausted)
    1406           0 :                     pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    1407             :                 else
    1408          28 :                     pstate->growth = PHJ_GROWTH_OK;
    1409             : 
    1410             :                 /* Free the old batches in shared memory. */
    1411          52 :                 dsa_free(hashtable->area, pstate->old_batches);
    1412          52 :                 pstate->old_batches = InvalidDsaPointer;
    1413             :             }
    1414             :             /* Fall through. */
    1415             : 
    1416             :         case PHJ_GROW_BATCHES_FINISH:
    1417             :             /* Wait for the above to complete. */
    1418          54 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1419             :                                  WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
    1420             :     }
    1421          54 : }
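
Stripped of the hash-join specifics, the coordination pattern in the switch above is roughly the one sketched below (illustrative only; prepare_serially() and repartition_in_parallel() are hypothetical placeholders for the real work). BarrierArriveAndWait returns true in exactly one backend, which does the serial preparation; everyone then waits again before doing the parallel part, and switching on the current phase lets a late-attaching participant jump in at whichever step the group has already reached.

    /* illustrative sketch, assuming the Barrier API from storage/barrier.h */
    static void
    sketch_grow_step(Barrier *barrier)
    {
        switch (BarrierPhase(barrier) % 3)
        {
            case 0:
                if (BarrierArriveAndWait(barrier, 0))
                    prepare_serially();             /* one elected backend only */
                /* Fall through. */
            case 1:
                BarrierArriveAndWait(barrier, 0);   /* wait for preparation */
                /* Fall through. */
            case 2:
                repartition_in_parallel();          /* every attached backend */
                BarrierArriveAndWait(barrier, 0);   /* wait for completion */
        }
    }
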
    1422             : 
    1423             : /*
    1424             :  * Repartition the tuples currently loaded into memory for inner batch 0
    1425             :  * because the number of batches has been increased.  Some tuples are retained
    1426             :  * in memory and some are written out to a later batch.
    1427             :  */
    1428             : static void
    1429          54 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
    1430             : {
    1431             :     dsa_pointer chunk_shared;
    1432             :     HashMemoryChunk chunk;
    1433             : 
    1434             :     Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
    1435             : 
    1436         410 :     while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
    1437             :     {
    1438         302 :         size_t      idx = 0;
    1439             : 
    1440             :         /* Repartition all tuples in this chunk. */
    1441      230660 :         while (idx < chunk->used)
    1442             :         {
    1443      230358 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1444      230358 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1445             :             HashJoinTuple copyTuple;
    1446             :             dsa_pointer shared;
    1447             :             int         bucketno;
    1448             :             int         batchno;
    1449             : 
    1450      230358 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1451             :                                       &bucketno, &batchno);
    1452             : 
    1453             :             Assert(batchno < hashtable->nbatch);
    1454      230358 :             if (batchno == 0)
    1455             :             {
    1456             :                 /* It still belongs in batch 0.  Copy to a new chunk. */
    1457             :                 copyTuple =
    1458       55578 :                     ExecParallelHashTupleAlloc(hashtable,
    1459       55578 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1460             :                                                &shared);
    1461       55578 :                 copyTuple->hashvalue = hashTuple->hashvalue;
    1462       55578 :                 memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
    1463       55578 :                 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1464             :                                           copyTuple, shared);
    1465             :             }
    1466             :             else
    1467             :             {
    1468      174780 :                 size_t      tuple_size =
    1469      174780 :                     MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1470             : 
    1471             :                 /* It belongs in a later batch. */
    1472      174780 :                 hashtable->batches[batchno].estimated_size += tuple_size;
    1473      174780 :                 sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1474      174780 :                              &hashTuple->hashvalue, tuple);
    1475             :             }
    1476             : 
    1477             :             /* Count this tuple. */
    1478      230358 :             ++hashtable->batches[0].old_ntuples;
    1479      230358 :             ++hashtable->batches[batchno].ntuples;
    1480             : 
    1481      230358 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1482             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1483             :         }
    1484             : 
    1485             :         /* Free this chunk. */
    1486         302 :         dsa_free(hashtable->area, chunk_shared);
    1487             : 
    1488         302 :         CHECK_FOR_INTERRUPTS();
    1489             :     }
    1490          54 : }
    1491             : 
    1492             : /*
    1493             :  * Help repartition inner batches 1..n.
    1494             :  */
    1495             : static void
    1496          54 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
    1497             : {
    1498          54 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1499          54 :     int         old_nbatch = pstate->old_nbatch;
    1500             :     SharedTuplestoreAccessor **old_inner_tuples;
    1501             :     ParallelHashJoinBatch *old_batches;
    1502             :     int         i;
    1503             : 
    1504             :     /* Get our hands on the previous generation of batches. */
    1505             :     old_batches = (ParallelHashJoinBatch *)
    1506          54 :         dsa_get_address(hashtable->area, pstate->old_batches);
    1507          54 :     old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
    1508         108 :     for (i = 1; i < old_nbatch; ++i)
    1509             :     {
    1510          54 :         ParallelHashJoinBatch *shared =
    1511          54 :             NthParallelHashJoinBatch(old_batches, i);
    1512             : 
    1513          54 :         old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
    1514             :                                          ParallelWorkerNumber + 1,
    1515             :                                          &pstate->fileset);
    1516             :     }
    1517             : 
    1518             :     /* Join in the effort to repartition them. */
    1519         108 :     for (i = 1; i < old_nbatch; ++i)
    1520             :     {
    1521             :         MinimalTuple tuple;
    1522             :         uint32      hashvalue;
    1523             : 
    1524             :         /* Scan one partition from the previous generation. */
    1525          54 :         sts_begin_parallel_scan(old_inner_tuples[i]);
    1526      191040 :         while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
    1527             :         {
    1528      190986 :             size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1529             :             int         bucketno;
    1530             :             int         batchno;
    1531             : 
    1532             :             /* Decide which partition it goes to in the new generation. */
    1533      190986 :             ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
    1534             :                                       &batchno);
    1535             : 
    1536      190986 :             hashtable->batches[batchno].estimated_size += tuple_size;
    1537      190986 :             ++hashtable->batches[batchno].ntuples;
    1538      190986 :             ++hashtable->batches[i].old_ntuples;
    1539             : 
     1540             :             /* Store the tuple in its new batch. */
    1541      190986 :             sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1542             :                          &hashvalue, tuple);
    1543             : 
    1544      190986 :             CHECK_FOR_INTERRUPTS();
    1545             :         }
    1546          54 :         sts_end_parallel_scan(old_inner_tuples[i]);
    1547             :     }
    1548             : 
    1549          54 :     pfree(old_inner_tuples);
    1550          54 : }
    1551             : 
    1552             : /*
    1553             :  * Transfer the backend-local per-batch counters to the shared totals.
    1554             :  */
    1555             : static void
    1556         344 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
    1557             : {
    1558         344 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1559             :     int         i;
    1560             : 
    1561         344 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    1562         344 :     pstate->total_tuples = 0;
    1563        1686 :     for (i = 0; i < hashtable->nbatch; ++i)
    1564             :     {
    1565        1342 :         ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
    1566             : 
    1567        1342 :         batch->shared->size += batch->size;
    1568        1342 :         batch->shared->estimated_size += batch->estimated_size;
    1569        1342 :         batch->shared->ntuples += batch->ntuples;
    1570        1342 :         batch->shared->old_ntuples += batch->old_ntuples;
    1571        1342 :         batch->size = 0;
    1572        1342 :         batch->estimated_size = 0;
    1573        1342 :         batch->ntuples = 0;
    1574        1342 :         batch->old_ntuples = 0;
    1575        1342 :         pstate->total_tuples += batch->shared->ntuples;
    1576             :     }
    1577         344 :     LWLockRelease(&pstate->lock);
    1578         344 : }
    1579             : 
    1580             : /*
    1581             :  * ExecHashIncreaseNumBuckets
    1582             :  *      increase the original number of buckets in order to reduce
    1583             :  *      number of tuples per bucket
    1584             :  */
    1585             : static void
    1586          72 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
    1587             : {
    1588             :     HashMemoryChunk chunk;
    1589             : 
    1590             :     /* do nothing if not an increase (it's called increase for a reason) */
    1591          72 :     if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
    1592           0 :         return;
    1593             : 
    1594             : #ifdef HJDEBUG
    1595             :     printf("Hashjoin %p: increasing nbuckets %d => %d\n",
    1596             :            hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
    1597             : #endif
    1598             : 
    1599          72 :     hashtable->nbuckets = hashtable->nbuckets_optimal;
    1600          72 :     hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
    1601             : 
    1602             :     Assert(hashtable->nbuckets > 1);
    1603             :     Assert(hashtable->nbuckets <= (INT_MAX / 2));
    1604             :     Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
    1605             : 
    1606             :     /*
    1607             :      * Just reallocate the proper number of buckets - we don't need to walk
    1608             :      * through them - we can walk the dense-allocated chunks (just like in
    1609             :      * ExecHashIncreaseNumBatches, but without all the copying into new
    1610             :      * chunks)
    1611             :      */
    1612          72 :     hashtable->buckets.unshared =
    1613          72 :         repalloc_array(hashtable->buckets.unshared,
    1614             :                        HashJoinTuple, hashtable->nbuckets);
    1615             : 
    1616          72 :     memset(hashtable->buckets.unshared, 0,
    1617          72 :            hashtable->nbuckets * sizeof(HashJoinTuple));
    1618             : 
    1619             :     /* scan through all tuples in all chunks to rebuild the hash table */
    1620        1008 :     for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
    1621             :     {
    1622             :         /* process all tuples stored in this chunk */
    1623         936 :         size_t      idx = 0;
    1624             : 
    1625      720936 :         while (idx < chunk->used)
    1626             :         {
    1627      720000 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1628             :             int         bucketno;
    1629             :             int         batchno;
    1630             : 
    1631      720000 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1632             :                                       &bucketno, &batchno);
    1633             : 
    1634             :             /* add the tuple to the proper bucket */
    1635      720000 :             hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1636      720000 :             hashtable->buckets.unshared[bucketno] = hashTuple;
    1637             : 
    1638             :             /* advance index past the tuple */
    1639      720000 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1640             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1641             :         }
    1642             : 
    1643             :         /* allow this loop to be cancellable */
    1644         936 :         CHECK_FOR_INTERRUPTS();
    1645             :     }
    1646             : }
    1647             : 
    1648             : static void
    1649         126 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
    1650             : {
    1651         126 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1652             :     int         i;
    1653             :     HashMemoryChunk chunk;
    1654             :     dsa_pointer chunk_s;
    1655             : 
    1656             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1657             : 
    1658             :     /*
    1659             :      * It's unlikely, but we need to be prepared for new participants to show
     1660             :      * up while we're in the middle of this operation, so we need to switch on
     1661             :      * the barrier phase here.
    1662             :      */
    1663         126 :     switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
    1664             :     {
    1665         126 :         case PHJ_GROW_BUCKETS_ELECT:
    1666             :             /* Elect one participant to prepare to increase nbuckets. */
    1667         126 :             if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1668             :                                      WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
    1669             :             {
    1670             :                 size_t      size;
    1671             :                 dsa_pointer_atomic *buckets;
    1672             : 
    1673             :                 /* Double the size of the bucket array. */
    1674         108 :                 pstate->nbuckets *= 2;
    1675         108 :                 size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
    1676         108 :                 hashtable->batches[0].shared->size += size / 2;
    1677         108 :                 dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
    1678         216 :                 hashtable->batches[0].shared->buckets =
    1679         108 :                     dsa_allocate(hashtable->area, size);
    1680             :                 buckets = (dsa_pointer_atomic *)
    1681         108 :                     dsa_get_address(hashtable->area,
    1682         108 :                                     hashtable->batches[0].shared->buckets);
    1683      933996 :                 for (i = 0; i < pstate->nbuckets; ++i)
    1684      933888 :                     dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1685             : 
    1686             :                 /* Put the chunk list onto the work queue. */
    1687         108 :                 pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
    1688             : 
    1689             :                 /* Clear the flag. */
    1690         108 :                 pstate->growth = PHJ_GROWTH_OK;
    1691             :             }
    1692             :             /* Fall through. */
    1693             : 
    1694             :         case PHJ_GROW_BUCKETS_REALLOCATE:
    1695             :             /* Wait for the above to complete. */
    1696         126 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1697             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
    1698             :             /* Fall through. */
    1699             : 
    1700         126 :         case PHJ_GROW_BUCKETS_REINSERT:
    1701             :             /* Reinsert all tuples into the hash table. */
    1702         126 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1703         126 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1704         934 :             while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
    1705             :             {
    1706         682 :                 size_t      idx = 0;
    1707             : 
    1708      557068 :                 while (idx < chunk->used)
    1709             :                 {
    1710      556386 :                     HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1711      556386 :                     dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
    1712             :                     int         bucketno;
    1713             :                     int         batchno;
    1714             : 
    1715      556386 :                     ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1716             :                                               &bucketno, &batchno);
    1717             :                     Assert(batchno == 0);
    1718             : 
    1719             :                     /* add the tuple to the proper bucket */
    1720      556386 :                     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1721             :                                               hashTuple, shared);
    1722             : 
    1723             :                     /* advance index past the tuple */
    1724      556386 :                     idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1725             :                                     HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1726             :                 }
    1727             : 
    1728             :                 /* allow this loop to be cancellable */
    1729         682 :                 CHECK_FOR_INTERRUPTS();
    1730             :             }
    1731         126 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1732             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
    1733             :     }
    1734         126 : }
    1735             : 
    1736             : /*
    1737             :  * ExecHashTableInsert
    1738             :  *      insert a tuple into the hash table depending on the hash value
    1739             :  *      it may just go to a temp file for later batches
    1740             :  *
    1741             :  * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
    1742             :  * tuple; the minimal case in particular is certain to happen while reloading
    1743             :  * tuples from batch files.  We could save some cycles in the regular-tuple
    1744             :  * case by not forcing the slot contents into minimal form; not clear if it's
    1745             :  * worth the messiness required.
    1746             :  */
    1747             : void
    1748    12328468 : ExecHashTableInsert(HashJoinTable hashtable,
    1749             :                     TupleTableSlot *slot,
    1750             :                     uint32 hashvalue)
    1751             : {
    1752             :     bool        shouldFree;
    1753    12328468 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1754             :     int         bucketno;
    1755             :     int         batchno;
    1756             : 
    1757    12328468 :     ExecHashGetBucketAndBatch(hashtable, hashvalue,
    1758             :                               &bucketno, &batchno);
    1759             : 
    1760             :     /*
    1761             :      * decide whether to put the tuple in the hash table or a temp file
    1762             :      */
    1763    12328468 :     if (batchno == hashtable->curbatch)
    1764             :     {
    1765             :         /*
    1766             :          * put the tuple in hash table
    1767             :          */
    1768             :         HashJoinTuple hashTuple;
    1769             :         int         hashTupleSize;
    1770     9235986 :         double      ntuples = (hashtable->totalTuples - hashtable->skewTuples);
    1771             : 
    1772             :         /* Create the HashJoinTuple */
    1773     9235986 :         hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    1774     9235986 :         hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1775             : 
    1776     9235986 :         hashTuple->hashvalue = hashvalue;
    1777     9235986 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1778             : 
    1779             :         /*
    1780             :          * We always reset the tuple-matched flag on insertion.  This is okay
    1781             :          * even when reloading a tuple from a batch file, since the tuple
    1782             :          * could not possibly have been matched to an outer tuple before it
    1783             :          * went into the batch file.
    1784             :          */
    1785     9235986 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1786             : 
    1787             :         /* Push it onto the front of the bucket's list */
    1788     9235986 :         hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1789     9235986 :         hashtable->buckets.unshared[bucketno] = hashTuple;
    1790             : 
    1791             :         /*
    1792             :          * Increase the (optimal) number of buckets if we just exceeded the
    1793             :          * NTUP_PER_BUCKET threshold, but only when there's still a single
    1794             :          * batch.
    1795             :          */
    1796     9235986 :         if (hashtable->nbatch == 1 &&
    1797     5541100 :             ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
    1798             :         {
    1799             :             /* Guard against integer overflow and alloc size overflow */
    1800         260 :             if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
    1801         260 :                 hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
    1802             :             {
    1803         260 :                 hashtable->nbuckets_optimal *= 2;
    1804         260 :                 hashtable->log2_nbuckets_optimal += 1;
    1805             :             }
    1806             :         }
    1807             : 
    1808             :         /* Account for space used, and back off if we've used too much */
    1809     9235986 :         hashtable->spaceUsed += hashTupleSize;
    1810     9235986 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    1811     6651704 :             hashtable->spacePeak = hashtable->spaceUsed;
    1812     9235986 :         if (hashtable->spaceUsed +
    1813     9235986 :             hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
    1814     9235986 :             > hashtable->spaceAllowed)
    1815      794616 :             ExecHashIncreaseNumBatches(hashtable);
    1816             :     }
    1817             :     else
    1818             :     {
    1819             :         /*
    1820             :          * put the tuple into a temp file for later batches
    1821             :          */
    1822             :         Assert(batchno > hashtable->curbatch);
    1823     3092482 :         ExecHashJoinSaveTuple(tuple,
    1824             :                               hashvalue,
    1825     3092482 :                               &hashtable->innerBatchFile[batchno],
    1826             :                               hashtable);
    1827             :     }
    1828             : 
    1829    12328468 :     if (shouldFree)
    1830     8827448 :         heap_free_minimal_tuple(tuple);
    1831    12328468 : }
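
For context, the build side typically drives this function from a loop along the lines of the hedged sketch below. It is a simplification of what MultiExecHash does; compute_hash_for_slot() is a hypothetical placeholder for the real hash-value computation, and outerNode stands for the Hash node's child plan state.

    /* simplified build loop; not the actual MultiExecHash code */
    for (;;)
    {
        TupleTableSlot *slot = ExecProcNode(outerNode);
        uint32          hashvalue;

        if (TupIsNull(slot))
            break;

        hashvalue = compute_hash_for_slot(hashtable, slot);
        ExecHashTableInsert(hashtable, slot, hashvalue);
        hashtable->totalTuples += 1;
    }
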
    1832             : 
    1833             : /*
    1834             :  * ExecParallelHashTableInsert
    1835             :  *      insert a tuple into a shared hash table or shared batch tuplestore
    1836             :  */
    1837             : void
    1838     2160192 : ExecParallelHashTableInsert(HashJoinTable hashtable,
    1839             :                             TupleTableSlot *slot,
    1840             :                             uint32 hashvalue)
    1841             : {
    1842             :     bool        shouldFree;
    1843     2160192 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1844             :     dsa_pointer shared;
    1845             :     int         bucketno;
    1846             :     int         batchno;
    1847             : 
    1848         340 : retry:
    1849     2160532 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1850             : 
    1851     2160532 :     if (batchno == 0)
    1852             :     {
    1853             :         HashJoinTuple hashTuple;
    1854             : 
    1855             :         /* Try to load it into memory. */
    1856             :         Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
    1857             :                PHJ_BUILD_HASH_INNER);
    1858     1293834 :         hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1859     1293834 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1860             :                                                &shared);
    1861     1293834 :         if (hashTuple == NULL)
    1862         306 :             goto retry;
    1863             : 
    1864             :         /* Store the hash value in the HashJoinTuple header. */
    1865     1293528 :         hashTuple->hashvalue = hashvalue;
    1866     1293528 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1867     1293528 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1868             : 
    1869             :         /* Push it onto the front of the bucket's list */
    1870     1293528 :         ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1871             :                                   hashTuple, shared);
    1872             :     }
    1873             :     else
    1874             :     {
    1875      866698 :         size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1876             : 
    1877             :         Assert(batchno > 0);
    1878             : 
    1879             :         /* Try to preallocate space in the batch if necessary. */
    1880      866698 :         if (hashtable->batches[batchno].preallocated < tuple_size)
    1881             :         {
    1882        1516 :             if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
    1883          34 :                 goto retry;
    1884             :         }
    1885             : 
    1886             :         Assert(hashtable->batches[batchno].preallocated >= tuple_size);
    1887      866664 :         hashtable->batches[batchno].preallocated -= tuple_size;
    1888      866664 :         sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
    1889             :                      tuple);
    1890             :     }
    1891     2160192 :     ++hashtable->batches[batchno].ntuples;
    1892             : 
    1893     2160192 :     if (shouldFree)
    1894     2160192 :         heap_free_minimal_tuple(tuple);
    1895     2160192 : }
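
/*
 * Editor's illustration (not part of nodeHash.c): a hedged, single-threaded
 * sketch of the "goto retry" pattern above.  try_reserve() stands in for the
 * allocation/preallocation calls; when it fails it doubles nbatch, as a
 * concurrent repartition would, and the caller loops back to recompute the
 * destination batch under the new layout.  All names here are invented for
 * the sketch, and it assumes a reservation eventually succeeds.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_table
{
    uint32_t    nbatch;         /* power of 2, at most 8 here */
    uint32_t    budget[8];      /* bytes still reservable per batch */
};

static bool
try_reserve(struct toy_table *t, uint32_t batchno, uint32_t size)
{
    if (t->budget[batchno] >= size)
    {
        t->budget[batchno] -= size;
        return true;
    }
    if (t->nbatch < 8)
        t->nbatch *= 2;         /* stand-in for increasing the batch count */
    return false;               /* caller must recompute batchno and retry */
}

static void
insert_tuple(struct toy_table *t, uint32_t hashvalue, uint32_t size)
{
    uint32_t    batchno;

retry:
    batchno = hashvalue & (t->nbatch - 1);  /* simplified batch choice */
    if (!try_reserve(t, batchno, size))
        goto retry;
    printf("stored %u bytes in batch %u of %u\n", size, batchno, t->nbatch);
}

int
main(void)
{
    struct toy_table t = {2, {64, 0, 64, 64, 64, 64, 64, 64}};

    insert_tuple(&t, 3, 48);    /* batch 1 is full, forcing one retry */
    return 0;
}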
    1896             : 
    1897             : /*
    1898             :  * Insert a tuple into the current hash table.  Unlike
    1899             :  * ExecParallelHashTableInsert, this version is not prepared to send the tuple
    1900             :  * to other batches or to run out of memory, and should only be called with
    1901             :  * tuples that belong in the current batch once growth has been disabled.
    1902             :  */
    1903             : void
    1904     1041444 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
    1905             :                                         TupleTableSlot *slot,
    1906             :                                         uint32 hashvalue)
    1907             : {
    1908             :     bool        shouldFree;
    1909     1041444 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1910             :     HashJoinTuple hashTuple;
    1911             :     dsa_pointer shared;
    1912             :     int         batchno;
    1913             :     int         bucketno;
    1914             : 
    1915     1041444 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1916             :     Assert(batchno == hashtable->curbatch);
    1917     1041444 :     hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1918     1041444 :                                            HJTUPLE_OVERHEAD + tuple->t_len,
    1919             :                                            &shared);
    1920     1041444 :     hashTuple->hashvalue = hashvalue;
    1921     1041444 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1922     1041444 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1923     1041444 :     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1924             :                               hashTuple, shared);
    1925             : 
    1926     1041444 :     if (shouldFree)
    1927           0 :         heap_free_minimal_tuple(tuple);
    1928     1041444 : }
    1929             : 
    1930             : 
    1931             : /*
    1932             :  * ExecHashGetBucketAndBatch
    1933             :  *      Determine the bucket number and batch number for a hash value
    1934             :  *
    1935             :  * Note: on-the-fly increases of nbatch must not change the bucket number
    1936             :  * for a given hash code (since we don't move tuples to different hash
    1937             :  * chains), and must only cause the batch number to remain the same or
    1938             :  * increase.  Our algorithm is
    1939             :  *      bucketno = hashvalue MOD nbuckets
    1940             :  *      batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
    1941             :  * where nbuckets and nbatch are both expected to be powers of 2, so we can
    1942             :  * do the computations by shifting and masking.  (This assumes that all hash
    1943             :  * functions are good about randomizing all their output bits, else we are
    1944             :  * likely to have very skewed bucket or batch occupancy.)
    1945             :  *
    1946             :  * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
    1947             :  * bucket count growth.  Once we start batching, the value is fixed and does
    1948             :  * not change over the course of the join (making it possible to compute batch
    1949             :  * number the way we do here).
    1950             :  *
    1951             :  * nbatch is always a power of 2; we increase it only by doubling it.  This
    1952             :  * effectively adds one more bit to the top of the batchno.  In very large
    1953             :  * joins, we might run out of bits to add, so we do this by rotating the hash
    1954             :  * value.  This causes batchno to steal bits from bucketno when the number of
    1955             :  * virtual buckets exceeds 2^32.  It's better to have longer bucket chains
    1956             :  * than to lose the ability to divide batches.
    1957             :  */
    1958             : void
    1959    39995326 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
    1960             :                           uint32 hashvalue,
    1961             :                           int *bucketno,
    1962             :                           int *batchno)
    1963             : {
    1964    39995326 :     uint32      nbuckets = (uint32) hashtable->nbuckets;
    1965    39995326 :     uint32      nbatch = (uint32) hashtable->nbatch;
    1966             : 
    1967    39995326 :     if (nbatch > 1)
    1968             :     {
    1969    15471356 :         *bucketno = hashvalue & (nbuckets - 1);
    1970    15471356 :         *batchno = pg_rotate_right32(hashvalue,
    1971    15471356 :                                      hashtable->log2_nbuckets) & (nbatch - 1);
    1972             :     }
    1973             :     else
    1974             :     {
    1975    24523970 :         *bucketno = hashvalue & (nbuckets - 1);
    1976    24523970 :         *batchno = 0;
    1977             :     }
    1978    39995326 : }
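
/*
 * Editor's illustration (not part of nodeHash.c): a minimal standalone
 * sketch of the bucketno/batchno computation documented above, using a
 * local rotate helper in place of pg_rotate_right32().  It assumes nbuckets
 * and nbatch are powers of 2 and log2_nbuckets is in 1..31.  Doubling
 * nbatch can only leave batchno unchanged or set a new high bit, and
 * bucketno never changes, which is the invariant the comment relies on.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
rotate_right32(uint32_t word, int n)
{
    return (word >> n) | (word << (32 - n));
}

static void
bucket_and_batch(uint32_t hashvalue, uint32_t nbuckets, int log2_nbuckets,
                 uint32_t nbatch, uint32_t *bucketno, uint32_t *batchno)
{
    *bucketno = hashvalue & (nbuckets - 1);
    *batchno = (nbatch > 1) ?
        rotate_right32(hashvalue, log2_nbuckets) & (nbatch - 1) : 0;
}

int
main(void)
{
    uint32_t    hash = 0xDEADBEEF;
    uint32_t    bucket4, batch4, bucket8, batch8;

    /* 1024 buckets; going from 4 to 8 batches never moves the bucket */
    bucket_and_batch(hash, 1024, 10, 4, &bucket4, &batch4);
    bucket_and_batch(hash, 1024, 10, 8, &bucket8, &batch8);
    printf("bucket %u -> %u, batch %u -> %u\n",
           bucket4, bucket8, batch4, batch8);
    return 0;
}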
    1979             : 
    1980             : /*
    1981             :  * ExecScanHashBucket
    1982             :  *      scan a hash bucket for matches to the current outer tuple
    1983             :  *
    1984             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    1985             :  *
    1986             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    1987             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    1988             :  * for the latter.
    1989             :  */
    1990             : bool
    1991    22578508 : ExecScanHashBucket(HashJoinState *hjstate,
    1992             :                    ExprContext *econtext)
    1993             : {
    1994    22578508 :     ExprState  *hjclauses = hjstate->hashclauses;
    1995    22578508 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    1996    22578508 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    1997    22578508 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    1998             : 
    1999             :     /*
    2000             :      * hj_CurTuple is the address of the tuple last returned from the current
    2001             :      * bucket, or NULL if it's time to start scanning a new bucket.
    2002             :      *
    2003             :      * If the tuple hashed to a skew bucket then scan the skew bucket;
    2004             :      * otherwise scan the standard hashtable bucket.
    2005             :      */
    2006    22578508 :     if (hashTuple != NULL)
    2007     5105388 :         hashTuple = hashTuple->next.unshared;
    2008    17473120 :     else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
    2009        2400 :         hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
    2010             :     else
    2011    17470720 :         hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    2012             : 
    2013    26974138 :     while (hashTuple != NULL)
    2014             :     {
    2015    14844380 :         if (hashTuple->hashvalue == hashvalue)
    2016             :         {
    2017             :             TupleTableSlot *inntuple;
    2018             : 
    2019             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    2020    10448762 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2021             :                                              hjstate->hj_HashTupleSlot,
    2022             :                                              false);    /* do not pfree */
    2023    10448762 :             econtext->ecxt_innertuple = inntuple;
    2024             : 
    2025    10448762 :             if (ExecQualAndReset(hjclauses, econtext))
    2026             :             {
    2027    10448750 :                 hjstate->hj_CurTuple = hashTuple;
    2028    10448750 :                 return true;
    2029             :             }
    2030             :         }
    2031             : 
    2032     4395630 :         hashTuple = hashTuple->next.unshared;
    2033             :     }
    2034             : 
    2035             :     /*
    2036             :      * no match
    2037             :      */
    2038    12129758 :     return false;
    2039             : }
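
/*
 * Editor's illustration (not part of nodeHash.c): the bucket-chain probe
 * idiom above, reduced to a standalone list of (hashvalue, key) entries.
 * Comparing the stored hash value first rejects most non-matching entries
 * cheaply; the integer comparison on key is only a stand-in for the much
 * more expensive ExecQualAndReset() call.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct chain_entry
{
    struct chain_entry *next;
    uint32_t    hashvalue;
    int         key;
};

static struct chain_entry *
scan_chain(struct chain_entry *e, uint32_t hashvalue, int key)
{
    for (; e != NULL; e = e->next)
    {
        if (e->hashvalue != hashvalue)
            continue;           /* cheap reject, no "qual" evaluated */
        if (e->key == key)
            return e;           /* the "qual" passed */
    }
    return NULL;
}

int
main(void)
{
    struct chain_entry c = {NULL, 0xABCD, 7};
    struct chain_entry b = {&c, 0x1234, 5};
    struct chain_entry a = {&b, 0xABCD, 9};
    struct chain_entry *hit = scan_chain(&a, 0xABCD, 7);

    printf("match key: %d\n", hit ? hit->key : -1);
    return 0;
}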
    2040             : 
    2041             : /*
    2042             :  * ExecParallelScanHashBucket
    2043             :  *      scan a hash bucket for matches to the current outer tuple
    2044             :  *
    2045             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    2046             :  *
    2047             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2048             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2049             :  * for the latter.
    2050             :  */
    2051             : bool
    2052     4206108 : ExecParallelScanHashBucket(HashJoinState *hjstate,
    2053             :                            ExprContext *econtext)
    2054             : {
    2055     4206108 :     ExprState  *hjclauses = hjstate->hashclauses;
    2056     4206108 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2057     4206108 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2058     4206108 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    2059             : 
    2060             :     /*
    2061             :      * hj_CurTuple is the address of the tuple last returned from the current
    2062             :      * bucket, or NULL if it's time to start scanning a new bucket.
    2063             :      */
    2064     4206108 :     if (hashTuple != NULL)
    2065     2040078 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2066             :     else
    2067     2166030 :         hashTuple = ExecParallelHashFirstTuple(hashtable,
    2068             :                                                hjstate->hj_CurBucketNo);
    2069             : 
    2070     5600618 :     while (hashTuple != NULL)
    2071             :     {
    2072     3434588 :         if (hashTuple->hashvalue == hashvalue)
    2073             :         {
    2074             :             TupleTableSlot *inntuple;
    2075             : 
    2076             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    2077     2040078 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2078             :                                              hjstate->hj_HashTupleSlot,
    2079             :                                              false);    /* do not pfree */
    2080     2040078 :             econtext->ecxt_innertuple = inntuple;
    2081             : 
    2082     2040078 :             if (ExecQualAndReset(hjclauses, econtext))
    2083             :             {
    2084     2040078 :                 hjstate->hj_CurTuple = hashTuple;
    2085     2040078 :                 return true;
    2086             :             }
    2087             :         }
    2088             : 
    2089     1394510 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2090             :     }
    2091             : 
    2092             :     /*
    2093             :      * no match
    2094             :      */
    2095     2166030 :     return false;
    2096             : }
    2097             : 
    2098             : /*
    2099             :  * ExecPrepHashTableForUnmatched
    2100             :  *      set up for a series of ExecScanHashTableForUnmatched calls
    2101             :  */
    2102             : void
    2103        3996 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
    2104             : {
    2105             :     /*----------
    2106             :      * During this scan we use the HashJoinState fields as follows:
    2107             :      *
    2108             :      * hj_CurBucketNo: next regular bucket to scan
    2109             :      * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
    2110             :      * hj_CurTuple: last tuple returned, or NULL to start next bucket
    2111             :      *----------
    2112             :      */
    2113        3996 :     hjstate->hj_CurBucketNo = 0;
    2114        3996 :     hjstate->hj_CurSkewBucketNo = 0;
    2115        3996 :     hjstate->hj_CurTuple = NULL;
    2116        3996 : }
    2117             : 
    2118             : /*
    2119             :  * Decide if this process is allowed to run the unmatched scan.  If so, the
    2120             :  * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
    2121             :  * Otherwise the batch is detached and false is returned.
    2122             :  */
    2123             : bool
    2124          96 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
    2125             : {
    2126          96 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2127          96 :     int         curbatch = hashtable->curbatch;
    2128          96 :     ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    2129             : 
    2130             :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
    2131             : 
    2132             :     /*
    2133             :      * It would not be deadlock-free to wait on the batch barrier, because it
    2134             :      * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
    2135             :      * already emitted tuples.  Therefore, we'll hold a wait-free election:
    2136             :      * only one process can continue to the next phase, and all others detach
    2137             :      * from this batch.  They can still do any work on other batches, if there
    2138             :      * are any.
    2139             :      */
    2140          96 :     if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
    2141             :     {
    2142             :         /* This process considers the batch to be done. */
    2143          30 :         hashtable->batches[hashtable->curbatch].done = true;
    2144             : 
    2145             :         /* Make sure any temporary files are closed. */
    2146          30 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    2147          30 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    2148             : 
    2149             :         /*
    2150             :          * Track largest batch we've seen, which would normally happen in
    2151             :          * ExecHashTableDetachBatch().
    2152             :          */
    2153          30 :         hashtable->spacePeak =
    2154          30 :             Max(hashtable->spacePeak,
    2155             :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    2156          30 :         hashtable->curbatch = -1;
    2157          30 :         return false;
    2158             :     }
    2159             : 
    2160             :     /* Now we are alone with this batch. */
    2161             :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    2162             : 
    2163             :     /*
    2164             :      * Has another process decided to give up early and command all processes
    2165             :      * to skip the unmatched scan?
    2166             :      */
    2167          66 :     if (batch->skip_unmatched)
    2168             :     {
    2169           0 :         hashtable->batches[hashtable->curbatch].done = true;
    2170           0 :         ExecHashTableDetachBatch(hashtable);
    2171           0 :         return false;
    2172             :     }
    2173             : 
    2174             :     /* Now prepare the process local state, just as for non-parallel join. */
    2175          66 :     ExecPrepHashTableForUnmatched(hjstate);
    2176             : 
    2177          66 :     return true;
    2178             : }
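
/*
 * Editor's illustration (not part of nodeHash.c): a hedged sketch of the
 * "wait-free election" idea behind BarrierArriveAndDetachExceptLast(), using
 * C11 atomics instead of PostgreSQL's Barrier machinery.  Each participant
 * decrements a shared attach count; whoever brings it to zero is the sole
 * winner and runs the unmatched scan, while everyone else detaches
 * immediately without waiting and can move on to other batches.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int participants = 3;     /* processes attached to the batch */

static bool
arrive_and_detach_except_last(void)
{
    /* atomic_fetch_sub returns the old value: 1 means we were last */
    return atomic_fetch_sub(&participants, 1) == 1;
}

int
main(void)
{
    for (int worker = 0; worker < 3; worker++)
    {
        if (arrive_and_detach_except_last())
            printf("worker %d is alone with the batch: run unmatched scan\n",
                   worker);
        else
            printf("worker %d detaches and moves on to other batches\n",
                   worker);
    }
    return 0;
}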
    2179             : 
    2180             : /*
    2181             :  * ExecScanHashTableForUnmatched
    2182             :  *      scan the hash table for unmatched inner tuples
    2183             :  *
    2184             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2185             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2186             :  * for the latter.
    2187             :  */
    2188             : bool
    2189      433524 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
    2190             : {
    2191      433524 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2192      433524 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2193             : 
    2194             :     for (;;)
    2195             :     {
    2196             :         /*
    2197             :          * hj_CurTuple is the address of the tuple last returned from the
    2198             :          * current bucket, or NULL if it's time to start scanning a new
    2199             :          * bucket.
    2200             :          */
    2201     5590872 :         if (hashTuple != NULL)
    2202      429594 :             hashTuple = hashTuple->next.unshared;
    2203     5161278 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2204             :         {
    2205     5157360 :             hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    2206     5157360 :             hjstate->hj_CurBucketNo++;
    2207             :         }
    2208        3918 :         else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
    2209             :         {
    2210           0 :             int         j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
    2211             : 
    2212           0 :             hashTuple = hashtable->skewBucket[j]->tuples;
    2213           0 :             hjstate->hj_CurSkewBucketNo++;
    2214             :         }
    2215             :         else
    2216        3918 :             break;              /* finished all buckets */
    2217             : 
    2218     5998524 :         while (hashTuple != NULL)
    2219             :         {
    2220      841176 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2221             :             {
    2222             :                 TupleTableSlot *inntuple;
    2223             : 
    2224             :                 /* insert hashtable's tuple into exec slot */
    2225      429606 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2226             :                                                  hjstate->hj_HashTupleSlot,
    2227             :                                                  false);    /* do not pfree */
    2228      429606 :                 econtext->ecxt_innertuple = inntuple;
    2229             : 
    2230             :                 /*
    2231             :                  * Reset temp memory each time; although this function doesn't
    2232             :                  * do any qual eval, the caller will, so let's keep it
    2233             :                  * parallel to ExecScanHashBucket.
    2234             :                  */
    2235      429606 :                 ResetExprContext(econtext);
    2236             : 
    2237      429606 :                 hjstate->hj_CurTuple = hashTuple;
    2238      429606 :                 return true;
    2239             :             }
    2240             : 
    2241      411570 :             hashTuple = hashTuple->next.unshared;
    2242             :         }
    2243             : 
    2244             :         /* allow this loop to be cancellable */
    2245     5157348 :         CHECK_FOR_INTERRUPTS();
    2246             :     }
    2247             : 
    2248             :     /*
    2249             :      * no more unmatched tuples
    2250             :      */
    2251        3918 :     return false;
    2252             : }
    2253             : 
    2254             : /*
    2255             :  * ExecParallelScanHashTableForUnmatched
    2256             :  *      scan the hash table for unmatched inner tuples, in parallel join
    2257             :  *
    2258             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2259             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2260             :  * for the latter.
    2261             :  */
    2262             : bool
    2263      120072 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
    2264             :                                       ExprContext *econtext)
    2265             : {
    2266      120072 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2267      120072 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2268             : 
    2269             :     for (;;)
    2270             :     {
    2271             :         /*
    2272             :          * hj_CurTuple is the address of the tuple last returned from the
    2273             :          * current bucket, or NULL if it's time to start scanning a new
    2274             :          * bucket.
    2275             :          */
    2276      734472 :         if (hashTuple != NULL)
    2277      120006 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2278      614466 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2279      614400 :             hashTuple = ExecParallelHashFirstTuple(hashtable,
    2280      614400 :                                                    hjstate->hj_CurBucketNo++);
    2281             :         else
    2282          66 :             break;              /* finished all buckets */
    2283             : 
    2284      974406 :         while (hashTuple != NULL)
    2285             :         {
    2286      360006 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2287             :             {
    2288             :                 TupleTableSlot *inntuple;
    2289             : 
    2290             :                 /* insert hashtable's tuple into exec slot */
    2291      120006 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2292             :                                                  hjstate->hj_HashTupleSlot,
    2293             :                                                  false);    /* do not pfree */
    2294      120006 :                 econtext->ecxt_innertuple = inntuple;
    2295             : 
    2296             :                 /*
    2297             :                  * Reset temp memory each time; although this function doesn't
    2298             :                  * do any qual eval, the caller will, so let's keep it
    2299             :                  * parallel to ExecScanHashBucket.
    2300             :                  */
    2301      120006 :                 ResetExprContext(econtext);
    2302             : 
    2303      120006 :                 hjstate->hj_CurTuple = hashTuple;
    2304      120006 :                 return true;
    2305             :             }
    2306             : 
    2307      240000 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2308             :         }
    2309             : 
    2310             :         /* allow this loop to be cancellable */
    2311      614400 :         CHECK_FOR_INTERRUPTS();
    2312             :     }
    2313             : 
    2314             :     /*
    2315             :      * no more unmatched tuples
    2316             :      */
    2317          66 :     return false;
    2318             : }
    2319             : 
    2320             : /*
    2321             :  * ExecHashTableReset
    2322             :  *
    2323             :  *      reset hash table header for new batch
    2324             :  */
    2325             : void
    2326         952 : ExecHashTableReset(HashJoinTable hashtable)
    2327             : {
    2328             :     MemoryContext oldcxt;
    2329         952 :     int         nbuckets = hashtable->nbuckets;
    2330             : 
    2331             :     /*
    2332             :      * Release all the hash buckets and tuples acquired in the prior pass, and
    2333             :      * reinitialize the context for a new pass.
    2334             :      */
    2335         952 :     MemoryContextReset(hashtable->batchCxt);
    2336         952 :     oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
    2337             : 
    2338             :     /* Reallocate and reinitialize the hash bucket headers. */
    2339         952 :     hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
    2340             : 
    2341         952 :     hashtable->spaceUsed = 0;
    2342             : 
    2343         952 :     MemoryContextSwitchTo(oldcxt);
    2344             : 
    2345             :     /* Forget the chunks (the memory was freed by the context reset above). */
    2346         952 :     hashtable->chunks = NULL;
    2347         952 : }
    2348             : 
    2349             : /*
    2350             :  * ExecHashTableResetMatchFlags
    2351             :  *      Clear all the HeapTupleHeaderHasMatch flags in the table
    2352             :  */
    2353             : void
    2354          70 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
    2355             : {
    2356             :     HashJoinTuple tuple;
    2357             :     int         i;
    2358             : 
    2359             :     /* Reset all flags in the main table ... */
    2360       71750 :     for (i = 0; i < hashtable->nbuckets; i++)
    2361             :     {
    2362       72014 :         for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
    2363         334 :              tuple = tuple->next.unshared)
    2364         334 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2365             :     }
    2366             : 
    2367             :     /* ... and the same for the skew buckets, if any */
    2368          70 :     for (i = 0; i < hashtable->nSkewBuckets; i++)
    2369             :     {
    2370           0 :         int         j = hashtable->skewBucketNums[i];
    2371           0 :         HashSkewBucket *skewBucket = hashtable->skewBucket[j];
    2372             : 
    2373           0 :         for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
    2374           0 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2375             :     }
    2376          70 : }
    2377             : 
    2378             : 
    2379             : void
    2380        1824 : ExecReScanHash(HashState *node)
    2381             : {
    2382        1824 :     PlanState  *outerPlan = outerPlanState(node);
    2383             : 
    2384             :     /*
    2385             :      * If chgParam of the subnode is not null, then the plan will be
    2386             :      * re-scanned by the first ExecProcNode call.
    2387             :      */
    2388        1824 :     if (outerPlan->chgParam == NULL)
    2389          30 :         ExecReScan(outerPlan);
    2390        1824 : }
    2391             : 
    2392             : 
    2393             : /*
    2394             :  * ExecHashBuildSkewHash
    2395             :  *
    2396             :  *      Set up for skew optimization if we can identify the most common values
    2397             :  *      (MCVs) of the outer relation's join key.  We make a skew hash bucket
    2398             :  *      for the hash value of each MCV, up to the number of slots allowed
    2399             :  *      based on available memory.
    2400             :  */
    2401             : static void
    2402         126 : ExecHashBuildSkewHash(HashState *hashstate, HashJoinTable hashtable,
    2403             :                       Hash *node, int mcvsToUse)
    2404             : {
    2405             :     HeapTupleData *statsTuple;
    2406             :     AttStatsSlot sslot;
    2407             : 
    2408             :     /* Do nothing if planner didn't identify the outer relation's join key */
    2409         126 :     if (!OidIsValid(node->skewTable))
    2410          72 :         return;
    2411             :     /* Also, do nothing if we don't have room for at least one skew bucket */
    2412         126 :     if (mcvsToUse <= 0)
    2413           0 :         return;
    2414             : 
    2415             :     /*
    2416             :      * Try to find the MCV statistics for the outer relation's join key.
    2417             :      */
    2418         126 :     statsTuple = SearchSysCache3(STATRELATTINH,
    2419             :                                  ObjectIdGetDatum(node->skewTable),
    2420         126 :                                  Int16GetDatum(node->skewColumn),
    2421         126 :                                  BoolGetDatum(node->skewInherit));
    2422         126 :     if (!HeapTupleIsValid(statsTuple))
    2423          72 :         return;
    2424             : 
    2425          54 :     if (get_attstatsslot(&sslot, statsTuple,
    2426             :                          STATISTIC_KIND_MCV, InvalidOid,
    2427             :                          ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
    2428             :     {
    2429             :         double      frac;
    2430             :         int         nbuckets;
    2431             :         int         i;
    2432             : 
    2433           6 :         if (mcvsToUse > sslot.nvalues)
    2434           0 :             mcvsToUse = sslot.nvalues;
    2435             : 
    2436             :         /*
    2437             :          * Calculate the expected fraction of outer relation that will
    2438             :          * participate in the skew optimization.  If this isn't at least
    2439             :          * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
    2440             :          */
    2441           6 :         frac = 0;
    2442         132 :         for (i = 0; i < mcvsToUse; i++)
    2443         126 :             frac += sslot.numbers[i];
    2444           6 :         if (frac < SKEW_MIN_OUTER_FRACTION)
    2445             :         {
    2446           0 :             free_attstatsslot(&sslot);
    2447           0 :             ReleaseSysCache(statsTuple);
    2448           0 :             return;
    2449             :         }
    2450             : 
    2451             :         /*
    2452             :          * Okay, set up the skew hashtable.
    2453             :          *
    2454             :          * skewBucket[] is an open addressing hashtable with a power of 2 size
    2455             :          * that is greater than the number of MCV values.  (This ensures there
    2456             :          * will be at least one null entry, so searches will always
    2457             :          * terminate.)
    2458             :          *
    2459             :          * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
    2460             :          * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
    2461             :          * since we limit pg_statistic entries to much less than that.
    2462             :          */
    2463           6 :         nbuckets = pg_nextpower2_32(mcvsToUse + 1);
    2464             :         /* use two more bits just to help avoid collisions */
    2465           6 :         nbuckets <<= 2;
    2466             : 
    2467           6 :         hashtable->skewEnabled = true;
    2468           6 :         hashtable->skewBucketLen = nbuckets;
    2469             : 
    2470             :         /*
    2471             :          * We allocate the bucket memory in the hashtable's batch context. It
    2472             :          * is only needed during the first batch, and this ensures it will be
    2473             :          * automatically removed once the first batch is done.
    2474             :          */
    2475           6 :         hashtable->skewBucket = (HashSkewBucket **)
    2476           6 :             MemoryContextAllocZero(hashtable->batchCxt,
    2477             :                                    nbuckets * sizeof(HashSkewBucket *));
    2478           6 :         hashtable->skewBucketNums = (int *)
    2479           6 :             MemoryContextAllocZero(hashtable->batchCxt,
    2480             :                                    mcvsToUse * sizeof(int));
    2481             : 
    2482           6 :         hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
    2483           6 :             + mcvsToUse * sizeof(int);
    2484           6 :         hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
    2485           6 :             + mcvsToUse * sizeof(int);
    2486           6 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    2487           6 :             hashtable->spacePeak = hashtable->spaceUsed;
    2488             : 
    2489             :         /*
    2490             :          * Create a skew bucket for each MCV hash value.
    2491             :          *
    2492             :          * Note: it is very important that we create the buckets in order of
    2493             :          * decreasing MCV frequency.  If we have to remove some buckets, they
    2494             :          * must be removed in reverse order of creation (see notes in
    2495             :          * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
    2496             :          * be removed first.
    2497             :          */
    2498             : 
    2499         132 :         for (i = 0; i < mcvsToUse; i++)
    2500             :         {
    2501             :             uint32      hashvalue;
    2502             :             int         bucket;
    2503             : 
    2504         126 :             hashvalue = DatumGetUInt32(FunctionCall1Coll(hashstate->skew_hashfunction,
    2505             :                                                          hashstate->skew_collation,
    2506         126 :                                                          sslot.values[i]));
    2507             : 
    2508             :             /*
    2509             :              * While we have not hit a hole in the hashtable and have not hit
    2510             :              * the desired bucket, we have collided with some previous hash
    2511             :              * value, so try the next bucket location.  NB: this code must
    2512             :              * match ExecHashGetSkewBucket.
    2513             :              */
    2514         126 :             bucket = hashvalue & (nbuckets - 1);
    2515         126 :             while (hashtable->skewBucket[bucket] != NULL &&
    2516           0 :                    hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2517           0 :                 bucket = (bucket + 1) & (nbuckets - 1);
    2518             : 
    2519             :             /*
    2520             :              * If we found an existing bucket with the same hashvalue, leave
    2521             :              * it alone.  It's okay for two MCVs to share a hashvalue.
    2522             :              */
    2523         126 :             if (hashtable->skewBucket[bucket] != NULL)
    2524           0 :                 continue;
    2525             : 
    2526             :             /* Okay, create a new skew bucket for this hashvalue. */
    2527         252 :             hashtable->skewBucket[bucket] = (HashSkewBucket *)
    2528         126 :                 MemoryContextAlloc(hashtable->batchCxt,
    2529             :                                    sizeof(HashSkewBucket));
    2530         126 :             hashtable->skewBucket[bucket]->hashvalue = hashvalue;
    2531         126 :             hashtable->skewBucket[bucket]->tuples = NULL;
    2532         126 :             hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
    2533         126 :             hashtable->nSkewBuckets++;
    2534         126 :             hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
    2535         126 :             hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
    2536         126 :             if (hashtable->spaceUsed > hashtable->spacePeak)
    2537         126 :                 hashtable->spacePeak = hashtable->spaceUsed;
    2538             :         }
    2539             : 
    2540           6 :         free_attstatsslot(&sslot);
    2541             :     }
    2542             : 
    2543          54 :     ReleaseSysCache(statsTuple);
    2544             : }
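
/*
 * Editor's illustration (not part of nodeHash.c): how the skew hashtable is
 * sized.  next_power_of_2() is a portable stand-in for pg_nextpower2_32();
 * the table is made strictly larger than the number of MCVs (guaranteeing a
 * NULL slot, so probes always terminate) and then quadrupled to keep
 * collisions rare.  For 100 MCVs this yields 512 slots.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
next_power_of_2(uint32_t n)
{
    uint32_t    p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

int
main(void)
{
    int         mcvsToUse = 100;
    uint32_t    nbuckets = next_power_of_2(mcvsToUse + 1);

    nbuckets <<= 2;             /* two more bits to help avoid collisions */
    printf("%d MCVs -> %u skew buckets\n", mcvsToUse, nbuckets);
    return 0;
}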
    2545             : 
    2546             : /*
    2547             :  * ExecHashGetSkewBucket
    2548             :  *
    2549             :  *      Returns the index of the skew bucket for this hashvalue,
    2550             :  *      or INVALID_SKEW_BUCKET_NO if the hashvalue is not
    2551             :  *      associated with any active skew bucket.
    2552             :  */
    2553             : int
    2554    29936568 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
    2555             : {
    2556             :     int         bucket;
    2557             : 
    2558             :     /*
    2559             :      * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
    2560             :      * particular, this happens after the initial batch is done).
    2561             :      */
    2562    29936568 :     if (!hashtable->skewEnabled)
    2563    29816568 :         return INVALID_SKEW_BUCKET_NO;
    2564             : 
    2565             :     /*
    2566             :      * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
    2567             :      */
    2568      120000 :     bucket = hashvalue & (hashtable->skewBucketLen - 1);
    2569             : 
    2570             :     /*
    2571             :      * While we have not hit a hole in the hashtable and have not hit the
    2572             :      * desired bucket, we have collided with some other hash value, so try the
    2573             :      * next bucket location.
    2574             :      */
    2575      127830 :     while (hashtable->skewBucket[bucket] != NULL &&
    2576       10818 :            hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2577        7830 :         bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
    2578             : 
    2579             :     /*
    2580             :      * Found the desired bucket?
    2581             :      */
    2582      120000 :     if (hashtable->skewBucket[bucket] != NULL)
    2583        2988 :         return bucket;
    2584             : 
    2585             :     /*
    2586             :      * There must not be any hashtable entry for this hash value.
    2587             :      */
    2588      117012 :     return INVALID_SKEW_BUCKET_NO;
    2589             : }
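
/*
 * Editor's illustration (not part of nodeHash.c): the linear-probing lookup
 * used for the skew buckets, reduced to an array of hash values in which 0
 * means "empty" (a simplification; real hash values of 0 could not be
 * stored this way).  Because the table size is a power of 2, the modulo in
 * the probe loop is a simple AND, and the guaranteed empty slot means every
 * unsuccessful probe terminates.
 */
#include <stdint.h>
#include <stdio.h>

#define TABLE_LEN 8             /* power of 2, larger than the entry count */

static uint32_t table[TABLE_LEN];

static int
lookup(uint32_t hashvalue)
{
    uint32_t    bucket = hashvalue & (TABLE_LEN - 1);

    while (table[bucket] != 0 && table[bucket] != hashvalue)
        bucket = (bucket + 1) & (TABLE_LEN - 1);
    return table[bucket] != 0 ? (int) bucket : -1;  /* -1: not present */
}

int
main(void)
{
    /* 9 and 17 both map to slot 1; 17 was shifted to slot 2 on insert */
    table[1] = 9;
    table[2] = 17;
    printf("9 -> slot %d, 17 -> slot %d, 25 -> slot %d\n",
           lookup(9), lookup(17), lookup(25));
    return 0;
}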
    2590             : 
    2591             : /*
    2592             :  * ExecHashSkewTableInsert
    2593             :  *
    2594             :  *      Insert a tuple into the skew hashtable.
    2595             :  *
    2596             :  * This should generally match up with the current-batch case in
    2597             :  * ExecHashTableInsert.
    2598             :  */
    2599             : static void
    2600         588 : ExecHashSkewTableInsert(HashJoinTable hashtable,
    2601             :                         TupleTableSlot *slot,
    2602             :                         uint32 hashvalue,
    2603             :                         int bucketNumber)
    2604             : {
    2605             :     bool        shouldFree;
    2606         588 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    2607             :     HashJoinTuple hashTuple;
    2608             :     int         hashTupleSize;
    2609             : 
    2610             :     /* Create the HashJoinTuple */
    2611         588 :     hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2612         588 :     hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
    2613             :                                                    hashTupleSize);
    2614         588 :     hashTuple->hashvalue = hashvalue;
    2615         588 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    2616         588 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    2617             : 
    2618             :     /* Push it onto the front of the skew bucket's list */
    2619         588 :     hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
    2620         588 :     hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
    2621             :     Assert(hashTuple != hashTuple->next.unshared);
    2622             : 
    2623             :     /* Account for space used, and back off if we've used too much */
    2624         588 :     hashtable->spaceUsed += hashTupleSize;
    2625         588 :     hashtable->spaceUsedSkew += hashTupleSize;
    2626         588 :     if (hashtable->spaceUsed > hashtable->spacePeak)
    2627         432 :         hashtable->spacePeak = hashtable->spaceUsed;
    2628         690 :     while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
    2629         102 :         ExecHashRemoveNextSkewBucket(hashtable);
    2630             : 
    2631             :     /* Check we are not over the total spaceAllowed, either */
    2632         588 :     if (hashtable->spaceUsed > hashtable->spaceAllowed)
    2633           0 :         ExecHashIncreaseNumBatches(hashtable);
    2634             : 
    2635         588 :     if (shouldFree)
    2636         588 :         heap_free_minimal_tuple(tuple);
    2637         588 : }
    2638             : 
    2639             : /*
    2640             :  *      ExecHashRemoveNextSkewBucket
    2641             :  *
    2642             :  *      Remove the least valuable skew bucket by pushing its tuples into
    2643             :  *      the main hash table.
    2644             :  */
    2645             : static void
    2646         102 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
    2647             : {
    2648             :     int         bucketToRemove;
    2649             :     HashSkewBucket *bucket;
    2650             :     uint32      hashvalue;
    2651             :     int         bucketno;
    2652             :     int         batchno;
    2653             :     HashJoinTuple hashTuple;
    2654             : 
    2655             :     /* Locate the bucket to remove */
    2656         102 :     bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
    2657         102 :     bucket = hashtable->skewBucket[bucketToRemove];
    2658             : 
    2659             :     /*
    2660             :      * Calculate which bucket and batch the tuples belong to in the main
    2661             :      * hashtable.  They all have the same hash value, so it's the same for all
    2662             :      * of them.  Also note that it's not possible for nbatch to increase while
    2663             :      * we are processing the tuples.
    2664             :      */
    2665         102 :     hashvalue = bucket->hashvalue;
    2666         102 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    2667             : 
    2668             :     /* Process all tuples in the bucket */
    2669         102 :     hashTuple = bucket->tuples;
    2670         450 :     while (hashTuple != NULL)
    2671             :     {
    2672         348 :         HashJoinTuple nextHashTuple = hashTuple->next.unshared;
    2673             :         MinimalTuple tuple;
    2674             :         Size        tupleSize;
    2675             : 
    2676             :         /*
    2677             :          * This code must agree with ExecHashTableInsert.  We do not use
    2678             :          * ExecHashTableInsert directly as ExecHashTableInsert expects a
    2679             :          * TupleTableSlot while we already have HashJoinTuples.
    2680             :          */
    2681         348 :         tuple = HJTUPLE_MINTUPLE(hashTuple);
    2682         348 :         tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2683             : 
    2684             :         /* Decide whether to put the tuple in the hash table or a temp file */
    2685         348 :         if (batchno == hashtable->curbatch)
    2686             :         {
    2687             :             /* Move the tuple to the main hash table */
    2688             :             HashJoinTuple copyTuple;
    2689             : 
    2690             :             /*
    2691             :              * We must copy the tuple into the dense storage, else it will not
    2692             :              * be found by, e.g., ExecHashIncreaseNumBatches.
    2693             :              */
    2694         138 :             copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
    2695         138 :             memcpy(copyTuple, hashTuple, tupleSize);
    2696         138 :             pfree(hashTuple);
    2697             : 
    2698         138 :             copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    2699         138 :             hashtable->buckets.unshared[bucketno] = copyTuple;
    2700             : 
    2701             :             /* We have reduced skew space, but overall space doesn't change */
    2702         138 :             hashtable->spaceUsedSkew -= tupleSize;
    2703             :         }
    2704             :         else
    2705             :         {
    2706             :             /* Put the tuple into a temp file for later batches */
    2707             :             Assert(batchno > hashtable->curbatch);
    2708         210 :             ExecHashJoinSaveTuple(tuple, hashvalue,
    2709         210 :                                   &hashtable->innerBatchFile[batchno],
    2710             :                                   hashtable);
    2711         210 :             pfree(hashTuple);
    2712         210 :             hashtable->spaceUsed -= tupleSize;
    2713         210 :             hashtable->spaceUsedSkew -= tupleSize;
    2714             :         }
    2715             : 
    2716         348 :         hashTuple = nextHashTuple;
    2717             : 
    2718             :         /* allow this loop to be cancellable */
    2719         348 :         CHECK_FOR_INTERRUPTS();
    2720             :     }
    2721             : 
    2722             :     /*
    2723             :      * Free the bucket struct itself and reset the hashtable entry to NULL.
    2724             :      *
    2725             :      * NOTE: this is not nearly as simple as it looks on the surface, because
    2726             :      * of the possibility of collisions in the hashtable.  Suppose that hash
    2727             :      * values A and B collide at a particular hashtable entry, and that A was
    2728             :      * entered first so B gets shifted to a different table entry.  If we were
    2729             :      * to remove A first then ExecHashGetSkewBucket would mistakenly start
    2730             :      * reporting that B is not in the hashtable, because it would hit the NULL
    2731             :      * before finding B.  However, we always remove entries in the reverse
    2732             :      * order of creation, so this failure cannot happen.
    2733             :      */
    2734         102 :     hashtable->skewBucket[bucketToRemove] = NULL;
    2735         102 :     hashtable->nSkewBuckets--;
    2736         102 :     pfree(bucket);
    2737         102 :     hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
    2738         102 :     hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
    2739             : 
    2740             :     /*
    2741             :      * If we have removed all skew buckets then give up on skew optimization.
    2742             :      * Release the arrays since they aren't useful any more.
    2743             :      */
    2744         102 :     if (hashtable->nSkewBuckets == 0)
    2745             :     {
    2746           0 :         hashtable->skewEnabled = false;
    2747           0 :         pfree(hashtable->skewBucket);
    2748           0 :         pfree(hashtable->skewBucketNums);
    2749           0 :         hashtable->skewBucket = NULL;
    2750           0 :         hashtable->skewBucketNums = NULL;
    2751           0 :         hashtable->spaceUsed -= hashtable->spaceUsedSkew;
    2752           0 :         hashtable->spaceUsedSkew = 0;
    2753             :     }
    2754         102 : }
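
/*
 * Editor's illustration (not part of nodeHash.c): why skew buckets must be
 * removed in reverse order of creation, as the NOTE above explains.  In an
 * open-addressing table, deleting an earlier entry punches a hole in the
 * probe sequence of any later entry that collided with it, so a lookup for
 * the later entry stops at the hole and misses it.  Here values 1 and 9
 * both map to slot 1 of an 8-slot table (0 means "empty"); 9 was inserted
 * second and shifted to slot 2.
 */
#include <stdint.h>
#include <stdio.h>

#define LEN 8

static uint32_t slots[LEN];

static int
probe(uint32_t hashvalue)
{
    uint32_t    b = hashvalue & (LEN - 1);

    while (slots[b] != 0 && slots[b] != hashvalue)
        b = (b + 1) & (LEN - 1);
    return slots[b] != 0 ? (int) b : -1;
}

int
main(void)
{
    slots[1] = 1;               /* created first */
    slots[2] = 9;               /* collided with 1, shifted to slot 2 */

    /* Removing the older entry first breaks the probe chain for 9 ... */
    slots[1] = 0;
    printf("9 is still stored, but probe(9) reports slot %d\n", probe(9));

    /* ... whereas removing in reverse creation order (9 before 1) is safe. */
    slots[1] = 1;
    slots[2] = 0;
    printf("after removing 9 first, probe(1) still finds slot %d\n", probe(1));
    return 0;
}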
    2755             : 
    2756             : /*
    2757             :  * Reserve space in the DSM segment for instrumentation data.
    2758             :  */
    2759             : void
    2760         198 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
    2761             : {
    2762             :     size_t      size;
    2763             : 
    2764             :     /* don't need this if not instrumenting or no workers */
    2765         198 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2766         114 :         return;
    2767             : 
    2768          84 :     size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
    2769          84 :     size = add_size(size, offsetof(SharedHashInfo, hinstrument));
    2770          84 :     shm_toc_estimate_chunk(&pcxt->estimator, size);
    2771          84 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
    2772             : }
    2773             : 
    2774             : /*
    2775             :  * Set up a space in the DSM for all workers to record instrumentation data
    2776             :  * about their hash table.
    2777             :  */
    2778             : void
    2779         198 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
    2780             : {
    2781             :     size_t      size;
    2782             : 
    2783             :     /* don't need this if not instrumenting or no workers */
    2784         198 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2785         114 :         return;
    2786             : 
    2787          84 :     size = offsetof(SharedHashInfo, hinstrument) +
    2788          84 :         pcxt->nworkers * sizeof(HashInstrumentation);
    2789          84 :     node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
    2790             : 
    2791             :     /* Each per-worker area must start out as zeroes. */
    2792          84 :     memset(node->shared_info, 0, size);
    2793             : 
    2794          84 :     node->shared_info->num_workers = pcxt->nworkers;
    2795          84 :     shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
    2796          84 :                    node->shared_info);
    2797             : }
    2798             : 
    2799             : /*
    2800             :  * Locate the DSM space for hash table instrumentation data that we'll write
    2801             :  * to at shutdown time.
    2802             :  */
    2803             : void
    2804         558 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
    2805             : {
    2806             :     SharedHashInfo *shared_info;
    2807             : 
    2808             :     /* don't need this if not instrumenting */
    2809         558 :     if (!node->ps.instrument)
    2810         306 :         return;
    2811             : 
    2812             :     /*
    2813             :      * Find our entry in the shared area, and set up a pointer to it so that
    2814             :      * we'll accumulate stats there when shutting down or rebuilding the hash
    2815             :      * table.
    2816             :      */
    2817             :     shared_info = (SharedHashInfo *)
    2818         252 :         shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
    2819         252 :     node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
    2820             : }
    2821             : 
    2822             : /*
    2823             :  * Collect EXPLAIN stats if needed, saving them into DSM memory if
    2824             :  * ExecHashInitializeWorker was called, or local storage if not.  In the
    2825             :  * parallel case, this must be done in ExecShutdownHash() rather than
    2826             :  * ExecEndHash() because the latter runs after we've detached from the DSM
    2827             :  * segment.
    2828             :  */
    2829             : void
    2830       30834 : ExecShutdownHash(HashState *node)
    2831             : {
    2832             :     /* Allocate save space if EXPLAIN'ing and we didn't do so already */
    2833       30834 :     if (node->ps.instrument && !node->hinstrument)
    2834         114 :         node->hinstrument = palloc0_object(HashInstrumentation);
    2835             :     /* Now accumulate data for the current (final) hash table */
    2836       30834 :     if (node->hinstrument && node->hashtable)
    2837         334 :         ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
    2838       30834 : }
    2839             : 
    2840             : /*
    2841             :  * Retrieve instrumentation data from workers before the DSM segment is
    2842             :  * detached, so that EXPLAIN can access it.
    2843             :  */
    2844             : void
    2845          84 : ExecHashRetrieveInstrumentation(HashState *node)
    2846             : {
    2847          84 :     SharedHashInfo *shared_info = node->shared_info;
    2848             :     size_t      size;
    2849             : 
    2850          84 :     if (shared_info == NULL)
    2851           0 :         return;
    2852             : 
    2853             :     /* Replace node->shared_info with a copy in backend-local memory. */
    2854          84 :     size = offsetof(SharedHashInfo, hinstrument) +
    2855          84 :         shared_info->num_workers * sizeof(HashInstrumentation);
    2856          84 :     node->shared_info = palloc(size);
    2857          84 :     memcpy(node->shared_info, shared_info, size);
    2858             : }
    2859             : 
    2860             : /*
    2861             :  * Accumulate instrumentation data from 'hashtable' into an
    2862             :  * initially-zeroed HashInstrumentation struct.
    2863             :  *
    2864             :  * This is used to merge information across successive hash table instances
    2865             :  * within a single plan node.  We take the maximum values of each interesting
    2866             :  * number.  The largest nbuckets and largest nbatch values might have occurred
    2867             :  * in different instances, so there's some risk of confusion from reporting
    2868             :  * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
    2869             :  * issue if we don't report the largest values.  Similarly, we want to report
    2870             :  * the largest spacePeak regardless of whether it happened in the same
    2871             :  * instance as the largest nbuckets or nbatch.  All the instances should have
    2872             :  * the same nbuckets_original and nbatch_original; but there's little value
    2873             :  * in depending on that here, so handle them the same way.
    2874             :  */
    2875             : void
    2876         334 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
    2877             :                              HashJoinTable hashtable)
    2878             : {
    2879         334 :     instrument->nbuckets = Max(instrument->nbuckets,
    2880             :                                hashtable->nbuckets);
    2881         334 :     instrument->nbuckets_original = Max(instrument->nbuckets_original,
    2882             :                                         hashtable->nbuckets_original);
    2883         334 :     instrument->nbatch = Max(instrument->nbatch,
    2884             :                              hashtable->nbatch);
    2885         334 :     instrument->nbatch_original = Max(instrument->nbatch_original,
    2886             :                                       hashtable->nbatch_original);
    2887         334 :     instrument->space_peak = Max(instrument->space_peak,
    2888             :                                  hashtable->spacePeak);
    2889         334 : }
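
A minimal standalone sketch of the field-wise maximum merge described above, using an invented stats struct rather than PostgreSQL's HashInstrumentation; the two "instances" and their numbers are illustrative assumptions only:

    #include <stdio.h>

    /* Invented stand-in for HashInstrumentation; fields are illustrative. */
    struct sketch_stats
    {
        long        nbuckets;
        long        nbatch;
        long        space_peak;
    };

    #define SKETCH_MAX(a, b)    ((a) > (b) ? (a) : (b))

    static void
    sketch_accum(struct sketch_stats *acc, const struct sketch_stats *run)
    {
        /* Take the maximum of each field independently, as described above. */
        acc->nbuckets = SKETCH_MAX(acc->nbuckets, run->nbuckets);
        acc->nbatch = SKETCH_MAX(acc->nbatch, run->nbatch);
        acc->space_peak = SKETCH_MAX(acc->space_peak, run->space_peak);
    }

    int main(void)
    {
        struct sketch_stats acc = {0, 0, 0};
        struct sketch_stats run1 = {1024, 4, 900000};   /* first instance   */
        struct sketch_stats run2 = {2048, 2, 750000};   /* rebuilt instance */

        sketch_accum(&acc, &run1);
        sketch_accum(&acc, &run2);

        /* The maxima come from different instances: 2048 buckets, 4 batches. */
        printf("%ld buckets, %ld batches, %ld bytes peak\n",
               acc.nbuckets, acc.nbatch, acc.space_peak);
        return 0;
    }
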
    2890             : 
    2891             : /*
    2892             :  * Allocate 'size' bytes from the currently active HashMemoryChunk
    2893             :  */
    2894             : static void *
    2895     9438990 : dense_alloc(HashJoinTable hashtable, Size size)
    2896             : {
    2897             :     HashMemoryChunk newChunk;
    2898             :     char       *ptr;
    2899             : 
    2900             :     /* just in case the size is not already aligned properly */
    2901     9438990 :     size = MAXALIGN(size);
    2902             : 
    2903             :     /*
    2904             :      * If tuple size is larger than threshold, allocate a separate chunk.
    2905             :      */
    2906     9438990 :     if (size > HASH_CHUNK_THRESHOLD)
    2907             :     {
    2908             :         /* allocate new chunk and put it at the beginning of the list */
    2909           0 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2910             :                                                         HASH_CHUNK_HEADER_SIZE + size);
    2911           0 :         newChunk->maxlen = size;
    2912           0 :         newChunk->used = size;
    2913           0 :         newChunk->ntuples = 1;
    2914             : 
    2915             :         /*
    2916             :          * Add this chunk to the list after the first existing chunk, so that
    2917             :          * we don't lose the remaining space in the "current" chunk.
    2918             :          */
    2919           0 :         if (hashtable->chunks != NULL)
    2920             :         {
    2921           0 :             newChunk->next = hashtable->chunks->next;
    2922           0 :             hashtable->chunks->next.unshared = newChunk;
    2923             :         }
    2924             :         else
    2925             :         {
    2926           0 :             newChunk->next.unshared = hashtable->chunks;
    2927           0 :             hashtable->chunks = newChunk;
    2928             :         }
    2929             : 
    2930           0 :         return HASH_CHUNK_DATA(newChunk);
    2931             :     }
    2932             : 
    2933             :     /*
    2934             :      * See if we have enough space for it in the current chunk (if any). If
    2935             :      * not, allocate a fresh chunk.
    2936             :      */
    2937     9438990 :     if ((hashtable->chunks == NULL) ||
    2938     9416130 :         (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
    2939             :     {
    2940             :         /* allocate new chunk and put it at the beginning of the list */
    2941       34912 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2942             :                                                         HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
    2943             : 
    2944       34912 :         newChunk->maxlen = HASH_CHUNK_SIZE;
    2945       34912 :         newChunk->used = size;
    2946       34912 :         newChunk->ntuples = 1;
    2947             : 
    2948       34912 :         newChunk->next.unshared = hashtable->chunks;
    2949       34912 :         hashtable->chunks = newChunk;
    2950             : 
    2951       34912 :         return HASH_CHUNK_DATA(newChunk);
    2952             :     }
    2953             : 
    2954             :     /* There is enough space in the current chunk, let's add the tuple */
    2955     9404078 :     ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
    2956     9404078 :     hashtable->chunks->used += size;
    2957     9404078 :     hashtable->chunks->ntuples += 1;
    2958             : 
    2959             :     /* return pointer to the start of the tuple memory */
    2960     9404078 :     return ptr;
    2961             : }
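
As a rough illustration of the chunked allocation strategy above, here is a standalone sketch of the same idea: oversized requests get a dedicated chunk linked in behind the current one so its remaining free space is not lost, and everything else is bump-allocated from the head chunk. Plain malloc stands in for the batch memory context, the names and the 32 kB / 8 kB sizes are assumptions for the sketch, and malloc failure handling is omitted:

    #include <stdlib.h>
    #include <stddef.h>
    #include <string.h>

    #define SKETCH_CHUNK_SIZE       ((size_t) 32 * 1024)        /* assumed */
    #define SKETCH_CHUNK_THRESHOLD  (SKETCH_CHUNK_SIZE / 4)     /* assumed */
    #define SKETCH_ALIGN(len)       (((len) + 7) & ~(size_t) 7)

    typedef struct SketchChunk
    {
        struct SketchChunk *next;
        size_t      maxlen;         /* usable bytes in data[]      */
        size_t      used;           /* bytes handed out so far     */
        char        data[];         /* tuple storage               */
    } SketchChunk;

    static SketchChunk *chunks;     /* head is the "current" chunk */

    static void *
    sketch_dense_alloc(size_t size)
    {
        SketchChunk *newChunk;
        char       *ptr;

        size = SKETCH_ALIGN(size);

        /* Oversized requests get a dedicated, exactly-sized chunk. */
        if (size > SKETCH_CHUNK_THRESHOLD)
        {
            newChunk = malloc(offsetof(SketchChunk, data) + size);
            newChunk->maxlen = size;
            newChunk->used = size;

            /* Link it in behind the current chunk to keep its free space. */
            if (chunks != NULL)
            {
                newChunk->next = chunks->next;
                chunks->next = newChunk;
            }
            else
            {
                newChunk->next = NULL;
                chunks = newChunk;
            }
            return newChunk->data;
        }

        /* Start a fresh chunk if the current one can't fit this request. */
        if (chunks == NULL || chunks->maxlen - chunks->used < size)
        {
            newChunk = malloc(offsetof(SketchChunk, data) + SKETCH_CHUNK_SIZE);
            newChunk->maxlen = SKETCH_CHUNK_SIZE;
            newChunk->used = size;
            newChunk->next = chunks;
            chunks = newChunk;
            return newChunk->data;
        }

        /* Fast path: carve the next aligned slice out of the current chunk. */
        ptr = chunks->data + chunks->used;
        chunks->used += size;
        return ptr;
    }

    int main(void)
    {
        char       *a = sketch_dense_alloc(100);    /* opens a fresh chunk   */
        char       *b = sketch_dense_alloc(200);    /* fast path, same chunk */

        memset(a, 0, 100);
        memset(b, 0, 200);

        while (chunks != NULL)                      /* release everything    */
        {
            SketchChunk *next = chunks->next;

            free(chunks);
            chunks = next;
        }
        return 0;
    }
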
    2962             : 
    2963             : /*
    2964             :  * Allocate space for a tuple in shared dense storage.  This is equivalent to
    2965             :  * dense_alloc but for Parallel Hash using shared memory.
    2966             :  *
    2967             :  * While loading a tuple into shared memory, we might run out of memory and
    2968             :  * decide to repartition, or determine that the load factor is too high and
    2969             :  * decide to expand the bucket array, or discover that another participant has
    2970             :  * commanded us to help do that.  Return NULL if number of buckets or batches
    2971             :  * has changed, indicating that the caller must retry (considering the
    2972             :  * possibility that the tuple no longer belongs in the same batch).
    2973             :  */
    2974             : static HashJoinTuple
    2975     2390856 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
    2976             :                            dsa_pointer *shared)
    2977             : {
    2978     2390856 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    2979             :     dsa_pointer chunk_shared;
    2980             :     HashMemoryChunk chunk;
    2981             :     Size        chunk_size;
    2982             :     HashJoinTuple result;
    2983     2390856 :     int         curbatch = hashtable->curbatch;
    2984             : 
    2985     2390856 :     size = MAXALIGN(size);
    2986             : 
    2987             :     /*
    2988             :      * Fast path: if there is enough space in this backend's current chunk,
    2989             :      * then we can allocate without any locking.
    2990             :      */
    2991     2390856 :     chunk = hashtable->current_chunk;
    2992     2390856 :     if (chunk != NULL &&
    2993     2389988 :         size <= HASH_CHUNK_THRESHOLD &&
    2994     2389988 :         chunk->maxlen - chunk->used >= size)
    2995             :     {
    2996             : 
    2997     2387118 :         chunk_shared = hashtable->current_chunk_shared;
    2998             :         Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
    2999     2387118 :         *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
    3000     2387118 :         result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
    3001     2387118 :         chunk->used += size;
    3002             : 
    3003             :         Assert(chunk->used <= chunk->maxlen);
    3004             :         Assert(result == dsa_get_address(hashtable->area, *shared));
    3005             : 
    3006     2387118 :         return result;
    3007             :     }
    3008             : 
    3009             :     /* Slow path: try to allocate a new chunk. */
    3010        3738 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3011             : 
    3012             :     /*
    3013             :      * Check if we need to help increase the number of buckets or batches.
    3014             :      */
    3015        3738 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    3016        3702 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3017             :     {
    3018         162 :         ParallelHashGrowth growth = pstate->growth;
    3019             : 
    3020         162 :         hashtable->current_chunk = NULL;
    3021         162 :         LWLockRelease(&pstate->lock);
    3022             : 
    3023             :         /* Another participant has commanded us to help grow. */
    3024         162 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    3025          36 :             ExecParallelHashIncreaseNumBatches(hashtable);
    3026         126 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3027         126 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    3028             : 
    3029             :         /* The caller must retry. */
    3030         162 :         return NULL;
    3031             :     }
    3032             : 
    3033             :     /* Oversized tuples get their own chunk. */
    3034        3576 :     if (size > HASH_CHUNK_THRESHOLD)
    3035          48 :         chunk_size = size + HASH_CHUNK_HEADER_SIZE;
    3036             :     else
    3037        3528 :         chunk_size = HASH_CHUNK_SIZE;
    3038             : 
    3039             :     /* Check if it's time to grow batches or buckets. */
    3040        3576 :     if (pstate->growth != PHJ_GROWTH_DISABLED)
    3041             :     {
    3042             :         Assert(curbatch == 0);
    3043             :         Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    3044             : 
    3045             :         /*
    3046             :          * Check if our space limit would be exceeded.  To avoid choking on
    3047             :          * very large tuples or very low hash_mem setting, we'll always allow
    3048             :          * each backend to allocate at least one chunk.
    3049             :          */
    3050        1886 :         if (hashtable->batches[0].at_least_one_chunk &&
    3051        1466 :             hashtable->batches[0].shared->size +
    3052        1466 :             chunk_size > pstate->space_allowed)
    3053             :         {
    3054          36 :             pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3055          36 :             hashtable->batches[0].shared->space_exhausted = true;
    3056          36 :             LWLockRelease(&pstate->lock);
    3057             : 
    3058          36 :             return NULL;
    3059             :         }
    3060             : 
    3061             :         /* Check if our load factor limit would be exceeded. */
    3062        1850 :         if (hashtable->nbatch == 1)
    3063             :         {
    3064        1574 :             hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
    3065        1574 :             hashtable->batches[0].ntuples = 0;
    3066             :             /* Guard against integer overflow and alloc size overflow */
    3067        1574 :             if (hashtable->batches[0].shared->ntuples + 1 >
    3068        1574 :                 hashtable->nbuckets * NTUP_PER_BUCKET &&
    3069         108 :                 hashtable->nbuckets < (INT_MAX / 2) &&
    3070         108 :                 hashtable->nbuckets * 2 <=
    3071             :                 MaxAllocSize / sizeof(dsa_pointer_atomic))
    3072             :             {
    3073         108 :                 pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
    3074         108 :                 LWLockRelease(&pstate->lock);
    3075             : 
    3076         108 :                 return NULL;
    3077             :             }
    3078             :         }
    3079             :     }
    3080             : 
    3081             :     /* We are cleared to allocate a new chunk. */
    3082        3432 :     chunk_shared = dsa_allocate(hashtable->area, chunk_size);
    3083        3432 :     hashtable->batches[curbatch].shared->size += chunk_size;
    3084        3432 :     hashtable->batches[curbatch].at_least_one_chunk = true;
    3085             : 
    3086             :     /* Set up the chunk. */
    3087        3432 :     chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
    3088        3432 :     *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
    3089        3432 :     chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
    3090        3432 :     chunk->used = size;
    3091             : 
    3092             :     /*
    3093             :      * Push it onto the list of chunks, so that it can be found if we need to
    3094             :      * increase the number of buckets or batches (batch 0 only) and later for
    3095             :      * freeing the memory (all batches).
    3096             :      */
    3097        3432 :     chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
    3098        3432 :     hashtable->batches[curbatch].shared->chunks = chunk_shared;
    3099             : 
    3100        3432 :     if (size <= HASH_CHUNK_THRESHOLD)
    3101             :     {
    3102             :         /*
    3103             :          * Make this the current chunk so that we can use the fast path to
    3104             :          * fill the rest of it up in future calls.
    3105             :          */
    3106        3396 :         hashtable->current_chunk = chunk;
    3107        3396 :         hashtable->current_chunk_shared = chunk_shared;
    3108             :     }
    3109        3432 :     LWLockRelease(&pstate->lock);
    3110             : 
    3111             :     Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
    3112        3432 :     result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
    3113             : 
    3114        3432 :     return result;
    3115             : }
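
The NULL-return contract described in the header comment implies a retry loop in the caller: recompute the tuple's bucket and batch from the table's current geometry and attempt the allocation again. A minimal single-threaded sketch of that protocol follows; the names are invented and a fake "growth" stands in for the shared-memory machinery, so this only illustrates the retry shape, not the real code path:

    #include <stdio.h>
    #include <stdbool.h>

    static int  nbuckets = 4;       /* shared table geometry (sketch)       */

    /*
     * Stand-in allocator: fails when the caller's view of the geometry is
     * stale, as ExecParallelHashTupleAlloc() signals by returning NULL.
     * The first call also simulates another participant growing the table.
     */
    static bool
    sketch_alloc(int observed_nbuckets)
    {
        static bool grown = false;

        if (!grown)
        {
            nbuckets = 8;           /* concurrent growth by another worker  */
            grown = true;
        }
        return observed_nbuckets == nbuckets;
    }

    int main(void)
    {
        unsigned    hashvalue = 0x2a;

        for (;;)
        {
            int         observed = nbuckets;
            int         bucketno = (int) (hashvalue % (unsigned) observed);

            if (sketch_alloc(observed))
            {
                printf("inserted into bucket %d of %d\n", bucketno, observed);
                break;
            }
            /* Geometry changed underneath us; recompute bucketno and retry. */
        }
        return 0;
    }
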
    3116             : 
    3117             : /*
    3118             :  * One backend needs to set up the shared batch state including tuplestores.
    3119             :  * Other backends will ensure they have correctly configured accessors by
    3120             :  * calling ExecParallelHashEnsureBatchAccessors().
    3121             :  */
    3122             : static void
    3123         226 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
    3124             : {
    3125         226 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3126             :     ParallelHashJoinBatch *batches;
    3127             :     MemoryContext oldcxt;
    3128             :     int         i;
    3129             : 
    3130             :     Assert(hashtable->batches == NULL);
    3131             : 
    3132             :     /* Allocate space. */
    3133         226 :     pstate->batches =
    3134         226 :         dsa_allocate0(hashtable->area,
    3135             :                       EstimateParallelHashJoinBatch(hashtable) * nbatch);
    3136         226 :     pstate->nbatch = nbatch;
    3137         226 :     batches = dsa_get_address(hashtable->area, pstate->batches);
    3138             : 
    3139             :     /*
    3140             :      * Use hash join spill memory context to allocate accessors, including
    3141             :      * buffers for the temporary files.
    3142             :      */
    3143         226 :     oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    3144             : 
    3145             :     /* Allocate this backend's accessor array. */
    3146         226 :     hashtable->nbatch = nbatch;
    3147         226 :     hashtable->batches =
    3148         226 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3149             : 
    3150             :     /* Set up the shared state, tuplestores and backend-local accessors. */
    3151         954 :     for (i = 0; i < hashtable->nbatch; ++i)
    3152             :     {
    3153         728 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3154         728 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3155             :         char        name[MAXPGPATH];
    3156             : 
    3157             :         /*
    3158             :          * All members of shared were zero-initialized.  We just need to set
    3159             :          * up the Barrier.
    3160             :          */
    3161         728 :         BarrierInit(&shared->batch_barrier, 0);
    3162         728 :         if (i == 0)
    3163             :         {
    3164             :             /* Batch 0 doesn't need to be loaded. */
    3165         226 :             BarrierAttach(&shared->batch_barrier);
    3166         904 :             while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
    3167         678 :                 BarrierArriveAndWait(&shared->batch_barrier, 0);
    3168         226 :             BarrierDetach(&shared->batch_barrier);
    3169             :         }
    3170             : 
    3171             :         /* Initialize accessor state.  All members were zero-initialized. */
    3172         728 :         accessor->shared = shared;
    3173             : 
    3174             :         /* Initialize the shared tuplestores. */
    3175         728 :         snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
    3176         728 :         accessor->inner_tuples =
    3177         728 :             sts_initialize(ParallelHashJoinBatchInner(shared),
    3178             :                            pstate->nparticipants,
    3179             :                            ParallelWorkerNumber + 1,
    3180             :                            sizeof(uint32),
    3181             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3182             :                            &pstate->fileset,
    3183             :                            name);
    3184         728 :         snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
    3185         728 :         accessor->outer_tuples =
    3186         728 :             sts_initialize(ParallelHashJoinBatchOuter(shared,
    3187             :                                                       pstate->nparticipants),
    3188             :                            pstate->nparticipants,
    3189             :                            ParallelWorkerNumber + 1,
    3190             :                            sizeof(uint32),
    3191             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3192             :                            &pstate->fileset,
    3193             :                            name);
    3194             :     }
    3195             : 
    3196         226 :     MemoryContextSwitchTo(oldcxt);
    3197         226 : }
    3198             : 
    3199             : /*
    3200             :  * Free the current set of ParallelHashJoinBatchAccessor objects.
    3201             :  */
    3202             : static void
    3203          56 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
    3204             : {
    3205             :     int         i;
    3206             : 
    3207         172 :     for (i = 0; i < hashtable->nbatch; ++i)
    3208             :     {
    3209             :         /* Make sure no files are left open. */
    3210         116 :         sts_end_write(hashtable->batches[i].inner_tuples);
    3211         116 :         sts_end_write(hashtable->batches[i].outer_tuples);
    3212         116 :         sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3213         116 :         sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3214             :     }
    3215          56 :     pfree(hashtable->batches);
    3216          56 :     hashtable->batches = NULL;
    3217          56 : }
    3218             : 
    3219             : /*
    3220             :  * Make sure this backend has up-to-date accessors for the current set of
    3221             :  * batches.
    3222             :  */
    3223             : static void
    3224         936 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
    3225             : {
    3226         936 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3227             :     ParallelHashJoinBatch *batches;
    3228             :     MemoryContext oldcxt;
    3229             :     int         i;
    3230             : 
    3231         936 :     if (hashtable->batches != NULL)
    3232             :     {
    3233         694 :         if (hashtable->nbatch == pstate->nbatch)
    3234         692 :             return;
    3235           2 :         ExecParallelHashCloseBatchAccessors(hashtable);
    3236             :     }
    3237             : 
    3238             :     /*
    3239             :      * We should never see a state where the batch-tracking array is freed,
    3240             :      * because we should have given up sooner if we join when the build
    3241             :      * barrier has reached the PHJ_BUILD_FREE phase.
    3242             :      */
    3243             :     Assert(DsaPointerIsValid(pstate->batches));
    3244             : 
    3245             :     /*
    3246             :      * Use hash join spill memory context to allocate accessors, including
    3247             :      * buffers for the temporary files.
    3248             :      */
    3249         244 :     oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    3250             : 
    3251             :     /* Allocate this backend's accessor array. */
    3252         244 :     hashtable->nbatch = pstate->nbatch;
    3253         244 :     hashtable->batches =
    3254         244 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3255             : 
    3256             :     /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
    3257             :     batches = (ParallelHashJoinBatch *)
    3258         244 :         dsa_get_address(hashtable->area, pstate->batches);
    3259             : 
    3260             :     /* Set up the accessor array and attach to the tuplestores. */
    3261        1164 :     for (i = 0; i < hashtable->nbatch; ++i)
    3262             :     {
    3263         920 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3264         920 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3265             : 
    3266         920 :         accessor->shared = shared;
    3267         920 :         accessor->preallocated = 0;
    3268         920 :         accessor->done = false;
    3269         920 :         accessor->outer_eof = false;
    3270         920 :         accessor->inner_tuples =
    3271         920 :             sts_attach(ParallelHashJoinBatchInner(shared),
    3272             :                        ParallelWorkerNumber + 1,
    3273             :                        &pstate->fileset);
    3274         920 :         accessor->outer_tuples =
    3275         920 :             sts_attach(ParallelHashJoinBatchOuter(shared,
    3276             :                                                   pstate->nparticipants),
    3277             :                        ParallelWorkerNumber + 1,
    3278             :                        &pstate->fileset);
    3279             :     }
    3280             : 
    3281         244 :     MemoryContextSwitchTo(oldcxt);
    3282             : }
    3283             : 
    3284             : /*
    3285             :  * Allocate an empty shared memory hash table for a given batch.
    3286             :  */
    3287             : void
    3288         628 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
    3289             : {
    3290         628 :     ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
    3291             :     dsa_pointer_atomic *buckets;
    3292         628 :     int         nbuckets = hashtable->parallel_state->nbuckets;
    3293             :     int         i;
    3294             : 
    3295         628 :     batch->buckets =
    3296         628 :         dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
    3297             :     buckets = (dsa_pointer_atomic *)
    3298         628 :         dsa_get_address(hashtable->area, batch->buckets);
    3299     3187316 :     for (i = 0; i < nbuckets; ++i)
    3300     3186688 :         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    3301         628 : }
    3302             : 
    3303             : /*
    3304             :  * If we are currently attached to a shared hash join batch, detach.  If we
    3305             :  * are last to detach, clean up.
    3306             :  */
    3307             : void
    3308       24568 : ExecHashTableDetachBatch(HashJoinTable hashtable)
    3309             : {
    3310       24568 :     if (hashtable->parallel_state != NULL &&
    3311        1198 :         hashtable->curbatch >= 0)
    3312             :     {
    3313         784 :         int         curbatch = hashtable->curbatch;
    3314         784 :         ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    3315         784 :         bool        attached = true;
    3316             : 
    3317             :         /* Make sure any temporary files are closed. */
    3318         784 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    3319         784 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    3320             : 
    3321             :         /* After attaching we always get at least to PHJ_BATCH_PROBE. */
    3322             :         Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
    3323             :                BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    3324             : 
    3325             :         /*
    3326             :          * If we're abandoning the PHJ_BATCH_PROBE phase early without having
    3327             :          * reached the end of it, it means the plan doesn't want any more
    3328             :          * tuples, and it is happy to abandon any tuples buffered in this
    3329             :          * process's subplans.  For correctness, we can't allow any process to
    3330             :          * execute the PHJ_BATCH_SCAN phase, because we will never have the
    3331             :          * complete set of match bits.  Therefore we skip emitting unmatched
    3332             :          * tuples in all backends (if this is a full/right join), as if those
    3333             :          * tuples were all due to be emitted by this process and it has
    3334             :          * abandoned them too.
    3335             :          */
    3336         784 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
    3337         716 :             !hashtable->batches[curbatch].outer_eof)
    3338             :         {
    3339             :             /*
    3340             :              * This flag may be written to by multiple backends during
    3341             :              * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
    3342             :              * phase so requires no extra locking.
    3343             :              */
    3344           0 :             batch->skip_unmatched = true;
    3345             :         }
    3346             : 
    3347             :         /*
    3348             :          * Even if we aren't doing a full/right outer join, we'll step through
    3349             :          * the PHJ_BATCH_SCAN phase just to maintain the invariant that
    3350             :          * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
    3351             :          */
    3352         784 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
    3353         716 :             attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
    3354         784 :         if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
    3355             :         {
    3356             :             /*
    3357             :              * We are no longer attached to the batch barrier, but we're the
    3358             :              * process that was chosen to free resources and it's safe to
    3359             :              * assert the current phase.  The ParallelHashJoinBatch can't go
    3360             :              * away underneath us while we are attached to the build barrier,
    3361             :              * making this access safe.
    3362             :              */
    3363             :             Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
    3364             : 
    3365             :             /* Free shared chunks and buckets. */
    3366        3758 :             while (DsaPointerIsValid(batch->chunks))
    3367             :             {
    3368             :                 HashMemoryChunk chunk =
    3369        3130 :                     dsa_get_address(hashtable->area, batch->chunks);
    3370        3130 :                 dsa_pointer next = chunk->next.shared;
    3371             : 
    3372        3130 :                 dsa_free(hashtable->area, batch->chunks);
    3373        3130 :                 batch->chunks = next;
    3374             :             }
    3375         628 :             if (DsaPointerIsValid(batch->buckets))
    3376             :             {
    3377         628 :                 dsa_free(hashtable->area, batch->buckets);
    3378         628 :                 batch->buckets = InvalidDsaPointer;
    3379             :             }
    3380             :         }
    3381             : 
    3382             :         /*
    3383             :          * Track the largest batch we've been attached to.  Though each
    3384             :          * backend might see a different subset of batches, explain.c will
    3385             :          * scan the results from all backends to find the largest value.
    3386             :          */
    3387         784 :         hashtable->spacePeak =
    3388         784 :             Max(hashtable->spacePeak,
    3389             :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    3390             : 
    3391             :         /* Remember that we are not attached to a batch. */
    3392         784 :         hashtable->curbatch = -1;
    3393             :     }
    3394       24568 : }
    3395             : 
    3396             : /*
    3397             :  * Detach from all shared resources.  If we are last to detach, clean up.
    3398             :  */
    3399             : void
    3400       23784 : ExecHashTableDetach(HashJoinTable hashtable)
    3401             : {
    3402       23784 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3403             : 
    3404             :     /*
    3405             :      * If we're involved in a parallel query, we must either have gotten all
    3406             :      * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
    3407             :      */
    3408             :     Assert(!pstate ||
    3409             :            BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
    3410             : 
    3411       23784 :     if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
    3412             :     {
    3413             :         int         i;
    3414             : 
    3415             :         /* Make sure any temporary files are closed. */
    3416         414 :         if (hashtable->batches)
    3417             :         {
    3418        1946 :             for (i = 0; i < hashtable->nbatch; ++i)
    3419             :             {
    3420        1532 :                 sts_end_write(hashtable->batches[i].inner_tuples);
    3421        1532 :                 sts_end_write(hashtable->batches[i].outer_tuples);
    3422        1532 :                 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3423        1532 :                 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3424             :             }
    3425             :         }
    3426             : 
    3427             :         /* If we're last to detach, clean up shared memory. */
    3428         414 :         if (BarrierArriveAndDetach(&pstate->build_barrier))
    3429             :         {
    3430             :             /*
    3431             :              * Late joining processes will see this state and give up
    3432             :              * immediately.
    3433             :              */
    3434             :             Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
    3435             : 
    3436         174 :             if (DsaPointerIsValid(pstate->batches))
    3437             :             {
    3438         174 :                 dsa_free(hashtable->area, pstate->batches);
    3439         174 :                 pstate->batches = InvalidDsaPointer;
    3440             :             }
    3441             :         }
    3442             :     }
    3443       23784 :     hashtable->parallel_state = NULL;
    3444       23784 : }
    3445             : 
    3446             : /*
    3447             :  * Get the first tuple in a given bucket identified by number.
    3448             :  */
    3449             : static inline HashJoinTuple
    3450     2780430 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
    3451             : {
    3452             :     HashJoinTuple tuple;
    3453             :     dsa_pointer p;
    3454             : 
    3455             :     Assert(hashtable->parallel_state);
    3456     2780430 :     p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
    3457     2780430 :     tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
    3458             : 
    3459     2780430 :     return tuple;
    3460             : }
    3461             : 
    3462             : /*
    3463             :  * Get the next tuple in the same bucket as 'tuple'.
    3464             :  */
    3465             : static inline HashJoinTuple
    3466     3794594 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
    3467             : {
    3468             :     HashJoinTuple next;
    3469             : 
    3470             :     Assert(hashtable->parallel_state);
    3471     3794594 :     next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
    3472             : 
    3473     3794594 :     return next;
    3474             : }
    3475             : 
    3476             : /*
    3477             :  * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
    3478             :  */
    3479             : static inline void
    3480     2946936 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
    3481             :                           HashJoinTuple tuple,
    3482             :                           dsa_pointer tuple_shared)
    3483             : {
    3484             :     for (;;)
    3485             :     {
    3486     2959412 :         tuple->next.shared = dsa_pointer_atomic_read(head);
    3487     2959412 :         if (dsa_pointer_atomic_compare_exchange(head,
    3488     2959412 :                                                 &tuple->next.shared,
    3489             :                                                 tuple_shared))
    3490     2946936 :             break;
    3491             :     }
    3492     2946936 : }
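
The compare-and-swap loop above is the classic lock-free list push. Below is a standalone C11 sketch of the same pattern, using ordinary pointers and <stdatomic.h> in place of the dsa_pointer atomics; note that a failed compare-exchange refreshes the expected value with the current head, so each retry simply tries again with the newly observed head:

    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct Node
    {
        struct Node *next;
        int         value;
    } Node;

    static _Atomic(Node *) head = NULL;

    static void
    push(Node *node)
    {
        /* Point the new node at the current head, then CAS it into place. */
        node->next = atomic_load(&head);
        while (!atomic_compare_exchange_weak(&head, &node->next, node))
        {
            /* On failure, node->next now holds the latest head; retry. */
        }
    }

    int main(void)
    {
        Node        a = {NULL, 1};
        Node        b = {NULL, 2};

        push(&a);
        push(&b);
        for (Node *n = atomic_load(&head); n != NULL; n = n->next)
            printf("%d\n", n->value);   /* prints 2 then 1 */
        return 0;
    }
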
    3493             : 
    3494             : /*
    3495             :  * Prepare to work on a given batch.
    3496             :  */
    3497             : void
    3498        1804 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
    3499             : {
    3500             :     Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
    3501             : 
    3502        1804 :     hashtable->curbatch = batchno;
    3503        1804 :     hashtable->buckets.shared = (dsa_pointer_atomic *)
    3504        1804 :         dsa_get_address(hashtable->area,
    3505        1804 :                         hashtable->batches[batchno].shared->buckets);
    3506        1804 :     hashtable->nbuckets = hashtable->parallel_state->nbuckets;
    3507        1804 :     hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
    3508        1804 :     hashtable->current_chunk = NULL;
    3509        1804 :     hashtable->current_chunk_shared = InvalidDsaPointer;
    3510        1804 :     hashtable->batches[batchno].at_least_one_chunk = false;
    3511        1804 : }
    3512             : 
    3513             : /*
    3514             :  * Take the next available chunk from the queue of chunks being worked on in
    3515             :  * parallel.  Return NULL if there are none left.  Otherwise return a pointer
    3516             :  * to the chunk, and set *shared to the DSA pointer to the chunk.
    3517             :  */
    3518             : static HashMemoryChunk
    3519        1164 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
    3520             : {
    3521        1164 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3522             :     HashMemoryChunk chunk;
    3523             : 
    3524        1164 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3525        1164 :     if (DsaPointerIsValid(pstate->chunk_work_queue))
    3526             :     {
    3527         984 :         *shared = pstate->chunk_work_queue;
    3528             :         chunk = (HashMemoryChunk)
    3529         984 :             dsa_get_address(hashtable->area, *shared);
    3530         984 :         pstate->chunk_work_queue = chunk->next.shared;
    3531             :     }
    3532             :     else
    3533         180 :         chunk = NULL;
    3534        1164 :     LWLockRelease(&pstate->lock);
    3535             : 
    3536        1164 :     return chunk;
    3537             : }
    3538             : 
    3539             : /*
    3540             :  * Increase the space preallocated in this backend for a given inner batch by
    3541             :  * at least a given amount.  This allows us to track whether a given batch
    3542             :  * would fit in memory when loaded back in.  Also increase the number of
    3543             :  * batches or buckets if required.
    3544             :  *
    3545             :  * This maintains a running estimation of how much space will be taken when we
    3546             :  * load the batch back into memory by simulating the way chunks will be handed
    3547             :  * out to workers.  It's not perfectly accurate because the tuples will be
    3548             :  * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
    3549             :  * it should be pretty close.  It tends to overestimate by a fraction of a
    3550             :  * chunk per worker since all workers gang up to preallocate during hashing,
    3551             :  * but workers tend to reload batches alone if there are enough to go around,
    3552             :  * leaving fewer partially filled chunks.  This effect is bounded by
    3553             :  * nparticipants.
    3554             :  *
    3555             :  * Return false if the number of batches or buckets has changed, and the
    3556             :  * caller should reconsider which batch a given tuple now belongs in and call
    3557             :  * again.
    3558             :  */
    3559             : static bool
    3560        1516 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
    3561             : {
    3562        1516 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3563        1516 :     ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
    3564        1516 :     size_t      want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
    3565             : 
    3566             :     Assert(batchno > 0);
    3567             :     Assert(batchno < hashtable->nbatch);
    3568             :     Assert(size == MAXALIGN(size));
    3569             : 
    3570        1516 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3571             : 
    3572             :     /* Has another participant commanded us to help grow? */
    3573        1516 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    3574        1498 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3575             :     {
    3576          18 :         ParallelHashGrowth growth = pstate->growth;
    3577             : 
    3578          18 :         LWLockRelease(&pstate->lock);
    3579          18 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    3580          18 :             ExecParallelHashIncreaseNumBatches(hashtable);
    3581           0 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3582           0 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    3583             : 
    3584          18 :         return false;
    3585             :     }
    3586             : 
    3587        1498 :     if (pstate->growth != PHJ_GROWTH_DISABLED &&
    3588        1270 :         batch->at_least_one_chunk &&
    3589         640 :         (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
    3590         640 :          > pstate->space_allowed))
    3591             :     {
    3592             :         /*
    3593             :          * We have determined that this batch would exceed the space budget if
    3594             :          * loaded into memory.  Command all participants to help repartition.
    3595             :          */
    3596          16 :         batch->shared->space_exhausted = true;
    3597          16 :         pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3598          16 :         LWLockRelease(&pstate->lock);
    3599             : 
    3600          16 :         return false;
    3601             :     }
    3602             : 
    3603        1482 :     batch->at_least_one_chunk = true;
    3604        1482 :     batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
    3605        1482 :     batch->preallocated = want;
    3606        1482 :     LWLockRelease(&pstate->lock);
    3607             : 
    3608        1482 :     return true;
    3609             : }
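
For a sense of scale, the running estimate above grows in whole-chunk increments even for small tuples. A small worked example, assuming an illustrative 32 kB chunk size and 16-byte chunk header (the real HASH_CHUNK_SIZE and HASH_CHUNK_HEADER_SIZE are defined in hashjoin.h and may differ):

    #include <stdio.h>
    #include <stddef.h>

    #define SKETCH_CHUNK_SIZE    ((size_t) 32 * 1024)   /* assumed value */
    #define SKETCH_CHUNK_HEADER  ((size_t) 16)          /* assumed value */

    int main(void)
    {
        size_t      size = 64;      /* aligned tuple size being preallocated  */
        size_t      min_want = SKETCH_CHUNK_SIZE - SKETCH_CHUNK_HEADER;
        size_t      want = size > min_want ? size : min_want;
        size_t      estimated_size = 0;

        /* One preallocation charges a full chunk's worth of space. */
        estimated_size += want + SKETCH_CHUNK_HEADER;
        printf("estimated_size grows by %zu bytes\n", estimated_size);
        return 0;                   /* prints 32768 with these assumptions */
    }
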
    3610             : 
    3611             : /*
    3612             :  * Calculate the limit on how much memory can be used by Hash and similar
    3613             :  * plan types.  This is work_mem times hash_mem_multiplier, and is
    3614             :  * expressed in bytes.
    3615             :  *
    3616             :  * Exported for use by the planner, as well as other hash-like executor
    3617             :  * nodes.  This is a rather random place for this, but there is no better
    3618             :  * place.
    3619             :  */
    3620             : size_t
    3621     1739680 : get_hash_memory_limit(void)
    3622             : {
    3623             :     double      mem_limit;
    3624             : 
    3625             :     /* Do initial calculation in double arithmetic */
    3626     1739680 :     mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
    3627             : 
    3628             :     /* Clamp in case it doesn't fit in size_t */
    3629     1739680 :     mem_limit = Min(mem_limit, (double) SIZE_MAX);
    3630             : 
    3631     1739680 :     return (size_t) mem_limit;
    3632             : }
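
A worked example of the arithmetic above, assuming work_mem = 4096 (kB, i.e. 4MB) and hash_mem_multiplier = 2.0, which are the usual defaults: the limit comes out to 4096 * 2.0 * 1024 = 8388608 bytes, i.e. 8MB. A standalone sketch of the same computation:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    int main(void)
    {
        int         work_mem = 4096;            /* kB; assumed default setting */
        double      hash_mem_multiplier = 2.0;  /* assumed default setting     */
        double      mem_limit;

        /* Same steps as get_hash_memory_limit(): kB -> bytes, then clamp. */
        mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
        if (mem_limit > (double) SIZE_MAX)
            mem_limit = (double) SIZE_MAX;

        printf("%zu bytes\n", (size_t) mem_limit);      /* prints 8388608 */
        return 0;
    }
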

Generated by: LCOV version 1.16