LCOV - code coverage report
Current view: top level - src/backend/executor - nodeHash.c (source / functions)
Test: PostgreSQL 19devel
Date: 2025-10-10 10:17:52

                  Hit    Total    Coverage
Lines:           1051     1106      95.0 %
Functions:         54       55      98.2 %

Legend: Lines: hit | not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeHash.c
       4             :  *    Routines to hash relations for hashjoin
       5             :  *
       6             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/executor/nodeHash.c
      12             :  *
      13             :  * See note on parallelism in nodeHashjoin.c.
      14             :  *
      15             :  *-------------------------------------------------------------------------
      16             :  */
      17             : /*
      18             :  * INTERFACE ROUTINES
      19             :  *      MultiExecHash   - generate an in-memory hash table of the relation
      20             :  *      ExecInitHash    - initialize node and subnodes
      21             :  *      ExecEndHash     - shutdown node and subnodes
      22             :  */
      23             : 
      24             : #include "postgres.h"
      25             : 
      26             : #include <math.h>
      27             : #include <limits.h>
      28             : 
      29             : #include "access/htup_details.h"
      30             : #include "access/parallel.h"
      31             : #include "catalog/pg_statistic.h"
      32             : #include "commands/tablespace.h"
      33             : #include "executor/executor.h"
      34             : #include "executor/hashjoin.h"
      35             : #include "executor/nodeHash.h"
      36             : #include "executor/nodeHashjoin.h"
      37             : #include "miscadmin.h"
      38             : #include "port/pg_bitutils.h"
      39             : #include "utils/lsyscache.h"
      40             : #include "utils/memutils.h"
      41             : #include "utils/syscache.h"
      42             : #include "utils/wait_event.h"
      43             : 
      44             : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
      45             : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
      46             : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
      47             : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
      48             : static void ExecHashBuildSkewHash(HashState *hashstate,
      49             :                                   HashJoinTable hashtable, Hash *node,
      50             :                                   int mcvsToUse);
      51             : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
      52             :                                     TupleTableSlot *slot,
      53             :                                     uint32 hashvalue,
      54             :                                     int bucketNumber);
      55             : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
      56             : 
      57             : static void *dense_alloc(HashJoinTable hashtable, Size size);
      58             : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
      59             :                                                 size_t size,
      60             :                                                 dsa_pointer *shared);
      61             : static void MultiExecPrivateHash(HashState *node);
      62             : static void MultiExecParallelHash(HashState *node);
      63             : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
      64             :                                                        int bucketno);
      65             : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
      66             :                                                       HashJoinTuple tuple);
      67             : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
      68             :                                              HashJoinTuple tuple,
      69             :                                              dsa_pointer tuple_shared);
      70             : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
      71             : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
      72             : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
      73             : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
      74             : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
      75             :                                                      dsa_pointer *shared);
      76             : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
      77             :                                           int batchno,
      78             :                                           size_t size);
      79             : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
      80             : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
      81             : 
      82             : 
      83             : /* ----------------------------------------------------------------
      84             :  *      ExecHash
      85             :  *
      86             :  *      stub for pro forma compliance
      87             :  * ----------------------------------------------------------------
      88             :  */
      89             : static TupleTableSlot *
      90           0 : ExecHash(PlanState *pstate)
      91             : {
      92           0 :     elog(ERROR, "Hash node does not support ExecProcNode call convention");
      93             :     return NULL;
      94             : }
      95             : 
      96             : /* ----------------------------------------------------------------
      97             :  *      MultiExecHash
      98             :  *
      99             :  *      build hash table for hashjoin, doing partitioning if more
     100             :  *      than one batch is required.
     101             :  * ----------------------------------------------------------------
     102             :  */
     103             : Node *
     104       26112 : MultiExecHash(HashState *node)
     105             : {
     106             :     /* must provide our own instrumentation support */
     107       26112 :     if (node->ps.instrument)
     108         332 :         InstrStartNode(node->ps.instrument);
     109             : 
     110       26112 :     if (node->parallel_state != NULL)
     111         414 :         MultiExecParallelHash(node);
     112             :     else
     113       25698 :         MultiExecPrivateHash(node);
     114             : 
     115             :     /* must provide our own instrumentation support */
     116       26112 :     if (node->ps.instrument)
     117         332 :         InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
     118             : 
     119             :     /*
     120             :      * We do not return the hash table directly because it's not a subtype of
     121             :      * Node, and so would violate the MultiExecProcNode API.  Instead, our
     122             :      * parent Hashjoin node is expected to know how to fish it out of our node
     123             :      * state.  Ugly but not really worth cleaning up, since Hashjoin knows
     124             :      * quite a bit more about Hash besides that.
     125             :      */
     126       26112 :     return NULL;
     127             : }
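                      : /*
                      :  * For orientation, a condensed sketch (paraphrased, not the verbatim
                      :  * nodeHashjoin.c source) of how the parent HashJoin node creates the
                      :  * table, lets MultiExecHash() fill it, and then fishes it out of the
                      :  * Hash node's state:
                      :  *
                      :  *     HashState  *hashNode = (HashState *) innerPlanState(hjstate);
                      :  *     HashJoinTable hashtable;
                      :  *
                      :  *     hashtable = ExecHashTableCreate(hashNode);
                      :  *     hashNode->hashtable = hashtable;
                      :  *     (void) MultiExecProcNode((PlanState *) hashNode);  // returns NULL
                      :  *     // ... probing then proceeds against hashNode->hashtable ...
                      :  */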
     128             : 
     129             : /* ----------------------------------------------------------------
     130             :  *      MultiExecPrivateHash
     131             :  *
     132             :  *      parallel-oblivious version, building a backend-private
     133             :  *      hash table and (if necessary) batch files.
     134             :  * ----------------------------------------------------------------
     135             :  */
     136             : static void
     137       25698 : MultiExecPrivateHash(HashState *node)
     138             : {
     139             :     PlanState  *outerNode;
     140             :     HashJoinTable hashtable;
     141             :     TupleTableSlot *slot;
     142             :     ExprContext *econtext;
     143             : 
     144             :     /*
     145             :      * get state info from node
     146             :      */
     147       25698 :     outerNode = outerPlanState(node);
     148       25698 :     hashtable = node->hashtable;
     149             : 
     150             :     /*
     151             :      * set expression context
     152             :      */
     153       25698 :     econtext = node->ps.ps_ExprContext;
     154             : 
     155             :     /*
     156             :      * Get all tuples from the node below the Hash node and insert into the
     157             :      * hash table (or temp files).
     158             :      */
     159             :     for (;;)
     160     8863144 :     {
     161             :         bool        isnull;
     162             :         Datum       hashdatum;
     163             : 
     164     8888842 :         slot = ExecProcNode(outerNode);
     165     8888842 :         if (TupIsNull(slot))
     166             :             break;
     167             :         /* We have to compute the hash value */
     168     8863144 :         econtext->ecxt_outertuple = slot;
     169             : 
     170     8863144 :         ResetExprContext(econtext);
     171             : 
     172     8863144 :         hashdatum = ExecEvalExprSwitchContext(node->hash_expr, econtext,
     173             :                                               &isnull);
     174             : 
     175     8863144 :         if (!isnull)
     176             :         {
     177     8863030 :             uint32      hashvalue = DatumGetUInt32(hashdatum);
     178             :             int         bucketNumber;
     179             : 
     180     8863030 :             bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
     181     8863030 :             if (bucketNumber != INVALID_SKEW_BUCKET_NO)
     182             :             {
     183             :                 /* It's a skew tuple, so put it into that hash table */
     184         588 :                 ExecHashSkewTableInsert(hashtable, slot, hashvalue,
     185             :                                         bucketNumber);
     186         588 :                 hashtable->skewTuples += 1;
     187             :             }
     188             :             else
     189             :             {
     190             :                 /* Not subject to skew optimization, so insert normally */
     191     8862442 :                 ExecHashTableInsert(hashtable, slot, hashvalue);
     192             :             }
     193     8863030 :             hashtable->totalTuples += 1;
     194             :         }
     195             :     }
     196             : 
     197             :     /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
     198       25698 :     if (hashtable->nbuckets != hashtable->nbuckets_optimal)
     199          72 :         ExecHashIncreaseNumBuckets(hashtable);
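                      :     /*
                      :      * (nbuckets_optimal was advanced by ExecHashTableInsert() as tuples
                      :      * accumulated; deferring the actual doubling to this single spot
                      :      * means the bucket array is reallocated at most once per build.)
                      :      */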
     200             : 
     201             :     /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
     202       25698 :     hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
     203       25698 :     if (hashtable->spaceUsed > hashtable->spacePeak)
     204       25650 :         hashtable->spacePeak = hashtable->spaceUsed;
     205             : 
     206       25698 :     hashtable->partialTuples = hashtable->totalTuples;
     207       25698 : }
     208             : 
     209             : /* ----------------------------------------------------------------
     210             :  *      MultiExecParallelHash
     211             :  *
     212             :  *      parallel-aware version, building a shared hash table and
     213             :  *      (if necessary) batch files using the combined effort of
     214             :  *      a set of co-operating backends.
     215             :  * ----------------------------------------------------------------
     216             :  */
     217             : static void
     218         414 : MultiExecParallelHash(HashState *node)
     219             : {
     220             :     ParallelHashJoinState *pstate;
     221             :     PlanState  *outerNode;
     222             :     HashJoinTable hashtable;
     223             :     TupleTableSlot *slot;
     224             :     ExprContext *econtext;
     225             :     uint32      hashvalue;
     226             :     Barrier    *build_barrier;
     227             :     int         i;
     228             : 
     229             :     /*
     230             :      * get state info from node
     231             :      */
     232         414 :     outerNode = outerPlanState(node);
     233         414 :     hashtable = node->hashtable;
     234             : 
     235             :     /*
     236             :      * set expression context
     237             :      */
     238         414 :     econtext = node->ps.ps_ExprContext;
     239             : 
     240             :     /*
     241             :      * Synchronize the parallel hash table build.  At this stage we know that
     242             :      * the shared hash table has been or is being set up by
     243             :      * ExecHashTableCreate(), but we don't know if our peers have returned
     244             :      * from there or are here in MultiExecParallelHash(), and if so how far
     245             :      * through they are.  To find out, we check the build_barrier phase then
     246             :      * and jump to the right step in the build algorithm.
     247             :      */
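                      :     /*
                      :      * For reference, the build_barrier phases advance in this order
                      :      * (constants from executor/hashjoin.h):
                      :      *
                      :      *   PHJ_BUILD_ELECT -> PHJ_BUILD_ALLOCATE -> PHJ_BUILD_HASH_INNER
                      :      *     -> PHJ_BUILD_HASH_OUTER -> PHJ_BUILD_RUN -> PHJ_BUILD_FREE
                      :      *
                      :      * The switch below joins in at ALLOCATE or HASH_INNER; any later
                      :      * phase means the inner side is already built (or being probed) and
                      :      * this backend only needs to attach to the existing state.
                      :      */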
     248         414 :     pstate = hashtable->parallel_state;
     249         414 :     build_barrier = &pstate->build_barrier;
     250             :     Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
     251         414 :     switch (BarrierPhase(build_barrier))
     252             :     {
     253         184 :         case PHJ_BUILD_ALLOCATE:
     254             : 
     255             :             /*
     256             :              * Either I just allocated the initial hash table in
     257             :              * ExecHashTableCreate(), or someone else is doing that.  Either
     258             :              * way, wait for everyone to arrive here so we can proceed.
     259             :              */
     260         184 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
     261             :             /* Fall through. */
     262             : 
     263         280 :         case PHJ_BUILD_HASH_INNER:
     264             : 
     265             :             /*
     266             :              * It's time to begin hashing, or if we just arrived here then
     267             :              * hashing is already underway, so join in that effort.  While
     268             :              * hashing we have to be prepared to help increase the number of
     269             :              * batches or buckets at any time, and if we arrived here when
     270             :              * that was already underway we'll have to help complete that work
     271             :              * immediately so that it's safe to access batches and buckets
     272             :              * below.
     273             :              */
     274         280 :             if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
     275             :                 PHJ_GROW_BATCHES_ELECT)
     276           0 :                 ExecParallelHashIncreaseNumBatches(hashtable);
     277         280 :             if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
     278             :                 PHJ_GROW_BUCKETS_ELECT)
     279           0 :                 ExecParallelHashIncreaseNumBuckets(hashtable);
     280         280 :             ExecParallelHashEnsureBatchAccessors(hashtable);
     281         280 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
     282             :             for (;;)
     283     2160192 :             {
     284             :                 bool        isnull;
     285             : 
     286     2160472 :                 slot = ExecProcNode(outerNode);
     287     2160472 :                 if (TupIsNull(slot))
     288             :                     break;
     289     2160192 :                 econtext->ecxt_outertuple = slot;
     290             : 
     291     2160192 :                 ResetExprContext(econtext);
     292             : 
     293     2160192 :                 hashvalue = DatumGetUInt32(ExecEvalExprSwitchContext(node->hash_expr,
     294             :                                                                      econtext,
     295             :                                                                      &isnull));
     296             : 
     297     2160192 :                 if (!isnull)
     298     2160192 :                     ExecParallelHashTableInsert(hashtable, slot, hashvalue);
     299     2160192 :                 hashtable->partialTuples++;
     300             :             }
     301             : 
     302             :             /*
     303             :              * Make sure that any tuples we wrote to disk are visible to
     304             :              * others before anyone tries to load them.
     305             :              */
     306        1284 :             for (i = 0; i < hashtable->nbatch; ++i)
     307        1004 :                 sts_end_write(hashtable->batches[i].inner_tuples);
     308             : 
     309             :             /*
     310             :              * Update shared counters.  We need an accurate total tuple count
     311             :              * to control the empty table optimization.
     312             :              */
     313         280 :             ExecParallelHashMergeCounters(hashtable);
     314             : 
     315         280 :             BarrierDetach(&pstate->grow_buckets_barrier);
     316         280 :             BarrierDetach(&pstate->grow_batches_barrier);
     317             : 
     318             :             /*
     319             :              * Wait for everyone to finish building and flushing files and
     320             :              * counters.
     321             :              */
     322         280 :             if (BarrierArriveAndWait(build_barrier,
     323             :                                      WAIT_EVENT_HASH_BUILD_HASH_INNER))
     324             :             {
     325             :                 /*
     326             :                  * Elect one backend to disable any further growth.  Batches
     327             :                  * are now fixed.  While building them we made sure they'd fit
     328             :                  * in our memory budget when we load them back in later (or we
     329             :                  * tried to do that and gave up because we detected extreme
     330             :                  * skew).
     331             :                  */
     332         174 :                 pstate->growth = PHJ_GROWTH_DISABLED;
     333             :             }
     334             :     }
     335             : 
     336             :     /*
     337             :      * We're not yet attached to a batch.  We all agree on the dimensions and
     338             :      * number of inner tuples (for the empty table optimization).
     339             :      */
     340         414 :     hashtable->curbatch = -1;
     341         414 :     hashtable->nbuckets = pstate->nbuckets;
     342         414 :     hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
     343         414 :     hashtable->totalTuples = pstate->total_tuples;
     344             : 
     345             :     /*
     346             :      * Unless we're completely done and the batch state has been freed, make
     347             :      * sure we have accessors.
     348             :      */
     349         414 :     if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
     350         412 :         ExecParallelHashEnsureBatchAccessors(hashtable);
     351             : 
     352             :     /*
     353             :      * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
     354             :      * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
     355             :      * there already).
     356             :      */
     357             :     Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
     358             :            BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
     359             :            BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
     360         414 : }
     361             : 
     362             : /* ----------------------------------------------------------------
     363             :  *      ExecInitHash
     364             :  *
     365             :  *      Init routine for Hash node
     366             :  * ----------------------------------------------------------------
     367             :  */
     368             : HashState *
     369       35332 : ExecInitHash(Hash *node, EState *estate, int eflags)
     370             : {
     371             :     HashState  *hashstate;
     372             : 
     373             :     /* check for unsupported flags */
     374             :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
     375             : 
     376             :     /*
     377             :      * create state structure
     378             :      */
     379       35332 :     hashstate = makeNode(HashState);
     380       35332 :     hashstate->ps.plan = (Plan *) node;
     381       35332 :     hashstate->ps.state = estate;
     382       35332 :     hashstate->ps.ExecProcNode = ExecHash;
     383             :     /* delay building hashtable until ExecHashTableCreate() in executor run */
     384       35332 :     hashstate->hashtable = NULL;
     385             : 
     386             :     /*
     387             :      * Miscellaneous initialization
     388             :      *
     389             :      * create expression context for node
     390             :      */
     391       35332 :     ExecAssignExprContext(estate, &hashstate->ps);
     392             : 
     393             :     /*
     394             :      * initialize child nodes
     395             :      */
     396       35332 :     outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
     397             : 
     398             :     /*
     399             :      * initialize our result slot and type. No need to build projection
     400             :      * because this node doesn't do projections.
     401             :      */
     402       35332 :     ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
     403       35332 :     hashstate->ps.ps_ProjInfo = NULL;
     404             : 
     405             :     Assert(node->plan.qual == NIL);
     406             : 
     407             :     /*
     408             :      * Delay initialization of hash_expr until ExecInitHashJoin().  We cannot
     409             :      * build the ExprState here as we don't yet know the join type we're going
     410             :      * to be hashing values for and we need to know that before calling
     411             :      * ExecBuildHash32Expr as the keep_nulls parameter depends on the join
     412             :      * type.
     413             :      */
     414       35332 :     hashstate->hash_expr = NULL;
     415             : 
     416       35332 :     return hashstate;
     417             : }
     418             : 
     419             : /* ---------------------------------------------------------------
     420             :  *      ExecEndHash
     421             :  *
     422             :  *      clean up routine for Hash node
     423             :  * ----------------------------------------------------------------
     424             :  */
     425             : void
     426       35220 : ExecEndHash(HashState *node)
     427             : {
     428             :     PlanState  *outerPlan;
     429             : 
     430             :     /*
     431             :      * shut down the subplan
     432             :      */
     433       35220 :     outerPlan = outerPlanState(node);
     434       35220 :     ExecEndNode(outerPlan);
     435       35220 : }
     436             : 
     437             : 
     438             : /* ----------------------------------------------------------------
     439             :  *      ExecHashTableCreate
     440             :  *
     441             :  *      create an empty hashtable data structure for hashjoin.
     442             :  * ----------------------------------------------------------------
     443             :  */
     444             : HashJoinTable
     445       26112 : ExecHashTableCreate(HashState *state)
     446             : {
     447             :     Hash       *node;
     448             :     HashJoinTable hashtable;
     449             :     Plan       *outerNode;
     450             :     size_t      space_allowed;
     451             :     int         nbuckets;
     452             :     int         nbatch;
     453             :     double      rows;
     454             :     int         num_skew_mcvs;
     455             :     int         log2_nbuckets;
     456             :     MemoryContext oldcxt;
     457             : 
     458             :     /*
     459             :      * Get information about the size of the relation to be hashed (it's the
     460             :      * "outer" subtree of this node, but the inner relation of the hashjoin).
     461             :      * Compute the appropriate size of the hash table.
     462             :      */
     463       26112 :     node = (Hash *) state->ps.plan;
     464       26112 :     outerNode = outerPlan(node);
     465             : 
     466             :     /*
      467             :      * If this is a shared hash table with a partial plan, then we can't use
     468             :      * outerNode->plan_rows to estimate its size.  We need an estimate of the
     469             :      * total number of rows across all copies of the partial plan.
     470             :      */
     471       26112 :     rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
     472             : 
     473       25698 :     ExecChooseHashTableSize(rows, outerNode->plan_width,
     474       26112 :                             OidIsValid(node->skewTable),
     475       26112 :                             state->parallel_state != NULL,
     476       26112 :                             state->parallel_state != NULL ?
     477         414 :                             state->parallel_state->nparticipants - 1 : 0,
     478             :                             &space_allowed,
     479             :                             &nbuckets, &nbatch, &num_skew_mcvs);
     480             : 
     481             :     /* nbuckets must be a power of 2 */
     482       26112 :     log2_nbuckets = pg_ceil_log2_32(nbuckets);
     483             :     Assert(nbuckets == (1 << log2_nbuckets));
     484             : 
     485             :     /*
     486             :      * Initialize the hash table control block.
     487             :      *
     488             :      * The hashtable control block is just palloc'd from the executor's
     489             :      * per-query memory context.  Everything else should be kept inside the
     490             :      * subsidiary hashCxt, batchCxt or spillCxt.
     491             :      */
     492       26112 :     hashtable = palloc_object(HashJoinTableData);
     493       26112 :     hashtable->nbuckets = nbuckets;
     494       26112 :     hashtable->nbuckets_original = nbuckets;
     495       26112 :     hashtable->nbuckets_optimal = nbuckets;
     496       26112 :     hashtable->log2_nbuckets = log2_nbuckets;
     497       26112 :     hashtable->log2_nbuckets_optimal = log2_nbuckets;
     498       26112 :     hashtable->buckets.unshared = NULL;
     499       26112 :     hashtable->skewEnabled = false;
     500       26112 :     hashtable->skewBucket = NULL;
     501       26112 :     hashtable->skewBucketLen = 0;
     502       26112 :     hashtable->nSkewBuckets = 0;
     503       26112 :     hashtable->skewBucketNums = NULL;
     504       26112 :     hashtable->nbatch = nbatch;
     505       26112 :     hashtable->curbatch = 0;
     506       26112 :     hashtable->nbatch_original = nbatch;
     507       26112 :     hashtable->nbatch_outstart = nbatch;
     508       26112 :     hashtable->growEnabled = true;
     509       26112 :     hashtable->totalTuples = 0;
     510       26112 :     hashtable->partialTuples = 0;
     511       26112 :     hashtable->skewTuples = 0;
     512       26112 :     hashtable->innerBatchFile = NULL;
     513       26112 :     hashtable->outerBatchFile = NULL;
     514       26112 :     hashtable->spaceUsed = 0;
     515       26112 :     hashtable->spacePeak = 0;
     516       26112 :     hashtable->spaceAllowed = space_allowed;
     517       26112 :     hashtable->spaceUsedSkew = 0;
     518       26112 :     hashtable->spaceAllowedSkew =
     519       26112 :         hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
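                      :     /*
                      :      * (SKEW_HASH_MEM_PERCENT, defined in executor/hashjoin.h and
                      :      * currently 2, reserves about 2% of the budget for the skew table.)
                      :      */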
     520       26112 :     hashtable->chunks = NULL;
     521       26112 :     hashtable->current_chunk = NULL;
     522       26112 :     hashtable->parallel_state = state->parallel_state;
     523       26112 :     hashtable->area = state->ps.state->es_query_dsa;
     524       26112 :     hashtable->batches = NULL;
     525             : 
     526             : #ifdef HJDEBUG
     527             :     printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
     528             :            hashtable, nbatch, nbuckets);
     529             : #endif
     530             : 
     531             :     /*
     532             :      * Create temporary memory contexts in which to keep the hashtable working
     533             :      * storage.  See notes in executor/hashjoin.h.
     534             :      */
     535       26112 :     hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
     536             :                                                "HashTableContext",
     537             :                                                ALLOCSET_DEFAULT_SIZES);
     538             : 
     539       26112 :     hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
     540             :                                                 "HashBatchContext",
     541             :                                                 ALLOCSET_DEFAULT_SIZES);
     542             : 
     543       26112 :     hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
     544             :                                                 "HashSpillContext",
     545             :                                                 ALLOCSET_DEFAULT_SIZES);
     546             : 
     547             :     /* Allocate data that will live for the life of the hashjoin */
     548             : 
     549       26112 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
     550             : 
     551       26112 :     if (nbatch > 1 && hashtable->parallel_state == NULL)
     552             :     {
     553             :         MemoryContext oldctx;
     554             : 
     555             :         /*
     556             :          * allocate and initialize the file arrays in hashCxt (not needed for
     557             :          * parallel case which uses shared tuplestores instead of raw files)
     558             :          */
     559         118 :         oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
     560             : 
     561         118 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
     562         118 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
     563             : 
     564         118 :         MemoryContextSwitchTo(oldctx);
     565             : 
     566             :         /* The files will not be opened until needed... */
     567             :         /* ... but make sure we have temp tablespaces established for them */
     568         118 :         PrepareTempTablespaces();
     569             :     }
     570             : 
     571       26112 :     MemoryContextSwitchTo(oldcxt);
     572             : 
     573       26112 :     if (hashtable->parallel_state)
     574             :     {
     575         414 :         ParallelHashJoinState *pstate = hashtable->parallel_state;
     576             :         Barrier    *build_barrier;
     577             : 
     578             :         /*
     579             :          * Attach to the build barrier.  The corresponding detach operation is
     580             :          * in ExecHashTableDetach.  Note that we won't attach to the
     581             :          * batch_barrier for batch 0 yet.  We'll attach later and start it out
     582             :          * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
     583             :          * then loaded while hashing (the standard hybrid hash join
     584             :          * algorithm), and we'll coordinate that using build_barrier.
     585             :          */
     586         414 :         build_barrier = &pstate->build_barrier;
     587         414 :         BarrierAttach(build_barrier);
     588             : 
     589             :         /*
     590             :          * So far we have no idea whether there are any other participants,
     591             :          * and if so, what phase they are working on.  The only thing we care
     592             :          * about at this point is whether someone has already created the
     593             :          * SharedHashJoinBatch objects and the hash table for batch 0.  One
     594             :          * backend will be elected to do that now if necessary.
     595             :          */
     596         588 :         if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
     597         174 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
     598             :         {
     599         174 :             pstate->nbatch = nbatch;
     600         174 :             pstate->space_allowed = space_allowed;
     601         174 :             pstate->growth = PHJ_GROWTH_OK;
     602             : 
     603             :             /* Set up the shared state for coordinating batches. */
     604         174 :             ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
     605             : 
     606             :             /*
     607             :              * Allocate batch 0's hash table up front so we can load it
     608             :              * directly while hashing.
     609             :              */
     610         174 :             pstate->nbuckets = nbuckets;
     611         174 :             ExecParallelHashTableAlloc(hashtable, 0);
     612             :         }
     613             : 
     614             :         /*
     615             :          * The next Parallel Hash synchronization point is in
     616             :          * MultiExecParallelHash(), which will progress it all the way to
     617             :          * PHJ_BUILD_RUN.  The caller must not return control from this
     618             :          * executor node between now and then.
     619             :          */
     620             :     }
     621             :     else
     622             :     {
     623             :         /*
     624             :          * Prepare context for the first-scan space allocations; allocate the
     625             :          * hashbucket array therein, and set each bucket "empty".
     626             :          */
     627       25698 :         MemoryContextSwitchTo(hashtable->batchCxt);
     628             : 
     629       25698 :         hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
     630             : 
     631             :         /*
     632             :          * Set up for skew optimization, if possible and there's a need for
     633             :          * more than one batch.  (In a one-batch join, there's no point in
     634             :          * it.)
     635             :          */
     636       25698 :         if (nbatch > 1)
     637         118 :             ExecHashBuildSkewHash(state, hashtable, node, num_skew_mcvs);
     638             : 
     639       25698 :         MemoryContextSwitchTo(oldcxt);
     640             :     }
     641             : 
     642       26112 :     return hashtable;
     643             : }
     644             : 
     645             : 
     646             : /*
     647             :  * Compute appropriate size for hashtable given the estimated size of the
     648             :  * relation to be hashed (number of rows and average row width).
     649             :  *
     650             :  * This is exported so that the planner's costsize.c can use it.
     651             :  */
     652             : 
     653             : /* Target bucket loading (tuples per bucket) */
     654             : #define NTUP_PER_BUCKET         1
     655             : 
     656             : void
     657      895358 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
     658             :                         bool try_combined_hash_mem,
     659             :                         int parallel_workers,
     660             :                         size_t *space_allowed,
     661             :                         int *numbuckets,
     662             :                         int *numbatches,
     663             :                         int *num_skew_mcvs)
     664             : {
     665             :     int         tupsize;
     666             :     double      inner_rel_bytes;
     667             :     size_t      hash_table_bytes;
     668             :     size_t      bucket_bytes;
     669             :     size_t      max_pointers;
     670      895358 :     int         nbatch = 1;
     671             :     int         nbuckets;
     672             :     double      dbuckets;
     673             : 
     674             :     /* Force a plausible relation size if no info */
     675      895358 :     if (ntuples <= 0.0)
     676         150 :         ntuples = 1000.0;
     677             : 
     678             :     /*
     679             :      * Estimate tupsize based on footprint of tuple in hashtable... note this
     680             :      * does not allow for any palloc overhead.  The manipulations of spaceUsed
     681             :      * don't count palloc overhead either.
     682             :      */
     683      895358 :     tupsize = HJTUPLE_OVERHEAD +
     684      895358 :         MAXALIGN(SizeofMinimalTupleHeader) +
     685      895358 :         MAXALIGN(tupwidth);
     686      895358 :     inner_rel_bytes = ntuples * tupsize;
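                      :     /*
                      :      * Illustrative arithmetic, assuming a 64-bit build where both
                      :      * HJTUPLE_OVERHEAD and MAXALIGN(SizeofMinimalTupleHeader) come to
                      :      * 16 bytes: with tupwidth = 40, tupsize = 16 + 16 + 40 = 72 bytes,
                      :      * so an inner relation of 1 million rows is estimated at ~72 MB
                      :      * before any palloc overhead.
                      :      */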
     687             : 
     688             :     /*
     689             :      * Compute in-memory hashtable size limit from GUCs.
     690             :      */
     691      895358 :     hash_table_bytes = get_hash_memory_limit();
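                      :     /*
                      :      * get_hash_memory_limit() is work_mem scaled by hash_mem_multiplier,
                      :      * converted from kilobytes to bytes (clamped to SIZE_MAX).
                      :      */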
     692             : 
     693             :     /*
     694             :      * Parallel Hash tries to use the combined hash_mem of all workers to
     695             :      * avoid the need to batch.  If that won't work, it falls back to hash_mem
     696             :      * per worker and tries to process batches in parallel.
     697             :      */
     698      895358 :     if (try_combined_hash_mem)
     699             :     {
     700             :         /* Careful, this could overflow size_t */
     701             :         double      newlimit;
     702             : 
     703       76086 :         newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
     704       76086 :         newlimit = Min(newlimit, (double) SIZE_MAX);
     705       76086 :         hash_table_bytes = (size_t) newlimit;
     706             :     }
     707             : 
     708      895358 :     *space_allowed = hash_table_bytes;
     709             : 
     710             :     /*
     711             :      * If skew optimization is possible, estimate the number of skew buckets
     712             :      * that will fit in the memory allowed, and decrement the assumed space
     713             :      * available for the main hash table accordingly.
     714             :      *
     715             :      * We make the optimistic assumption that each skew bucket will contain
     716             :      * one inner-relation tuple.  If that turns out to be low, we will recover
     717             :      * at runtime by reducing the number of skew buckets.
     718             :      *
     719             :      * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
     720             :      * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
     721             :      * will round up to the next power of 2 and then multiply by 4 to reduce
     722             :      * collisions.
     723             :      */
     724      895358 :     if (useskew)
     725             :     {
     726             :         size_t      bytes_per_mcv;
     727             :         size_t      skew_mcvs;
     728             : 
     729             :         /*----------
     730             :          * Compute number of MCVs we could hold in hash_table_bytes
     731             :          *
     732             :          * Divisor is:
     733             :          * size of a hash tuple +
     734             :          * worst-case size of skewBucket[] per MCV +
     735             :          * size of skewBucketNums[] entry +
     736             :          * size of skew bucket struct itself
     737             :          *----------
     738             :          */
     739      889592 :         bytes_per_mcv = tupsize +
     740             :             (8 * sizeof(HashSkewBucket *)) +
     741      889592 :             sizeof(int) +
     742             :             SKEW_BUCKET_OVERHEAD;
     743      889592 :         skew_mcvs = hash_table_bytes / bytes_per_mcv;
     744             : 
     745             :         /*
     746             :          * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
     747             :          * not to worry about size_t overflow in the multiplication)
     748             :          */
     749      889592 :         skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
     750             : 
     751             :         /* Now clamp to integer range */
     752      889592 :         skew_mcvs = Min(skew_mcvs, INT_MAX);
     753             : 
     754      889592 :         *num_skew_mcvs = (int) skew_mcvs;
     755             : 
     756             :         /* Reduce hash_table_bytes by the amount needed for the skew table */
     757      889592 :         if (skew_mcvs > 0)
     758      889592 :             hash_table_bytes -= skew_mcvs * bytes_per_mcv;
     759             :     }
     760             :     else
     761        5766 :         *num_skew_mcvs = 0;
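                      :     /*
                      :      * Worked example with assumed 64-bit sizes: tupsize = 72,
                      :      * 8 * sizeof(HashSkewBucket *) = 64, sizeof(int) = 4, and
                      :      * SKEW_BUCKET_OVERHEAD = 16 give bytes_per_mcv = 156.  A 4 MB
                      :      * budget then yields 4194304 / 156 = 26886 MCVs, scaled down to
                      :      * (26886 * 2) / 100 = 537 with SKEW_HASH_MEM_PERCENT = 2.
                      :      */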
     762             : 
     763             :     /*
     764             :      * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
     765             :      * memory is filled, assuming a single batch; but limit the value so that
     766             :      * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
     767             :      * nor MaxAllocSize.
     768             :      *
     769             :      * Note that both nbuckets and nbatch must be powers of 2 to make
     770             :      * ExecHashGetBucketAndBatch fast.
     771             :      */
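                      :     /*
                      :      * With powers of 2, ExecHashGetBucketAndBatch() (defined later in
                      :      * this file) reduces to masking, roughly:
                      :      *
                      :      *   *bucketno = hashvalue & (nbuckets - 1);
                      :      *   *batchno  = pg_rotate_right32(hashvalue, log2_nbuckets)
                      :      *               & (nbatch - 1);
                      :      */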
     772      895358 :     max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
     773      895358 :     max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
     774             :     /* If max_pointers isn't a power of 2, must round it down to one */
     775      895358 :     max_pointers = pg_prevpower2_size_t(max_pointers);
     776             : 
     777             :     /* Also ensure we avoid integer overflow in nbatch and nbuckets */
     778             :     /* (this step is redundant given the current value of MaxAllocSize) */
     779      895358 :     max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
     780             : 
     781      895358 :     dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
     782      895358 :     dbuckets = Min(dbuckets, max_pointers);
     783      895358 :     nbuckets = (int) dbuckets;
     784             :     /* don't let nbuckets be really small, though ... */
     785      895358 :     nbuckets = Max(nbuckets, 1024);
     786             :     /* ... and force it to be a power of 2. */
     787      895358 :     nbuckets = pg_nextpower2_32(nbuckets);
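                      :     /*
                      :      * For example, ntuples = 1,000,000 and NTUP_PER_BUCKET = 1 give
                      :      * dbuckets = 1000000; after the clamps, pg_nextpower2_32() rounds
                      :      * this up to nbuckets = 1048576 (2^20).
                      :      */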
     788             : 
     789             :     /*
     790             :      * If there's not enough space to store the projected number of tuples and
     791             :      * the required bucket headers, we will need multiple batches.
     792             :      */
     793      895358 :     bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
     794      895358 :     if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
     795             :     {
     796             :         /* We'll need multiple batches */
     797             :         size_t      sbuckets;
     798             :         double      dbatch;
     799             :         int         minbatch;
     800             :         size_t      bucket_size;
     801             : 
     802             :         /*
     803             :          * If Parallel Hash with combined hash_mem would still need multiple
     804             :          * batches, we'll have to fall back to regular hash_mem budget.
     805             :          */
     806        5148 :         if (try_combined_hash_mem)
     807             :         {
     808         246 :             ExecChooseHashTableSize(ntuples, tupwidth, useskew,
     809             :                                     false, parallel_workers,
     810             :                                     space_allowed,
     811             :                                     numbuckets,
     812             :                                     numbatches,
     813             :                                     num_skew_mcvs);
     814         246 :             return;
     815             :         }
     816             : 
     817             :         /*
     818             :          * Estimate the number of buckets we'll want to have when hash_mem is
     819             :          * entirely full.  Each bucket will contain a bucket pointer plus
     820             :          * NTUP_PER_BUCKET tuples, whose projected size already includes
     821             :          * overhead for the hash code, pointer to the next tuple, etc.
     822             :          */
     823        4902 :         bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
     824        4902 :         if (hash_table_bytes <= bucket_size)
     825           0 :             sbuckets = 1;       /* avoid pg_nextpower2_size_t(0) */
     826             :         else
     827        4902 :             sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
     828        4902 :         sbuckets = Min(sbuckets, max_pointers);
     829        4902 :         nbuckets = (int) sbuckets;
     830        4902 :         nbuckets = pg_nextpower2_32(nbuckets);
     831        4902 :         bucket_bytes = nbuckets * sizeof(HashJoinTuple);
     832             : 
     833             :         /*
     834             :          * Buckets are simple pointers to hashjoin tuples, while tupsize
     835             :          * includes the pointer, hash code, and MinimalTupleData.  So buckets
     836             :          * should never really exceed 25% of hash_mem (even for
     837             :          * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
     838             :          * 2^N bytes, where we might get more because of doubling. So let's
     839             :          * look for 50% here.
     840             :          */
     841             :         Assert(bucket_bytes <= hash_table_bytes / 2);
     842             : 
     843             :         /* Calculate required number of batches. */
     844        4902 :         dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
     845        4902 :         dbatch = Min(dbatch, max_pointers);
     846        4902 :         minbatch = (int) dbatch;
     847        4902 :         nbatch = pg_nextpower2_32(Max(2, minbatch));
     848             :     }
     849             : 
     850             :     /*
     851             :      * Optimize the total amount of memory consumed by the hash node.
     852             :      *
     853             :      * The nbatch calculation above focuses on the size of the in-memory hash
     854             :      * table, assuming no per-batch overhead. Now adjust the number of batches
     855             :      * and the size of the hash table to minimize total memory consumed by the
     856             :      * hash node.
     857             :      *
     858             :      * Each batch file has a BLCKSZ buffer, and we may need two files per
     859             :      * batch (inner and outer side). So with enough batches this can be
     860             :      * significantly more memory than the hashtable itself.
     861             :      *
     862             :      * The total memory usage may be expressed by this formula:
     863             :      *
     864             :      * (inner_rel_bytes / nbatch) + (2 * nbatch * BLCKSZ) <= hash_table_bytes
     865             :      *
     866             :      * where (inner_rel_bytes / nbatch) is the size of the in-memory hash
     867             :      * table and (2 * nbatch * BLCKSZ) is the amount of memory used by file
      868             :      * buffers. But for sufficiently large values of inner_rel_bytes there
      869             :      * may not be a nbatch value that would make both parts fit into
     870             :      * hash_table_bytes.
     871             :      *
     872             :      * In this case we can't enforce the memory limit - we're going to exceed
     873             :      * it. We can however minimize the impact and use as little memory as
     874             :      * possible. (We haven't really enforced it before either, as we simply
     875             :      * ignored the batch files.)
     876             :      *
     877             :      * The formula for total memory usage says that given an inner relation of
     878             :      * size inner_rel_bytes, we may divide it into an arbitrary number of
     879             :      * batches. This determines both the size of the in-memory hash table and
     880             :      * the amount of memory needed for batch files. These two terms work in
     881             :      * opposite ways - when one decreases, the other increases.
     882             :      *
     883             :      * For low nbatch values, the hash table takes most of the memory, but at
     884             :      * some point the batch files start to dominate. If you combine these two
     885             :      * terms, the memory consumption (for a fixed size of the inner relation)
     886             :      * has a u-shape, with a minimum at some nbatch value.
     887             :      *
     888             :      * Our goal is to find this nbatch value, minimizing the memory usage. We
     889             :      * calculate the memory usage with half the batches (i.e. nbatch/2), and
     890             :      * if it's lower than the current memory usage we know it's better to use
     891             :      * fewer batches. We repeat this until reducing the number of batches does
     892             :      * not reduce the memory usage - we found the optimum. We know the optimum
     893             :      * exists, thanks to the u-shape.
     894             :      *
     895             :      * We only want to do this when exceeding the memory limit, not every
     896             :      * time. The goal is not to minimize memory usage in every case, but to
     897             :      * minimize the memory usage when we can't stay within the memory limit.
     898             :      *
     899             :      * For this reason we only consider reducing the number of batches. We
     900             :      * could try the opposite direction too, but that would save memory only
     901             :      * when most of the memory is used by the hash table. And the hash table
     902             :      * was used for the initial sizing, so we shouldn't be exceeding the
     903             :      * memory limit too much. We might save memory by using more batches, but
     904             :      * it would result in spilling more batch files, which does not seem like
     905             :      * a great trade off.
     906             :      *
     907             :      * While growing the hashtable, we also adjust the number of buckets, to
     908             :      * not have more than one tuple per bucket (load factor 1). We can only do
     909             :      * this during the initial sizing - once we start building the hash,
      910             :      * nbuckets is fixed.
     911             :      */
     912      896064 :     while (nbatch > 0)
     913             :     {
     914             :         /* how much memory are we using with current nbatch value */
     915      896064 :         size_t      current_space = hash_table_bytes + (2 * nbatch * BLCKSZ);
     916             : 
     917             :         /* how much memory would we use with half the batches */
     918      896064 :         size_t      new_space = hash_table_bytes * 2 + (nbatch * BLCKSZ);
     919             : 
     920             :         /* If the memory usage would not decrease, we found the optimum. */
     921      896064 :         if (current_space < new_space)
     922      895112 :             break;
     923             : 
     924             :         /*
     925             :          * It's better to use half the batches, so do that and adjust the
      926             :          * nbuckets in the opposite direction, and double the allowance.
     927             :          */
     928         952 :         nbatch /= 2;
     929         952 :         nbuckets *= 2;
     930             : 
     931         952 :         *space_allowed = (*space_allowed) * 2;
     932             :     }
     933             : 
     934             :     Assert(nbuckets > 0);
     935             :     Assert(nbatch > 0);
     936             : 
     937      895112 :     *numbuckets = nbuckets;
     938      895112 :     *numbatches = nbatch;
     939             : }
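
/*
 * A worked sketch of the sizing loop above, assuming BLCKSZ = 8192; the
 * names here are hypothetical and the nbuckets doubling is omitted.  For
 * hash_table_bytes = 1 MB and nbatch = 1024, the projected memory use
 * falls 17 MB -> 9 MB -> 5 MB -> 3 MB as nbatch halves down to 64; halving
 * again would need 2.5 MB against the current 2 MB, so the loop stops
 * there, having grown *space_allowed by a factor of 16.
 */
#include <stddef.h>

#define SKETCH_BLCKSZ ((size_t) 8192)

static void
choose_nbatch_sketch(size_t hash_table_bytes, int *nbatch, size_t *space_allowed)
{
    while (*nbatch > 0)
    {
        /* memory used now: hash table plus two file buffers per batch */
        size_t      current_space = hash_table_bytes + (2 * *nbatch * SKETCH_BLCKSZ);

        /* memory used with half the batches and a doubled hash table */
        size_t      new_space = hash_table_bytes * 2 + (*nbatch * SKETCH_BLCKSZ);

        if (current_space < new_space)
            break;              /* halving no longer reduces total memory */

        *nbatch /= 2;
        *space_allowed *= 2;    /* allow the in-memory hash table to grow */
    }
}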
     940             : 
     941             : 
     942             : /* ----------------------------------------------------------------
     943             :  *      ExecHashTableDestroy
     944             :  *
     945             :  *      destroy a hash table
     946             :  * ----------------------------------------------------------------
     947             :  */
     948             : void
     949       26002 : ExecHashTableDestroy(HashJoinTable hashtable)
     950             : {
     951             :     int         i;
     952             : 
     953             :     /*
     954             :      * Make sure all the temp files are closed.  We skip batch 0, since it
     955             :      * can't have any temp files (and the arrays might not even exist if
     956             :      * nbatch is only 1).  Parallel hash joins don't use these files.
     957             :      */
     958       26002 :     if (hashtable->innerBatchFile != NULL)
     959             :     {
     960        1096 :         for (i = 1; i < hashtable->nbatch; i++)
     961             :         {
     962         884 :             if (hashtable->innerBatchFile[i])
     963           0 :                 BufFileClose(hashtable->innerBatchFile[i]);
     964         884 :             if (hashtable->outerBatchFile[i])
     965           0 :                 BufFileClose(hashtable->outerBatchFile[i]);
     966             :         }
     967             :     }
     968             : 
     969             :     /* Release working memory (batchCxt is a child, so it goes away too) */
     970       26002 :     MemoryContextDelete(hashtable->hashCxt);
     971             : 
     972             :     /* And drop the control block */
     973       26002 :     pfree(hashtable);
     974       26002 : }
     975             : 
     976             : /*
     977             :  * Consider adjusting the allowed hash table size, depending on the number
     978             :  * of batches, to minimize the overall memory usage (for both the hashtable
     979             :  * and batch files).
     980             :  *
     981             :  * We're adjusting the size of the hash table, not the (optimal) number of
     982             :  * buckets. We can't change that once we start building the hash, due to how
     983             :  * ExecHashGetBucketAndBatch calculates batchno/bucketno from the hash. This
     984             :  * means the load factor may not be optimal, but we're in damage control so
     985             :  * we accept slower lookups. It's still much better than batch explosion.
     986             :  *
     987             :  * Returns true if we chose to increase the batch size (and thus we don't
     988             :  * need to add batches), and false if we should increase nbatch.
     989             :  */
     990             : static bool
     991         192 : ExecHashIncreaseBatchSize(HashJoinTable hashtable)
     992             : {
     993             :     /*
     994             :      * How much additional memory would doubling nbatch use? Each batch may
     995             :      * require two buffered files (inner/outer), with a BLCKSZ buffer.
     996             :      */
     997         192 :     size_t      batchSpace = (hashtable->nbatch * 2 * BLCKSZ);
     998             : 
     999             :     /*
    1000             :      * Compare the new space needed for doubling nbatch and for enlarging the
    1001             :      * in-memory hash table. If doubling the hash table needs less memory,
     1002             :      * just do that. Otherwise, continue with doubling nbatch.
     1003             :      *
     1004             :      * We're either doubling spaceAllowed or batchSpace, so comparing the two
     1005             :      * values directly tells us which doubling increases the total memory
     1006             :      * usage the least. (A worked example follows this function.)
    1007             :      */
    1008         192 :     if (hashtable->spaceAllowed <= batchSpace)
    1009             :     {
    1010           0 :         hashtable->spaceAllowed *= 2;
    1011           0 :         return true;
    1012             :     }
    1013             : 
    1014         192 :     return false;
    1015             : }
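
/*
 * A worked example of the comparison above, assuming BLCKSZ = 8192: with
 * nbatch = 32, the next doubling of nbatch adds batchSpace = 32 * 2 * 8192
 * = 512 kB of file buffers, whereas doubling a 4 MB spaceAllowed adds
 * 4 MB.  Since spaceAllowed > batchSpace, doubling nbatch is the cheaper
 * move and the function returns false.  Only once nbatch has grown enough
 * that batchSpace reaches spaceAllowed does enlarging the in-memory hash
 * table become the cheaper option.
 */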
    1016             : 
    1017             : /*
    1018             :  * ExecHashIncreaseNumBatches
    1019             :  *      increase the original number of batches in order to reduce
    1020             :  *      current memory consumption
    1021             :  */
    1022             : static void
    1023      829152 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
    1024             : {
    1025      829152 :     int         oldnbatch = hashtable->nbatch;
    1026      829152 :     int         curbatch = hashtable->curbatch;
    1027             :     int         nbatch;
    1028             :     long        ninmemory;
    1029             :     long        nfreed;
    1030             :     HashMemoryChunk oldchunks;
    1031             : 
    1032             :     /* do nothing if we've decided to shut off growth */
    1033      829152 :     if (!hashtable->growEnabled)
    1034      828960 :         return;
    1035             : 
    1036             :     /* safety check to avoid overflow */
    1037         192 :     if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
    1038           0 :         return;
    1039             : 
    1040             :     /* consider increasing size of the in-memory hash table instead */
    1041         192 :     if (ExecHashIncreaseBatchSize(hashtable))
    1042           0 :         return;
    1043             : 
    1044         192 :     nbatch = oldnbatch * 2;
    1045             :     Assert(nbatch > 1);
    1046             : 
    1047             : #ifdef HJDEBUG
    1048             :     printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
    1049             :            hashtable, nbatch, hashtable->spaceUsed);
    1050             : #endif
    1051             : 
    1052         192 :     if (hashtable->innerBatchFile == NULL)
    1053             :     {
    1054          94 :         MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    1055             : 
    1056             :         /* we had no file arrays before */
    1057          94 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
    1058          94 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
    1059             : 
    1060          94 :         MemoryContextSwitchTo(oldcxt);
    1061             : 
    1062             :         /* time to establish the temp tablespaces, too */
    1063          94 :         PrepareTempTablespaces();
    1064             :     }
    1065             :     else
    1066             :     {
    1067             :         /* enlarge arrays and zero out added entries */
    1068          98 :         hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
    1069          98 :         hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
    1070             :     }
    1071             : 
    1072         192 :     hashtable->nbatch = nbatch;
    1073             : 
    1074             :     /*
    1075             :      * Scan through the existing hash table entries and dump out any that are
     1076             :      * no longer part of the current batch.
    1077             :      */
    1078         192 :     ninmemory = nfreed = 0;
    1079             : 
     1080             :     /* If we know we need to resize nbuckets, we can do it while rebatching. */
    1081         192 :     if (hashtable->nbuckets_optimal != hashtable->nbuckets)
    1082             :     {
    1083             :         /* we never decrease the number of buckets */
    1084             :         Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
    1085             : 
    1086          94 :         hashtable->nbuckets = hashtable->nbuckets_optimal;
    1087          94 :         hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
    1088             : 
    1089          94 :         hashtable->buckets.unshared =
    1090          94 :             repalloc_array(hashtable->buckets.unshared,
    1091             :                            HashJoinTuple, hashtable->nbuckets);
    1092             :     }
    1093             : 
    1094             :     /*
    1095             :      * We will scan through the chunks directly, so that we can reset the
     1096             :      * buckets now and not have to keep track of which tuples in the buckets
     1097             :      * have already been processed. We will free the old chunks as we go.
    1098             :      */
    1099         192 :     memset(hashtable->buckets.unshared, 0,
    1100         192 :            sizeof(HashJoinTuple) * hashtable->nbuckets);
    1101         192 :     oldchunks = hashtable->chunks;
    1102         192 :     hashtable->chunks = NULL;
    1103             : 
    1104             :     /* so, let's scan through the old chunks, and all tuples in each chunk */
    1105         960 :     while (oldchunks != NULL)
    1106             :     {
    1107         768 :         HashMemoryChunk nextchunk = oldchunks->next.unshared;
    1108             : 
    1109             :         /* position within the buffer (up to oldchunks->used) */
    1110         768 :         size_t      idx = 0;
    1111             : 
    1112             :         /* process all tuples stored in this chunk (and then free it) */
    1113      524688 :         while (idx < oldchunks->used)
    1114             :         {
    1115      523920 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
    1116      523920 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1117      523920 :             int         hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
    1118             :             int         bucketno;
    1119             :             int         batchno;
    1120             : 
    1121      523920 :             ninmemory++;
    1122      523920 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1123             :                                       &bucketno, &batchno);
    1124             : 
    1125      523920 :             if (batchno == curbatch)
    1126             :             {
    1127             :                 /* keep tuple in memory - copy it into the new chunk */
    1128             :                 HashJoinTuple copyTuple;
    1129             : 
    1130      194758 :                 copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1131      194758 :                 memcpy(copyTuple, hashTuple, hashTupleSize);
    1132             : 
    1133             :                 /* and add it back to the appropriate bucket */
    1134      194758 :                 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1135      194758 :                 hashtable->buckets.unshared[bucketno] = copyTuple;
    1136             :             }
    1137             :             else
    1138             :             {
    1139             :                 /* dump it out */
    1140             :                 Assert(batchno > curbatch);
    1141      329162 :                 ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
    1142             :                                       hashTuple->hashvalue,
    1143      329162 :                                       &hashtable->innerBatchFile[batchno],
    1144             :                                       hashtable);
    1145             : 
    1146      329162 :                 hashtable->spaceUsed -= hashTupleSize;
    1147      329162 :                 nfreed++;
    1148             :             }
    1149             : 
    1150             :             /* next tuple in this chunk */
    1151      523920 :             idx += MAXALIGN(hashTupleSize);
    1152             : 
    1153             :             /* allow this loop to be cancellable */
    1154      523920 :             CHECK_FOR_INTERRUPTS();
    1155             :         }
    1156             : 
    1157             :         /* we're done with this chunk - free it and proceed to the next one */
    1158         768 :         pfree(oldchunks);
    1159         768 :         oldchunks = nextchunk;
    1160             :     }
    1161             : 
    1162             : #ifdef HJDEBUG
    1163             :     printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
    1164             :            hashtable, nfreed, ninmemory, hashtable->spaceUsed);
    1165             : #endif
    1166             : 
    1167             :     /*
    1168             :      * If we dumped out either all or none of the tuples in the table, disable
    1169             :      * further expansion of nbatch.  This situation implies that we have
    1170             :      * enough tuples of identical hashvalues to overflow spaceAllowed.
    1171             :      * Increasing nbatch will not fix it since there's no way to subdivide the
    1172             :      * group any more finely. We have to just gut it out and hope the server
    1173             :      * has enough RAM.
    1174             :      */
    1175         192 :     if (nfreed == 0 || nfreed == ninmemory)
    1176             :     {
    1177          48 :         hashtable->growEnabled = false;
    1178             : #ifdef HJDEBUG
    1179             :         printf("Hashjoin %p: disabling further increase of nbatch\n",
    1180             :                hashtable);
    1181             : #endif
    1182             :     }
    1183             : }
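
/*
 * Why the Assert(batchno > curbatch) above holds: batch numbers come from
 * the hash bits just above the bucket bits, so when nbatch doubles, a
 * tuple's new batchno is congruent to its old batchno modulo the old
 * nbatch.  A tuple that was in memory (old batchno == curbatch) can only
 * map to curbatch itself or to curbatch + k * oldnbatch with k >= 1, never
 * to an earlier batch.  (When nbuckets changes too, we are necessarily
 * still on batch 0, so the assertion holds trivially.)  A simplified,
 * shift-based sketch of the mapping; the real ExecHashGetBucketAndBatch
 * may rotate rather than shift, but it consumes the same bits here:
 */
#include <stdint.h>

static inline int
batchno_sketch(uint32_t hashvalue, int log2_nbuckets, int nbatch)
{
    /* take the hash bits just above the ones selecting the bucket */
    return (int) ((hashvalue >> log2_nbuckets) & (uint32_t) (nbatch - 1));
}

/*
 * With log2_nbuckets = 10 and hashvalue = 0x00012400 (bits above the
 * bucket bits: 0x49):
 *     nbatch =  8:  batchno = 0x49 & 0x7 = 1
 *     nbatch = 16:  batchno = 0x49 & 0xf = 9 = 1 + 8
 */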
    1184             : 
    1185             : /*
    1186             :  * ExecParallelHashIncreaseNumBatches
    1187             :  *      Every participant attached to grow_batches_barrier must run this
    1188             :  *      function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
    1189             :  */
    1190             : static void
    1191          60 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
    1192             : {
    1193          60 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1194             : 
    1195             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1196             : 
    1197             :     /*
    1198             :      * It's unlikely, but we need to be prepared for new participants to show
     1199             :      * up while we're in the middle of this operation, so we need to switch
     1200             :      * on the barrier phase here.
    1201             :      */
    1202          60 :     switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
    1203             :     {
    1204          60 :         case PHJ_GROW_BATCHES_ELECT:
    1205             : 
    1206             :             /*
    1207             :              * Elect one participant to prepare to grow the number of batches.
    1208             :              * This involves reallocating or resetting the buckets of batch 0
    1209             :              * in preparation for all participants to begin repartitioning the
    1210             :              * tuples.
    1211             :              */
    1212          60 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1213             :                                      WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
    1214             :             {
    1215             :                 dsa_pointer_atomic *buckets;
    1216             :                 ParallelHashJoinBatch *old_batch0;
    1217             :                 int         new_nbatch;
    1218             :                 int         i;
    1219             : 
    1220             :                 /* Move the old batch out of the way. */
    1221          52 :                 old_batch0 = hashtable->batches[0].shared;
    1222          52 :                 pstate->old_batches = pstate->batches;
    1223          52 :                 pstate->old_nbatch = hashtable->nbatch;
    1224          52 :                 pstate->batches = InvalidDsaPointer;
    1225             : 
    1226             :                 /* Free this backend's old accessors. */
    1227          52 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1228             : 
    1229             :                 /* Figure out how many batches to use. */
    1230          52 :                 if (hashtable->nbatch == 1)
    1231             :                 {
    1232             :                     /*
    1233             :                      * We are going from single-batch to multi-batch.  We need
    1234             :                      * to switch from one large combined memory budget to the
    1235             :                      * regular hash_mem budget.
    1236             :                      */
    1237          36 :                     pstate->space_allowed = get_hash_memory_limit();
    1238             : 
    1239             :                     /*
    1240             :                      * The combined hash_mem of all participants wasn't
    1241             :                      * enough. Therefore one batch per participant would be
    1242             :                      * approximately equivalent and would probably also be
    1243             :                      * insufficient.  So try two batches per participant,
    1244             :                      * rounded up to a power of two.
    1245             :                      */
    1246          36 :                     new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
    1247             :                 }
    1248             :                 else
    1249             :                 {
    1250             :                     /*
    1251             :                      * We were already multi-batched.  Try doubling the number
    1252             :                      * of batches.
    1253             :                      */
    1254          16 :                     new_nbatch = hashtable->nbatch * 2;
    1255             :                 }
    1256             : 
    1257             :                 /* Allocate new larger generation of batches. */
    1258             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1259          52 :                 ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
    1260             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1261             : 
    1262             :                 /* Replace or recycle batch 0's bucket array. */
    1263          52 :                 if (pstate->old_nbatch == 1)
    1264             :                 {
    1265             :                     double      dtuples;
    1266             :                     double      dbuckets;
    1267             :                     int         new_nbuckets;
    1268             :                     uint32      max_buckets;
    1269             : 
    1270             :                     /*
    1271             :                      * We probably also need a smaller bucket array.  How many
    1272             :                      * tuples do we expect per batch, assuming we have only
    1273             :                      * half of them so far?  Normally we don't need to change
    1274             :                      * the bucket array's size, because the size of each batch
    1275             :                      * stays the same as we add more batches, but in this
    1276             :                      * special case we move from a large batch to many smaller
    1277             :                      * batches and it would be wasteful to keep the large
    1278             :                      * array.
    1279             :                      */
    1280          36 :                     dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
    1281             : 
    1282             :                     /*
    1283             :                      * We need to calculate the maximum number of buckets to
    1284             :                      * stay within the MaxAllocSize boundary.  Round the
    1285             :                      * maximum number to the previous power of 2 given that
    1286             :                      * later we round the number to the next power of 2.
    1287             :                      */
    1288          36 :                     max_buckets = pg_prevpower2_32((uint32)
    1289             :                                                    (MaxAllocSize / sizeof(dsa_pointer_atomic)));
    1290          36 :                     dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
    1291          36 :                     dbuckets = Min(dbuckets, max_buckets);
    1292          36 :                     new_nbuckets = (int) dbuckets;
    1293          36 :                     new_nbuckets = Max(new_nbuckets, 1024);
    1294          36 :                     new_nbuckets = pg_nextpower2_32(new_nbuckets);
    1295          36 :                     dsa_free(hashtable->area, old_batch0->buckets);
    1296          72 :                     hashtable->batches[0].shared->buckets =
    1297          36 :                         dsa_allocate(hashtable->area,
    1298             :                                      sizeof(dsa_pointer_atomic) * new_nbuckets);
    1299             :                     buckets = (dsa_pointer_atomic *)
    1300          36 :                         dsa_get_address(hashtable->area,
    1301          36 :                                         hashtable->batches[0].shared->buckets);
    1302      110628 :                     for (i = 0; i < new_nbuckets; ++i)
    1303      110592 :                         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1304          36 :                     pstate->nbuckets = new_nbuckets;
    1305             :                 }
    1306             :                 else
    1307             :                 {
    1308             :                     /* Recycle the existing bucket array. */
    1309          16 :                     hashtable->batches[0].shared->buckets = old_batch0->buckets;
    1310             :                     buckets = (dsa_pointer_atomic *)
    1311          16 :                         dsa_get_address(hashtable->area, old_batch0->buckets);
    1312       65552 :                     for (i = 0; i < hashtable->nbuckets; ++i)
    1313       65536 :                         dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
    1314             :                 }
    1315             : 
    1316             :                 /* Move all chunks to the work queue for parallel processing. */
    1317          52 :                 pstate->chunk_work_queue = old_batch0->chunks;
    1318             : 
    1319             :                 /* Disable further growth temporarily while we're growing. */
    1320          52 :                 pstate->growth = PHJ_GROWTH_DISABLED;
    1321             :             }
    1322             :             else
    1323             :             {
    1324             :                 /* All other participants just flush their tuples to disk. */
    1325           8 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1326             :             }
    1327             :             /* Fall through. */
    1328             : 
    1329             :         case PHJ_GROW_BATCHES_REALLOCATE:
    1330             :             /* Wait for the above to be finished. */
    1331          60 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1332             :                                  WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
    1333             :             /* Fall through. */
    1334             : 
    1335          60 :         case PHJ_GROW_BATCHES_REPARTITION:
    1336             :             /* Make sure that we have the current dimensions and buckets. */
    1337          60 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1338          60 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1339             :             /* Then partition, flush counters. */
    1340          60 :             ExecParallelHashRepartitionFirst(hashtable);
    1341          60 :             ExecParallelHashRepartitionRest(hashtable);
    1342          60 :             ExecParallelHashMergeCounters(hashtable);
    1343             :             /* Wait for the above to be finished. */
    1344          60 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1345             :                                  WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
    1346             :             /* Fall through. */
    1347             : 
    1348          60 :         case PHJ_GROW_BATCHES_DECIDE:
    1349             : 
    1350             :             /*
    1351             :              * Elect one participant to clean up and decide whether further
    1352             :              * repartitioning is needed, or should be disabled because it's
    1353             :              * not helping.
    1354             :              */
    1355          60 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1356             :                                      WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
    1357             :             {
    1358             :                 ParallelHashJoinBatch *old_batches;
    1359          52 :                 bool        space_exhausted = false;
    1360          52 :                 bool        extreme_skew_detected = false;
    1361             : 
    1362             :                 /* Make sure that we have the current dimensions and buckets. */
    1363          52 :                 ExecParallelHashEnsureBatchAccessors(hashtable);
    1364          52 :                 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1365             : 
    1366          52 :                 old_batches = dsa_get_address(hashtable->area, pstate->old_batches);
    1367             : 
    1368             :                 /* Are any of the new generation of batches exhausted? */
    1369         372 :                 for (int i = 0; i < hashtable->nbatch; ++i)
    1370             :                 {
    1371             :                     ParallelHashJoinBatch *batch;
    1372             :                     ParallelHashJoinBatch *old_batch;
    1373             :                     int         parent;
    1374             : 
    1375         320 :                     batch = hashtable->batches[i].shared;
    1376         320 :                     if (batch->space_exhausted ||
    1377         320 :                         batch->estimated_size > pstate->space_allowed)
    1378          24 :                         space_exhausted = true;
    1379             : 
    1380         320 :                     parent = i % pstate->old_nbatch;
    1381         320 :                     old_batch = NthParallelHashJoinBatch(old_batches, parent);
    1382         320 :                     if (old_batch->space_exhausted ||
    1383          96 :                         batch->estimated_size > pstate->space_allowed)
    1384             :                     {
    1385             :                         /*
    1386             :                          * Did this batch receive ALL of the tuples from its
    1387             :                          * parent batch?  That would indicate that further
    1388             :                          * repartitioning isn't going to help (the hash values
    1389             :                          * are probably all the same).
    1390             :                          */
    1391         224 :                         if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
    1392          24 :                             extreme_skew_detected = true;
    1393             :                     }
    1394             :                 }
    1395             : 
    1396             :                 /* Don't keep growing if it's not helping or we'd overflow. */
    1397          52 :                 if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
    1398          24 :                     pstate->growth = PHJ_GROWTH_DISABLED;
    1399          28 :                 else if (space_exhausted)
    1400           0 :                     pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    1401             :                 else
    1402          28 :                     pstate->growth = PHJ_GROWTH_OK;
    1403             : 
    1404             :                 /* Free the old batches in shared memory. */
    1405          52 :                 dsa_free(hashtable->area, pstate->old_batches);
    1406          52 :                 pstate->old_batches = InvalidDsaPointer;
    1407             :             }
    1408             :             /* Fall through. */
    1409             : 
    1410             :         case PHJ_GROW_BATCHES_FINISH:
    1411             :             /* Wait for the above to complete. */
    1412          60 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1413             :                                  WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
    1414             :     }
    1415          60 : }
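
/*
 * The shape of the phase machine above, distilled into a sketch with
 * hypothetical helper names.  Every case falls through, so a participant
 * that attaches late enters the switch at the current phase and skips the
 * work that earlier phases already completed; BarrierArriveAndWait()
 * returns true in exactly one participant, which performs the serial step
 * while the others wait:
 *
 *     switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(barrier)))
 *     {
 *         case PHJ_GROW_BATCHES_ELECT:
 *             if (BarrierArriveAndWait(barrier, ...))
 *                 prepare_shared_state();     (elected participant only)
 *             (fall through)
 *
 *         case PHJ_GROW_BATCHES_REPARTITION:
 *             repartition_my_share();         (all participants)
 *             BarrierArriveAndWait(barrier, ...);
 *             (fall through)
 *
 *         case PHJ_GROW_BATCHES_FINISH:
 *             BarrierArriveAndWait(barrier, ...);
 *     }
 */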
    1416             : 
    1417             : /*
    1418             :  * Repartition the tuples currently loaded into memory for inner batch 0
    1419             :  * because the number of batches has been increased.  Some tuples are retained
    1420             :  * in memory and some are written out to a later batch.
    1421             :  */
    1422             : static void
    1423          60 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
    1424             : {
    1425             :     dsa_pointer chunk_shared;
    1426             :     HashMemoryChunk chunk;
    1427             : 
    1428             :     Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
    1429             : 
    1430         422 :     while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
    1431             :     {
    1432         302 :         size_t      idx = 0;
    1433             : 
    1434             :         /* Repartition all tuples in this chunk. */
    1435      230316 :         while (idx < chunk->used)
    1436             :         {
    1437      230014 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1438      230014 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1439             :             HashJoinTuple copyTuple;
    1440             :             dsa_pointer shared;
    1441             :             int         bucketno;
    1442             :             int         batchno;
    1443             : 
    1444      230014 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1445             :                                       &bucketno, &batchno);
    1446             : 
    1447             :             Assert(batchno < hashtable->nbatch);
    1448      230014 :             if (batchno == 0)
    1449             :             {
    1450             :                 /* It still belongs in batch 0.  Copy to a new chunk. */
    1451             :                 copyTuple =
    1452       55394 :                     ExecParallelHashTupleAlloc(hashtable,
    1453       55394 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1454             :                                                &shared);
    1455       55394 :                 copyTuple->hashvalue = hashTuple->hashvalue;
    1456       55394 :                 memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
    1457       55394 :                 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1458             :                                           copyTuple, shared);
    1459             :             }
    1460             :             else
    1461             :             {
    1462      174620 :                 size_t      tuple_size =
    1463      174620 :                     MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1464             : 
    1465             :                 /* It belongs in a later batch. */
    1466      174620 :                 hashtable->batches[batchno].estimated_size += tuple_size;
    1467      174620 :                 sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1468      174620 :                              &hashTuple->hashvalue, tuple);
    1469             :             }
    1470             : 
    1471             :             /* Count this tuple. */
    1472      230014 :             ++hashtable->batches[0].old_ntuples;
    1473      230014 :             ++hashtable->batches[batchno].ntuples;
    1474             : 
    1475      230014 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1476             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1477             :         }
    1478             : 
    1479             :         /* Free this chunk. */
    1480         302 :         dsa_free(hashtable->area, chunk_shared);
    1481             : 
    1482         302 :         CHECK_FOR_INTERRUPTS();
    1483             :     }
    1484          60 : }
    1485             : 
    1486             : /*
    1487             :  * Help repartition inner batches 1..n.
    1488             :  */
    1489             : static void
    1490          60 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
    1491             : {
    1492          60 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1493          60 :     int         old_nbatch = pstate->old_nbatch;
    1494             :     SharedTuplestoreAccessor **old_inner_tuples;
    1495             :     ParallelHashJoinBatch *old_batches;
    1496             :     int         i;
    1497             : 
    1498             :     /* Get our hands on the previous generation of batches. */
    1499             :     old_batches = (ParallelHashJoinBatch *)
    1500          60 :         dsa_get_address(hashtable->area, pstate->old_batches);
    1501          60 :     old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
    1502         132 :     for (i = 1; i < old_nbatch; ++i)
    1503             :     {
    1504          72 :         ParallelHashJoinBatch *shared =
    1505          72 :             NthParallelHashJoinBatch(old_batches, i);
    1506             : 
    1507          72 :         old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
    1508             :                                          ParallelWorkerNumber + 1,
    1509             :                                          &pstate->fileset);
    1510             :     }
    1511             : 
    1512             :     /* Join in the effort to repartition them. */
    1513         132 :     for (i = 1; i < old_nbatch; ++i)
    1514             :     {
    1515             :         MinimalTuple tuple;
    1516             :         uint32      hashvalue;
    1517             : 
    1518             :         /* Scan one partition from the previous generation. */
    1519          72 :         sts_begin_parallel_scan(old_inner_tuples[i]);
    1520      189950 :         while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
    1521             :         {
    1522      189878 :             size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1523             :             int         bucketno;
    1524             :             int         batchno;
    1525             : 
    1526             :             /* Decide which partition it goes to in the new generation. */
    1527      189878 :             ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
    1528             :                                       &batchno);
    1529             : 
    1530      189878 :             hashtable->batches[batchno].estimated_size += tuple_size;
    1531      189878 :             ++hashtable->batches[batchno].ntuples;
    1532      189878 :             ++hashtable->batches[i].old_ntuples;
    1533             : 
     1534             :             /* Store the tuple in its new batch. */
    1535      189878 :             sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1536             :                          &hashvalue, tuple);
    1537             : 
    1538      189878 :             CHECK_FOR_INTERRUPTS();
    1539             :         }
    1540          72 :         sts_end_parallel_scan(old_inner_tuples[i]);
    1541             :     }
    1542             : 
    1543          60 :     pfree(old_inner_tuples);
    1544          60 : }
    1545             : 
    1546             : /*
    1547             :  * Transfer the backend-local per-batch counters to the shared totals.
    1548             :  */
    1549             : static void
    1550         340 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
    1551             : {
    1552         340 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1553             :     int         i;
    1554             : 
    1555         340 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    1556         340 :     pstate->total_tuples = 0;
    1557        1728 :     for (i = 0; i < hashtable->nbatch; ++i)
    1558             :     {
    1559        1388 :         ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
    1560             : 
    1561        1388 :         batch->shared->size += batch->size;
    1562        1388 :         batch->shared->estimated_size += batch->estimated_size;
    1563        1388 :         batch->shared->ntuples += batch->ntuples;
    1564        1388 :         batch->shared->old_ntuples += batch->old_ntuples;
    1565        1388 :         batch->size = 0;
    1566        1388 :         batch->estimated_size = 0;
    1567        1388 :         batch->ntuples = 0;
    1568        1388 :         batch->old_ntuples = 0;
    1569        1388 :         pstate->total_tuples += batch->shared->ntuples;
    1570             :     }
    1571         340 :     LWLockRelease(&pstate->lock);
    1572         340 : }
    1573             : 
    1574             : /*
    1575             :  * ExecHashIncreaseNumBuckets
    1576             :  *      increase the original number of buckets in order to reduce
    1577             :  *      number of tuples per bucket
    1578             :  */
    1579             : static void
    1580          72 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
    1581             : {
    1582             :     HashMemoryChunk chunk;
    1583             : 
    1584             :     /* do nothing if not an increase (it's called increase for a reason) */
    1585          72 :     if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
    1586           0 :         return;
    1587             : 
    1588             : #ifdef HJDEBUG
    1589             :     printf("Hashjoin %p: increasing nbuckets %d => %d\n",
    1590             :            hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
    1591             : #endif
    1592             : 
    1593          72 :     hashtable->nbuckets = hashtable->nbuckets_optimal;
    1594          72 :     hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
    1595             : 
    1596             :     Assert(hashtable->nbuckets > 1);
    1597             :     Assert(hashtable->nbuckets <= (INT_MAX / 2));
    1598             :     Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
    1599             : 
    1600             :     /*
     1601             :      * Just reallocate the bucket array to the proper size - we don't need
     1602             :      * to walk the buckets, because we can walk the dense-allocated chunks
     1603             :      * instead (just like in ExecHashIncreaseNumBatches, but without all the
     1604             :      * copying into new chunks).
    1605             :      */
    1606          72 :     hashtable->buckets.unshared =
    1607          72 :         repalloc_array(hashtable->buckets.unshared,
    1608             :                        HashJoinTuple, hashtable->nbuckets);
    1609             : 
    1610          72 :     memset(hashtable->buckets.unshared, 0,
    1611          72 :            hashtable->nbuckets * sizeof(HashJoinTuple));
    1612             : 
    1613             :     /* scan through all tuples in all chunks to rebuild the hash table */
    1614        1008 :     for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
    1615             :     {
    1616             :         /* process all tuples stored in this chunk */
    1617         936 :         size_t      idx = 0;
    1618             : 
    1619      720936 :         while (idx < chunk->used)
    1620             :         {
    1621      720000 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1622             :             int         bucketno;
    1623             :             int         batchno;
    1624             : 
    1625      720000 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1626             :                                       &bucketno, &batchno);
    1627             : 
    1628             :             /* add the tuple to the proper bucket */
    1629      720000 :             hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1630      720000 :             hashtable->buckets.unshared[bucketno] = hashTuple;
    1631             : 
    1632             :             /* advance index past the tuple */
    1633      720000 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1634             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1635             :         }
    1636             : 
    1637             :         /* allow this loop to be cancellable */
    1638         936 :         CHECK_FOR_INTERRUPTS();
    1639             :     }
    1640             : }
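
/*
 * The chunk walk above in minimal standalone form, with hypothetical types
 * and an assumed 8-byte MAXALIGN: tuples are densely packed into chunks,
 * so rebuilding the bucket array needs only a linear scan that advances by
 * each tuple's aligned size, with no per-bucket traversal and no copying.
 */
#include <stddef.h>

#define SKETCH_MAXALIGN(len) (((len) + 7) & ~(size_t) 7)

typedef struct SketchChunk
{
    struct SketchChunk *next;   /* chunks form a singly linked list */
    size_t      used;           /* bytes of tuple data stored in data[] */
    char        data[];         /* densely packed tuples */
} SketchChunk;

/* hypothetical: returns the unaligned size of the tuple at this address */
extern size_t tuple_size_at(const char *tuple);

static void
walk_chunks_sketch(SketchChunk *chunks, void (*visit) (char *tuple))
{
    SketchChunk *chunk;

    for (chunk = chunks; chunk != NULL; chunk = chunk->next)
    {
        size_t      idx = 0;

        while (idx < chunk->used)
        {
            char       *tuple = chunk->data + idx;

            visit(tuple);
            idx += SKETCH_MAXALIGN(tuple_size_at(tuple));
        }
    }
}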
    1641             : 
    1642             : static void
    1643         132 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
    1644             : {
    1645         132 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1646             :     int         i;
    1647             :     HashMemoryChunk chunk;
    1648             :     dsa_pointer chunk_s;
    1649             : 
    1650             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1651             : 
    1652             :     /*
    1653             :      * It's unlikely, but we need to be prepared for new participants to show
     1654             :      * up while we're in the middle of this operation, so we need to switch
     1655             :      * on the barrier phase here.
    1656             :      */
    1657         132 :     switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
    1658             :     {
    1659         132 :         case PHJ_GROW_BUCKETS_ELECT:
    1660             :             /* Elect one participant to prepare to increase nbuckets. */
    1661         132 :             if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1662             :                                      WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
    1663             :             {
    1664             :                 size_t      size;
    1665             :                 dsa_pointer_atomic *buckets;
    1666             : 
    1667             :                 /* Double the size of the bucket array. */
    1668         108 :                 pstate->nbuckets *= 2;
    1669         108 :                 size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
    1670         108 :                 hashtable->batches[0].shared->size += size / 2;
    1671         108 :                 dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
    1672         216 :                 hashtable->batches[0].shared->buckets =
    1673         108 :                     dsa_allocate(hashtable->area, size);
    1674             :                 buckets = (dsa_pointer_atomic *)
    1675         108 :                     dsa_get_address(hashtable->area,
    1676         108 :                                     hashtable->batches[0].shared->buckets);
    1677      933996 :                 for (i = 0; i < pstate->nbuckets; ++i)
    1678      933888 :                     dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1679             : 
    1680             :                 /* Put the chunk list onto the work queue. */
    1681         108 :                 pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
    1682             : 
    1683             :                 /* Clear the flag. */
    1684         108 :                 pstate->growth = PHJ_GROWTH_OK;
    1685             :             }
    1686             :             /* Fall through. */
    1687             : 
    1688             :         case PHJ_GROW_BUCKETS_REALLOCATE:
    1689             :             /* Wait for the above to complete. */
    1690         132 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1691             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
    1692             :             /* Fall through. */
    1693             : 
    1694         132 :         case PHJ_GROW_BUCKETS_REINSERT:
    1695             :             /* Reinsert all tuples into the hash table. */
    1696         132 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1697         132 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1698         938 :             while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
    1699             :             {
    1700         674 :                 size_t      idx = 0;
    1701             : 
    1702      551242 :                 while (idx < chunk->used)
    1703             :                 {
    1704      550568 :                     HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1705      550568 :                     dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
    1706             :                     int         bucketno;
    1707             :                     int         batchno;
    1708             : 
    1709      550568 :                     ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1710             :                                               &bucketno, &batchno);
    1711             :                     Assert(batchno == 0);
    1712             : 
    1713             :                     /* add the tuple to the proper bucket */
    1714      550568 :                     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1715             :                                               hashTuple, shared);
    1716             : 
    1717             :                     /* advance index past the tuple */
    1718      550568 :                     idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1719             :                                     HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1720             :                 }
    1721             : 
    1722             :                 /* allow this loop to be cancellable */
    1723         674 :                 CHECK_FOR_INTERRUPTS();
    1724             :             }
    1725         132 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1726             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
    1727             :     }
    1728         132 : }
    1729             : 
    1730             : /*
    1731             :  * ExecHashTableInsert
     1732             :  *      insert a tuple into the hash table; depending on the hash value,
     1733             :  *      it may instead go to a temp file for a later batch
    1734             :  *
    1735             :  * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
    1736             :  * tuple; the minimal case in particular is certain to happen while reloading
    1737             :  * tuples from batch files.  We could save some cycles in the regular-tuple
    1738             :  * case by not forcing the slot contents into minimal form; not clear if it's
    1739             :  * worth the messiness required.
    1740             :  */
    1741             : void
    1742    12142764 : ExecHashTableInsert(HashJoinTable hashtable,
    1743             :                     TupleTableSlot *slot,
    1744             :                     uint32 hashvalue)
    1745             : {
    1746             :     bool        shouldFree;
    1747    12142764 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1748             :     int         bucketno;
    1749             :     int         batchno;
    1750             : 
    1751    12142764 :     ExecHashGetBucketAndBatch(hashtable, hashvalue,
    1752             :                               &bucketno, &batchno);
    1753             : 
    1754             :     /*
    1755             :      * decide whether to put the tuple in the hash table or a temp file
    1756             :      */
    1757    12142764 :     if (batchno == hashtable->curbatch)
    1758             :     {
    1759             :         /*
     1760             :          * put the tuple in the hash table
    1761             :          */
    1762             :         HashJoinTuple hashTuple;
    1763             :         int         hashTupleSize;
    1764     9191814 :         double      ntuples = (hashtable->totalTuples - hashtable->skewTuples);
    1765             : 
    1766             :         /* Create the HashJoinTuple */
    1767     9191814 :         hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    1768     9191814 :         hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1769             : 
    1770     9191814 :         hashTuple->hashvalue = hashvalue;
    1771     9191814 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1772             : 
    1773             :         /*
    1774             :          * We always reset the tuple-matched flag on insertion.  This is okay
    1775             :          * even when reloading a tuple from a batch file, since the tuple
    1776             :          * could not possibly have been matched to an outer tuple before it
    1777             :          * went into the batch file.
    1778             :          */
    1779     9191814 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1780             : 
    1781             :         /* Push it onto the front of the bucket's list */
    1782     9191814 :         hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1783     9191814 :         hashtable->buckets.unshared[bucketno] = hashTuple;
    1784             : 
    1785             :         /*
    1786             :          * Increase the (optimal) number of buckets if we just exceeded the
    1787             :          * NTUP_PER_BUCKET threshold, but only when there's still a single
    1788             :          * batch.
    1789             :          */
    1790     9191814 :         if (hashtable->nbatch == 1 &&
    1791     5659744 :             ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
    1792             :         {
    1793             :             /* Guard against integer overflow and alloc size overflow */
    1794         260 :             if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
    1795         260 :                 hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
    1796             :             {
    1797         260 :                 hashtable->nbuckets_optimal *= 2;
    1798         260 :                 hashtable->log2_nbuckets_optimal += 1;
    1799             :             }
    1800             :         }
    1801             : 
    1802             :         /* Account for space used, and back off if we've used too much */
    1803     9191814 :         hashtable->spaceUsed += hashTupleSize;
    1804     9191814 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    1805     6784542 :             hashtable->spacePeak = hashtable->spaceUsed;
    1806     9191814 :         if (hashtable->spaceUsed +
    1807     9191814 :             hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
    1808     9191814 :             > hashtable->spaceAllowed)
    1809      829152 :             ExecHashIncreaseNumBatches(hashtable);
    1810             :     }
    1811             :     else
    1812             :     {
    1813             :         /*
    1814             :          * put the tuple into a temp file for later batches
    1815             :          */
    1816             :         Assert(batchno > hashtable->curbatch);
    1817     2950950 :         ExecHashJoinSaveTuple(tuple,
    1818             :                               hashvalue,
    1819     2950950 :                               &hashtable->innerBatchFile[batchno],
    1820             :                               hashtable);
    1821             :     }
    1822             : 
    1823    12142764 :     if (shouldFree)
    1824     8780564 :         heap_free_minimal_tuple(tuple);
    1825    12142764 : }
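
/*
 * A worked example of the nbuckets_optimal doubling above, recalling that
 * NTUP_PER_BUCKET is 1: in a single-batch build starting from
 * nbuckets_optimal = 1024, the 1025th tuple pushes ntuples past 1024 * 1,
 * so nbuckets_optimal doubles to 2048 and log2_nbuckets_optimal goes from
 * 10 to 11; the 2049th tuple doubles it again.  Only the target is updated
 * here; the bucket array itself is resized later, by
 * ExecHashIncreaseNumBuckets or while rebatching.
 */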
    1826             : 
    1827             : /*
    1828             :  * ExecParallelHashTableInsert
    1829             :  *      insert a tuple into a shared hash table or shared batch tuplestore
    1830             :  */
    1831             : void
    1832     2160192 : ExecParallelHashTableInsert(HashJoinTable hashtable,
    1833             :                             TupleTableSlot *slot,
    1834             :                             uint32 hashvalue)
    1835             : {
    1836             :     bool        shouldFree;
    1837     2160192 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1838             :     dsa_pointer shared;
    1839             :     int         bucketno;
    1840             :     int         batchno;
    1841             : 
    1842         352 : retry:
    1843     2160544 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1844             : 
    1845     2160544 :     if (batchno == 0)
    1846             :     {
    1847             :         HashJoinTuple hashTuple;
    1848             : 
    1849             :         /* Try to load it into memory. */
    1850             :         Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
    1851             :                PHJ_BUILD_HASH_INNER);
    1852     1293680 :         hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1853     1293680 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1854             :                                                &shared);
    1855     1293680 :         if (hashTuple == NULL)
    1856         312 :             goto retry;
    1857             : 
    1858             :         /* Store the hash value in the HashJoinTuple header. */
    1859     1293368 :         hashTuple->hashvalue = hashvalue;
    1860     1293368 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1861     1293368 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1862             : 
    1863             :         /* Push it onto the front of the bucket's list */
    1864     1293368 :         ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1865             :                                   hashTuple, shared);
    1866             :     }
    1867             :     else
    1868             :     {
    1869      866864 :         size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1870             : 
    1871             :         Assert(batchno > 0);
    1872             : 
    1873             :         /* Try to preallocate space in the batch if necessary. */
    1874      866864 :         if (hashtable->batches[batchno].preallocated < tuple_size)
    1875             :         {
    1876        1528 :             if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
    1877          40 :                 goto retry;
    1878             :         }
    1879             : 
    1880             :         Assert(hashtable->batches[batchno].preallocated >= tuple_size);
    1881      866824 :         hashtable->batches[batchno].preallocated -= tuple_size;
    1882      866824 :         sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
    1883             :                      tuple);
    1884             :     }
    1885     2160192 :     ++hashtable->batches[batchno].ntuples;
    1886             : 
    1887     2160192 :     if (shouldFree)
    1888     2160192 :         heap_free_minimal_tuple(tuple);
    1889     2160192 : }
    1890             : 
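ExecParallelHashTupleAlloc returns NULL when the allocation would bust the shared budget, and by then another worker may have doubled nbatch; the goto retry above therefore recomputes batchno for the same hash value before trying again. A much-simplified single-process model of that control flow (the sketch_* names are hypothetical; the real code coordinates growth through barriers and allocates from DSA memory):

#include <stdint.h>
#include <stdlib.h>

static int sketch_nbatch = 1;   /* doubled when the budget is hit */

static int
sketch_batchno(uint32_t hashvalue)
{
    /* nbatch is always a power of 2, so modulo is a mask */
    return (int) (hashvalue & (uint32_t) (sketch_nbatch - 1));
}

/* Returns NULL to simulate "budget exceeded: nbatch was doubled". */
static void *
sketch_alloc(size_t size)
{
    if (rand() % 8 == 0)
    {
        sketch_nbatch *= 2;
        return NULL;
    }
    return malloc(size);
}

static void
sketch_parallel_insert(uint32_t hashvalue, size_t size)
{
    for (;;)                    /* plays the role of the retry: label */
    {
        int batchno = sketch_batchno(hashvalue);    /* recomputed each pass */

        if (batchno == 0)
        {
            void *tuple = sketch_alloc(size);

            if (tuple == NULL)
                continue;       /* nbatch changed; batchno may now differ */
            /* ... copy the minimal tuple and push it onto its bucket ... */
            return;
        }
        /* ... batchno > 0: spill to that batch's shared tuplestore ... */
        return;
    }
}
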
    1891             : /*
    1892             :  * Insert a tuple into the current hash table.  Unlike
    1893             :  * ExecParallelHashTableInsert, this version is not prepared to send the tuple
    1894             :  * to other batches or to run out of memory, and should only be called with
    1895             :  * tuples that belong in the current batch once growth has been disabled.
    1896             :  */
    1897             : void
    1898     1041444 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
    1899             :                                         TupleTableSlot *slot,
    1900             :                                         uint32 hashvalue)
    1901             : {
    1902             :     bool        shouldFree;
    1903     1041444 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1904             :     HashJoinTuple hashTuple;
    1905             :     dsa_pointer shared;
    1906             :     int         batchno;
    1907             :     int         bucketno;
    1908             : 
    1909     1041444 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1910             :     Assert(batchno == hashtable->curbatch);
    1911     1041444 :     hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1912     1041444 :                                            HJTUPLE_OVERHEAD + tuple->t_len,
    1913             :                                            &shared);
    1914     1041444 :     hashTuple->hashvalue = hashvalue;
    1915     1041444 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1916     1041444 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1917     1041444 :     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1918             :                               hashTuple, shared);
    1919             : 
    1920     1041444 :     if (shouldFree)
    1921           0 :         heap_free_minimal_tuple(tuple);
    1922     1041444 : }
    1923             : 
    1924             : 
    1925             : /*
    1926             :  * ExecHashGetBucketAndBatch
    1927             :  *      Determine the bucket number and batch number for a hash value
    1928             :  *
    1929             :  * Note: on-the-fly increases of nbatch must not change the bucket number
    1930             :  * for a given hash code (since we don't move tuples to different hash
    1931             :  * chains), and must only cause the batch number to remain the same or
    1932             :  * increase.  Our algorithm is
    1933             :  *      bucketno = hashvalue MOD nbuckets
    1934             :  *      batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
    1935             :  * where nbuckets and nbatch are both expected to be powers of 2, so we can
    1936             :  * do the computations by shifting and masking.  (This assumes that all hash
    1937             :  * functions are good about randomizing all their output bits, else we are
    1938             :  * likely to have very skewed bucket or batch occupancy.)
    1939             :  *
    1940             :  * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
    1941             :  * bucket count growth.  Once we start batching, the value is fixed and does
    1942             :  * not change over the course of the join (making it possible to compute batch
    1943             :  * number the way we do here).
    1944             :  *
    1945             :  * nbatch is always a power of 2; we increase it only by doubling it.  This
    1946             :  * effectively adds one more bit to the top of the batchno.  In very large
    1947             :  * joins, we might run out of bits to add, so we do this by rotating the hash
    1948             :  * value.  This causes batchno to steal bits from bucketno when the number of
    1949             :  * virtual buckets exceeds 2^32.  It's better to have longer bucket chains
    1950             :  * than to lose the ability to divide batches.
    1951             :  */
    1952             : void
    1953    39919802 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
    1954             :                           uint32 hashvalue,
    1955             :                           int *bucketno,
    1956             :                           int *batchno)
    1957             : {
    1958    39919802 :     uint32      nbuckets = (uint32) hashtable->nbuckets;
    1959    39919802 :     uint32      nbatch = (uint32) hashtable->nbatch;
    1960             : 
    1961    39919802 :     if (nbatch > 1)
    1962             :     {
    1963    15154638 :         *bucketno = hashvalue & (nbuckets - 1);
    1964    15154638 :         *batchno = pg_rotate_right32(hashvalue,
    1965    15154638 :                                      hashtable->log2_nbuckets) & (nbatch - 1);
    1966             :     }
    1967             :     else
    1968             :     {
    1969    24765164 :         *bucketno = hashvalue & (nbuckets - 1);
    1970    24765164 :         *batchno = 0;
    1971             :     }
    1972    39919802 : }
    1973             : 
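The invariants in the comment are easy to verify concretely. With nbuckets = 1024, bucketno takes the low 10 bits and batchno reads the bits rotated in just above them, so doubling nbatch can only prepend one more hash bit to batchno while bucketno stays put. A standalone check, with pg_rotate_right32 reimplemented locally so the example compiles outside the tree:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for pg_rotate_right32(); the masked shift keeps the
 * expression well-defined even for n == 0. */
static inline uint32_t
ror32(uint32_t word, int n)
{
    return (word >> n) | (word << ((32 - n) & 31));
}

int
main(void)
{
    uint32_t hashvalue = 0xDEADBEEFu;
    uint32_t nbuckets = 1024;       /* power of 2, log2_nbuckets = 10 */
    int      log2_nbuckets = 10;
    uint32_t nbatch = 4;            /* power of 2 */

    uint32_t bucketno = hashvalue & (nbuckets - 1);
    uint32_t batchno = ror32(hashvalue, log2_nbuckets) & (nbatch - 1);
    uint32_t batchno2 = ror32(hashvalue, log2_nbuckets) & (2 * nbatch - 1);

    /* Doubling nbatch leaves the old batch bits (and bucketno) unchanged. */
    assert((batchno2 & (nbatch - 1)) == batchno);
    printf("bucketno=%u batchno=%u after doubling=%u\n",
           (unsigned) bucketno, (unsigned) batchno, (unsigned) batchno2);
    return 0;
}
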
    1974             : /*
    1975             :  * ExecScanHashBucket
    1976             :  *      scan a hash bucket for matches to the current outer tuple
    1977             :  *
    1978             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    1979             :  *
    1980             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    1981             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    1982             :  * for the latter.
    1983             :  */
    1984             : bool
    1985    22760366 : ExecScanHashBucket(HashJoinState *hjstate,
    1986             :                    ExprContext *econtext)
    1987             : {
    1988    22760366 :     ExprState  *hjclauses = hjstate->hashclauses;
    1989    22760366 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    1990    22760366 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    1991    22760366 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    1992             : 
    1993             :     /*
    1994             :      * hj_CurTuple is the address of the tuple last returned from the current
    1995             :      * bucket, or NULL if it's time to start scanning a new bucket.
    1996             :      *
    1997             :      * If the tuple hashed to a skew bucket then scan the skew bucket,
    1998             :      * otherwise scan the standard hashtable bucket.
    1999             :      */
    2000    22760366 :     if (hashTuple != NULL)
    2001     5158884 :         hashTuple = hashTuple->next.unshared;
    2002    17601482 :     else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
    2003        2400 :         hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
    2004             :     else
    2005    17599082 :         hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    2006             : 
    2007    27075888 :     while (hashTuple != NULL)
    2008             :     {
    2009    14718088 :         if (hashTuple->hashvalue == hashvalue)
    2010             :         {
    2011             :             TupleTableSlot *inntuple;
    2012             : 
    2013             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    2014    10402578 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2015             :                                              hjstate->hj_HashTupleSlot,
    2016             :                                              false);    /* do not pfree */
    2017    10402578 :             econtext->ecxt_innertuple = inntuple;
    2018             : 
    2019    10402578 :             if (ExecQualAndReset(hjclauses, econtext))
    2020             :             {
    2021    10402566 :                 hjstate->hj_CurTuple = hashTuple;
    2022    10402566 :                 return true;
    2023             :             }
    2024             :         }
    2025             : 
    2026     4315522 :         hashTuple = hashTuple->next.unshared;
    2027             :     }
    2028             : 
    2029             :     /*
    2030             :      * no match
    2031             :      */
    2032    12357800 :     return false;
    2033             : }
    2034             : 
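Each entry in a chain stores its full 32-bit hash, so the loop above evaluates the (possibly expensive) hash clauses only for entries whose stored hash already equals hj_CurHashValue. A self-contained sketch of that resumable, pre-filtered chain walk, with ToyTuple and a plain int key standing in for HashJoinTuple and the real join quals:

#include <stddef.h>
#include <stdint.h>

typedef struct ToyTuple
{
    uint32_t         hashvalue;     /* full hash kept as a cheap filter */
    int              key;           /* stand-in for the join key */
    struct ToyTuple *next;          /* bucket chain link */
} ToyTuple;

/*
 * Resumable scan: 'cur' is the tuple returned last time (NULL to start
 * at the head of the chain).  Return the next entry whose hash matches
 * AND whose key really matches, mirroring the hashvalue test followed
 * by ExecQualAndReset above.
 */
static ToyTuple *
scan_bucket(ToyTuple *head, ToyTuple *cur, uint32_t hashvalue, int outer_key)
{
    ToyTuple *t = (cur != NULL) ? cur->next : head;

    for (; t != NULL; t = t->next)
    {
        if (t->hashvalue == hashvalue && t->key == outer_key)
            return t;               /* caller resumes from here next call */
    }
    return NULL;                    /* no (more) matches in this bucket */
}
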
    2035             : /*
    2036             :  * ExecParallelScanHashBucket
    2037             :  *      scan a hash bucket for matches to the current outer tuple
    2038             :  *
    2039             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    2040             :  *
    2041             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2042             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2043             :  * for the latter.
    2044             :  */
    2045             : bool
    2046     4206108 : ExecParallelScanHashBucket(HashJoinState *hjstate,
    2047             :                            ExprContext *econtext)
    2048             : {
    2049     4206108 :     ExprState  *hjclauses = hjstate->hashclauses;
    2050     4206108 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2051     4206108 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2052     4206108 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    2053             : 
    2054             :     /*
    2055             :      * hj_CurTuple is the address of the tuple last returned from the current
    2056             :      * bucket, or NULL if it's time to start scanning a new bucket.
    2057             :      */
    2058     4206108 :     if (hashTuple != NULL)
    2059     2040078 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2060             :     else
    2061     2166030 :         hashTuple = ExecParallelHashFirstTuple(hashtable,
    2062             :                                                hjstate->hj_CurBucketNo);
    2063             : 
    2064     5600618 :     while (hashTuple != NULL)
    2065             :     {
    2066     3434588 :         if (hashTuple->hashvalue == hashvalue)
    2067             :         {
    2068             :             TupleTableSlot *inntuple;
    2069             : 
    2070             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    2071     2040078 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2072             :                                              hjstate->hj_HashTupleSlot,
    2073             :                                              false);    /* do not pfree */
    2074     2040078 :             econtext->ecxt_innertuple = inntuple;
    2075             : 
    2076     2040078 :             if (ExecQualAndReset(hjclauses, econtext))
    2077             :             {
    2078     2040078 :                 hjstate->hj_CurTuple = hashTuple;
    2079     2040078 :                 return true;
    2080             :             }
    2081             :         }
    2082             : 
    2083     1394510 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2084             :     }
    2085             : 
    2086             :     /*
    2087             :      * no match
    2088             :      */
    2089     2166030 :     return false;
    2090             : }
    2091             : 
    2092             : /*
    2093             :  * ExecPrepHashTableForUnmatched
    2094             :  *      set up for a series of ExecScanHashTableForUnmatched calls
    2095             :  */
    2096             : void
    2097        4008 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
    2098             : {
    2099             :     /*----------
    2100             :      * During this scan we use the HashJoinState fields as follows:
    2101             :      *
    2102             :      * hj_CurBucketNo: next regular bucket to scan
    2103             :      * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
    2104             :      * hj_CurTuple: last tuple returned, or NULL to start next bucket
    2105             :      *----------
    2106             :      */
    2107        4008 :     hjstate->hj_CurBucketNo = 0;
    2108        4008 :     hjstate->hj_CurSkewBucketNo = 0;
    2109        4008 :     hjstate->hj_CurTuple = NULL;
    2110        4008 : }
    2111             : 
    2112             : /*
    2113             :  * Decide if this process is allowed to run the unmatched scan.  If so, the
    2114             :  * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
    2115             :  * Otherwise the batch is detached and false is returned.
    2116             :  */
    2117             : bool
    2118          84 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
    2119             : {
    2120          84 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2121          84 :     int         curbatch = hashtable->curbatch;
    2122          84 :     ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    2123             : 
    2124             :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
    2125             : 
    2126             :     /*
    2127             :      * It would not be deadlock-free to wait on the batch barrier, because it
    2128             :      * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
    2129             :      * already emitted tuples.  Therefore, we'll hold a wait-free election:
    2130             :      * only one process can continue to the next phase, and all others detach
    2131             :      * from this batch.  They can still do any work on other batches, if there
    2132             :      * are any.
    2133             :      */
    2134          84 :     if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
    2135             :     {
    2136             :         /* This process considers the batch to be done. */
    2137          18 :         hashtable->batches[hashtable->curbatch].done = true;
    2138             : 
    2139             :         /* Make sure any temporary files are closed. */
    2140          18 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    2141          18 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    2142             : 
    2143             :         /*
    2144             :          * Track largest batch we've seen, which would normally happen in
    2145             :          * ExecHashTableDetachBatch().
    2146             :          */
    2147          18 :         hashtable->spacePeak =
    2148          18 :             Max(hashtable->spacePeak,
    2149             :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    2150          18 :         hashtable->curbatch = -1;
    2151          18 :         return false;
    2152             :     }
    2153             : 
    2154             :     /* Now we are alone with this batch. */
    2155             :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    2156             : 
    2157             :     /*
    2158             :      * Has another process decided to give up early and command all processes
    2159             :      * to skip the unmatched scan?
    2160             :      */
    2161          66 :     if (batch->skip_unmatched)
    2162             :     {
    2163           0 :         hashtable->batches[hashtable->curbatch].done = true;
    2164           0 :         ExecHashTableDetachBatch(hashtable);
    2165           0 :         return false;
    2166             :     }
    2167             : 
    2168             :     /* Now prepare the process local state, just as for non-parallel join. */
    2169          66 :     ExecPrepHashTableForUnmatched(hjstate);
    2170             : 
    2171          66 :     return true;
    2172             : }
    2173             : 
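BarrierArriveAndDetachExceptLast makes the election wait-free: each arriving process atomically decrements the number still attached, and only the process that observes itself to be the last one proceeds to the PHJ_BATCH_SCAN work. A much-simplified model using C11 atomics (this is not the PostgreSQL barrier API, which also tracks phase numbers):

#include <stdatomic.h>
#include <stdbool.h>

/* Number of processes still attached to a toy batch barrier. */
static atomic_int attached;     /* initialized to the worker count */

/*
 * Called once by each worker when it finishes probing.  Exactly one
 * caller (the last to arrive) gets true and runs the unmatched scan;
 * the others "detach" immediately and move on, never blocking.
 */
static bool
arrive_and_detach_except_last(void)
{
    /* atomic_fetch_sub returns the value held before the decrement */
    return atomic_fetch_sub(&attached, 1) == 1;
}
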
    2174             : /*
    2175             :  * ExecScanHashTableForUnmatched
    2176             :  *      scan the hash table for unmatched inner tuples
    2177             :  *
    2178             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2179             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2180             :  * for the latter.
    2181             :  */
    2182             : bool
    2183      419196 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
    2184             : {
    2185      419196 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2186      419196 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2187             : 
    2188             :     for (;;)
    2189             :     {
    2190             :         /*
    2191             :          * hj_CurTuple is the address of the tuple last returned from the
    2192             :          * current bucket, or NULL if it's time to start scanning a new
    2193             :          * bucket.
    2194             :          */
    2195     5576544 :         if (hashTuple != NULL)
    2196      415254 :             hashTuple = hashTuple->next.unshared;
    2197     5161290 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2198             :         {
    2199     5157360 :             hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    2200     5157360 :             hjstate->hj_CurBucketNo++;
    2201             :         }
    2202        3930 :         else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
    2203             :         {
    2204           0 :             int         j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
    2205             : 
    2206           0 :             hashTuple = hashtable->skewBucket[j]->tuples;
    2207           0 :             hjstate->hj_CurSkewBucketNo++;
    2208             :         }
    2209             :         else
    2210        3930 :             break;              /* finished all buckets */
    2211             : 
    2212     5982130 :         while (hashTuple != NULL)
    2213             :         {
    2214      824782 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2215             :             {
    2216             :                 TupleTableSlot *inntuple;
    2217             : 
    2218             :                 /* insert hashtable's tuple into exec slot */
    2219      415266 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2220             :                                                  hjstate->hj_HashTupleSlot,
    2221             :                                                  false);    /* do not pfree */
    2222      415266 :                 econtext->ecxt_innertuple = inntuple;
    2223             : 
    2224             :                 /*
    2225             :                  * Reset temp memory each time; although this function doesn't
    2226             :                  * do any qual eval, the caller will, so let's keep it
    2227             :                  * parallel to ExecScanHashBucket.
    2228             :                  */
    2229      415266 :                 ResetExprContext(econtext);
    2230             : 
    2231      415266 :                 hjstate->hj_CurTuple = hashTuple;
    2232      415266 :                 return true;
    2233             :             }
    2234             : 
    2235      409516 :             hashTuple = hashTuple->next.unshared;
    2236             :         }
    2237             : 
    2238             :         /* allow this loop to be cancellable */
    2239     5157348 :         CHECK_FOR_INTERRUPTS();
    2240             :     }
    2241             : 
    2242             :     /*
    2243             :      * no more unmatched tuples
    2244             :      */
    2245        3930 :     return false;
    2246             : }
    2247             : 
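During the probe phase, HeapTupleHeaderHasMatch bits get set on every inner tuple that joined at least once; the scan above then walks all buckets (and skew buckets), returning one still-unmatched tuple per call. A one-pass sketch of the same idea over a toy layout, with the resumable hj_CurBucketNo/hj_CurTuple bookkeeping omitted:

#include <stdbool.h>
#include <stddef.h>

typedef struct UTuple
{
    bool           matched;     /* set when some outer tuple joined with it */
    struct UTuple *next;        /* bucket chain link */
} UTuple;

/*
 * Count every inner tuple that never matched; the real code instead
 * hands each one back, a call at a time, for the join to emit.
 */
static int
count_unmatched(UTuple **buckets, int nbuckets)
{
    int unmatched = 0;

    for (int i = 0; i < nbuckets; i++)
        for (UTuple *t = buckets[i]; t != NULL; t = t->next)
            if (!t->matched)
                unmatched++;
    return unmatched;
}
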
    2248             : /*
    2249             :  * ExecParallelScanHashTableForUnmatched
    2250             :  *      scan the hash table for unmatched inner tuples, in parallel join
    2251             :  *
    2252             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2253             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2254             :  * for the latter.
    2255             :  */
    2256             : bool
    2257      120072 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
    2258             :                                       ExprContext *econtext)
    2259             : {
    2260      120072 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2261      120072 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2262             : 
    2263             :     for (;;)
    2264             :     {
    2265             :         /*
    2266             :          * hj_CurTuple is the address of the tuple last returned from the
    2267             :          * current bucket, or NULL if it's time to start scanning a new
    2268             :          * bucket.
    2269             :          */
    2270      734472 :         if (hashTuple != NULL)
    2271      120006 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2272      614466 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2273      614400 :             hashTuple = ExecParallelHashFirstTuple(hashtable,
    2274      614400 :                                                    hjstate->hj_CurBucketNo++);
    2275             :         else
    2276          66 :             break;              /* finished all buckets */
    2277             : 
    2278      974406 :         while (hashTuple != NULL)
    2279             :         {
    2280      360006 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2281             :             {
    2282             :                 TupleTableSlot *inntuple;
    2283             : 
    2284             :                 /* insert hashtable's tuple into exec slot */
    2285      120006 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2286             :                                                  hjstate->hj_HashTupleSlot,
    2287             :                                                  false);    /* do not pfree */
    2288      120006 :                 econtext->ecxt_innertuple = inntuple;
    2289             : 
    2290             :                 /*
    2291             :                  * Reset temp memory each time; although this function doesn't
    2292             :                  * do any qual eval, the caller will, so let's keep it
    2293             :                  * parallel to ExecScanHashBucket.
    2294             :                  */
    2295      120006 :                 ResetExprContext(econtext);
    2296             : 
    2297      120006 :                 hjstate->hj_CurTuple = hashTuple;
    2298      120006 :                 return true;
    2299             :             }
    2300             : 
    2301      240000 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2302             :         }
    2303             : 
    2304             :         /* allow this loop to be cancellable */
    2305      614400 :         CHECK_FOR_INTERRUPTS();
    2306             :     }
    2307             : 
    2308             :     /*
    2309             :      * no more unmatched tuples
    2310             :      */
    2311          66 :     return false;
    2312             : }
    2313             : 
    2314             : /*
    2315             :  * ExecHashTableReset
    2316             :  *
    2317             :  *      reset hash table header for new batch
    2318             :  */
    2319             : void
    2320         884 : ExecHashTableReset(HashJoinTable hashtable)
    2321             : {
    2322             :     MemoryContext oldcxt;
    2323         884 :     int         nbuckets = hashtable->nbuckets;
    2324             : 
    2325             :     /*
    2326             :      * Release all the hash buckets and tuples acquired in the prior pass, and
    2327             :      * reinitialize the context for a new pass.
    2328             :      */
    2329         884 :     MemoryContextReset(hashtable->batchCxt);
    2330         884 :     oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
    2331             : 
    2332             :     /* Reallocate and reinitialize the hash bucket headers. */
    2333         884 :     hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
    2334             : 
    2335         884 :     hashtable->spaceUsed = 0;
    2336             : 
    2337         884 :     MemoryContextSwitchTo(oldcxt);
    2338             : 
    2339             :     /* Forget the chunks (the memory was freed by the context reset above). */
    2340         884 :     hashtable->chunks = NULL;
    2341         884 : }
    2342             : 
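Everything a batch allocates (the bucket array, dense tuple chunks, skew buckets) lives in batchCxt, so a single MemoryContextReset releases it all and the function only has to forget the now-dangling chunk list pointer. A toy arena showing the same release-everything-at-once behavior (hypothetical and far simpler than PostgreSQL's memory contexts in memutils.c):

#include <stdlib.h>

/* A toy arena: a linked list of malloc'd blocks freed in one call. */
typedef struct ArenaBlock
{
    struct ArenaBlock *next;
} ArenaBlock;

typedef struct Arena
{
    ArenaBlock *blocks;
} Arena;

static void *
arena_alloc(Arena *a, size_t size)
{
    ArenaBlock *b = malloc(sizeof(ArenaBlock) + size);

    if (b == NULL)
        abort();                /* toy error handling */
    b->next = a->blocks;
    a->blocks = b;
    return b + 1;               /* usable memory follows the header */
}

/* Analogue of MemoryContextReset(): all allocations vanish together. */
static void
arena_reset(Arena *a)
{
    while (a->blocks != NULL)
    {
        ArenaBlock *next = a->blocks->next;

        free(a->blocks);
        a->blocks = next;
    }
}
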
    2343             : /*
    2344             :  * ExecHashTableResetMatchFlags
    2345             :  *      Clear all the HeapTupleHeaderHasMatch flags in the table
    2346             :  */
    2347             : void
    2348          74 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
    2349             : {
    2350             :     HashJoinTuple tuple;
    2351             :     int         i;
    2352             : 
    2353             :     /* Reset all flags in the main table ... */
    2354       75850 :     for (i = 0; i < hashtable->nbuckets; i++)
    2355             :     {
    2356       76162 :         for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
    2357         386 :              tuple = tuple->next.unshared)
    2358         386 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2359             :     }
    2360             : 
    2361             :     /* ... and the same for the skew buckets, if any */
    2362          74 :     for (i = 0; i < hashtable->nSkewBuckets; i++)
    2363             :     {
    2364           0 :         int         j = hashtable->skewBucketNums[i];
    2365           0 :         HashSkewBucket *skewBucket = hashtable->skewBucket[j];
    2366             : 
    2367           0 :         for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
    2368           0 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2369             :     }
    2370          74 : }
    2371             : 
    2372             : 
    2373             : void
    2374        1896 : ExecReScanHash(HashState *node)
    2375             : {
    2376        1896 :     PlanState  *outerPlan = outerPlanState(node);
    2377             : 
    2378             :     /*
    2379             :      * if chgParam of subnode is not null then plan will be re-scanned by
    2380             :      * first ExecProcNode.
    2381             :      */
    2382        1896 :     if (outerPlan->chgParam == NULL)
    2383          30 :         ExecReScan(outerPlan);
    2384        1896 : }
    2385             : 
    2386             : 
    2387             : /*
    2388             :  * ExecHashBuildSkewHash
    2389             :  *
    2390             :  *      Set up for skew optimization if we can identify the most common values
    2391             :  *      (MCVs) of the outer relation's join key.  We make a skew hash bucket
    2392             :  *      for the hash value of each MCV, up to the number of slots allowed
    2393             :  *      based on available memory.
    2394             :  */
    2395             : static void
    2396         118 : ExecHashBuildSkewHash(HashState *hashstate, HashJoinTable hashtable,
    2397             :                       Hash *node, int mcvsToUse)
    2398             : {
    2399             :     HeapTupleData *statsTuple;
    2400             :     AttStatsSlot sslot;
    2401             : 
    2402             :     /* Do nothing if planner didn't identify the outer relation's join key */
    2403         118 :     if (!OidIsValid(node->skewTable))
    2404          72 :         return;
    2405             :     /* Also, do nothing if we don't have room for at least one skew bucket */
    2406         118 :     if (mcvsToUse <= 0)
    2407           0 :         return;
    2408             : 
    2409             :     /*
    2410             :      * Try to find the MCV statistics for the outer relation's join key.
    2411             :      */
    2412         118 :     statsTuple = SearchSysCache3(STATRELATTINH,
    2413             :                                  ObjectIdGetDatum(node->skewTable),
    2414         118 :                                  Int16GetDatum(node->skewColumn),
    2415         118 :                                  BoolGetDatum(node->skewInherit));
    2416         118 :     if (!HeapTupleIsValid(statsTuple))
    2417          72 :         return;
    2418             : 
    2419          46 :     if (get_attstatsslot(&sslot, statsTuple,
    2420             :                          STATISTIC_KIND_MCV, InvalidOid,
    2421             :                          ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
    2422             :     {
    2423             :         double      frac;
    2424             :         int         nbuckets;
    2425             :         int         i;
    2426             : 
    2427           6 :         if (mcvsToUse > sslot.nvalues)
    2428           0 :             mcvsToUse = sslot.nvalues;
    2429             : 
    2430             :         /*
    2431             :          * Calculate the expected fraction of outer relation that will
    2432             :          * participate in the skew optimization.  If this isn't at least
    2433             :          * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
    2434             :          */
    2435           6 :         frac = 0;
    2436         132 :         for (i = 0; i < mcvsToUse; i++)
    2437         126 :             frac += sslot.numbers[i];
    2438           6 :         if (frac < SKEW_MIN_OUTER_FRACTION)
    2439             :         {
    2440           0 :             free_attstatsslot(&sslot);
    2441           0 :             ReleaseSysCache(statsTuple);
    2442           0 :             return;
    2443             :         }
    2444             : 
    2445             :         /*
    2446             :          * Okay, set up the skew hashtable.
    2447             :          *
    2448             :          * skewBucket[] is an open addressing hashtable with a power of 2 size
    2449             :          * that is greater than the number of MCV values.  (This ensures there
    2450             :          * will be at least one null entry, so searches will always
    2451             :          * terminate.)
    2452             :          *
    2453             :          * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
    2454             :          * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
    2455             :          * since we limit pg_statistic entries to much less than that.
    2456             :          */
    2457           6 :         nbuckets = pg_nextpower2_32(mcvsToUse + 1);
    2458             :         /* use two more bits just to help avoid collisions */
    2459           6 :         nbuckets <<= 2;
    2460             : 
    2461           6 :         hashtable->skewEnabled = true;
    2462           6 :         hashtable->skewBucketLen = nbuckets;
    2463             : 
    2464             :         /*
    2465             :          * We allocate the bucket memory in the hashtable's batch context. It
    2466             :          * is only needed during the first batch, and this ensures it will be
    2467             :          * automatically removed once the first batch is done.
    2468             :          */
    2469           6 :         hashtable->skewBucket = (HashSkewBucket **)
    2470           6 :             MemoryContextAllocZero(hashtable->batchCxt,
    2471             :                                    nbuckets * sizeof(HashSkewBucket *));
    2472           6 :         hashtable->skewBucketNums = (int *)
    2473           6 :             MemoryContextAllocZero(hashtable->batchCxt,
    2474             :                                    mcvsToUse * sizeof(int));
    2475             : 
    2476           6 :         hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
    2477           6 :             + mcvsToUse * sizeof(int);
    2478           6 :         hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
    2479           6 :             + mcvsToUse * sizeof(int);
    2480           6 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    2481           6 :             hashtable->spacePeak = hashtable->spaceUsed;
    2482             : 
    2483             :         /*
    2484             :          * Create a skew bucket for each MCV hash value.
    2485             :          *
    2486             :          * Note: it is very important that we create the buckets in order of
    2487             :          * decreasing MCV frequency.  If we have to remove some buckets, they
    2488             :          * must be removed in reverse order of creation (see notes in
    2489             :          * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
    2490             :          * be removed first.
    2491             :          */
    2492             : 
    2493         132 :         for (i = 0; i < mcvsToUse; i++)
    2494             :         {
    2495             :             uint32      hashvalue;
    2496             :             int         bucket;
    2497             : 
    2498         126 :             hashvalue = DatumGetUInt32(FunctionCall1Coll(hashstate->skew_hashfunction,
    2499             :                                                          hashstate->skew_collation,
    2500         126 :                                                          sslot.values[i]));
    2501             : 
    2502             :             /*
    2503             :              * While we have not hit a hole in the hashtable and have not hit
    2504             :              * the desired bucket, we have collided with some previous hash
    2505             :              * value, so try the next bucket location.  NB: this code must
    2506             :              * match ExecHashGetSkewBucket.
    2507             :              */
    2508         126 :             bucket = hashvalue & (nbuckets - 1);
    2509         126 :             while (hashtable->skewBucket[bucket] != NULL &&
    2510           0 :                    hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2511           0 :                 bucket = (bucket + 1) & (nbuckets - 1);
    2512             : 
    2513             :             /*
    2514             :              * If we found an existing bucket with the same hashvalue, leave
    2515             :              * it alone.  It's okay for two MCVs to share a hashvalue.
    2516             :              */
    2517         126 :             if (hashtable->skewBucket[bucket] != NULL)
    2518           0 :                 continue;
    2519             : 
    2520             :             /* Okay, create a new skew bucket for this hashvalue. */
    2521         252 :             hashtable->skewBucket[bucket] = (HashSkewBucket *)
    2522         126 :                 MemoryContextAlloc(hashtable->batchCxt,
    2523             :                                    sizeof(HashSkewBucket));
    2524         126 :             hashtable->skewBucket[bucket]->hashvalue = hashvalue;
    2525         126 :             hashtable->skewBucket[bucket]->tuples = NULL;
    2526         126 :             hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
    2527         126 :             hashtable->nSkewBuckets++;
    2528         126 :             hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
    2529         126 :             hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
    2530         126 :             if (hashtable->spaceUsed > hashtable->spacePeak)
    2531         126 :                 hashtable->spacePeak = hashtable->spaceUsed;
    2532             :         }
    2533             : 
    2534           6 :         free_attstatsslot(&sslot);
    2535             :     }
    2536             : 
    2537          46 :     ReleaseSysCache(statsTuple);
    2538             : }
    2539             : 
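The sizing above comes out to "smallest power of two strictly greater than mcvsToUse, times four": the oversizing keeps linear-probe runs short and guarantees at least one NULL hole so every search terminates. A standalone sketch of the sizing plus the insert-time probe, with next_pow2 as a local stand-in for pg_nextpower2_32:

#include <stdint.h>

/* Stand-in for pg_nextpower2_32(): smallest power of 2 >= v, for v > 0. */
static uint32_t
next_pow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v + 1;
}

/* Same arithmetic as above: more slots than MCVs, then two extra bits. */
static uint32_t
skew_table_size(int mcvsToUse)
{
    return next_pow2((uint32_t) mcvsToUse + 1) << 2;
}

/*
 * Linear probe for an insert position.  A slot already holding the same
 * hash is reused (two MCVs may share a hash value), matching the loop in
 * ExecHashBuildSkewHash and the lookup in ExecHashGetSkewBucket.
 */
static uint32_t
skew_probe(const uint32_t *slot_hash, const int *slot_used,
           uint32_t nbuckets, uint32_t hashvalue)
{
    uint32_t bucket = hashvalue & (nbuckets - 1);

    while (slot_used[bucket] && slot_hash[bucket] != hashvalue)
        bucket = (bucket + 1) & (nbuckets - 1);
    return bucket;
}
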
    2540             : /*
    2541             :  * ExecHashGetSkewBucket
    2542             :  *
    2543             :  *      Returns the index of the skew bucket for this hashvalue,
    2544             :  *      or INVALID_SKEW_BUCKET_NO if the hashvalue is not
    2545             :  *      associated with any active skew bucket.
    2546             :  */
    2547             : int
    2548    30023574 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
    2549             : {
    2550             :     int         bucket;
    2551             : 
    2552             :     /*
    2553             :      * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
    2554             :      * particular, this happens after the initial batch is done).
    2555             :      */
    2556    30023574 :     if (!hashtable->skewEnabled)
    2557    29903574 :         return INVALID_SKEW_BUCKET_NO;
    2558             : 
    2559             :     /*
    2560             :      * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
    2561             :      */
    2562      120000 :     bucket = hashvalue & (hashtable->skewBucketLen - 1);
    2563             : 
    2564             :     /*
    2565             :      * While we have not hit a hole in the hashtable and have not hit the
    2566             :      * desired bucket, we have collided with some other hash value, so try the
    2567             :      * next bucket location.
    2568             :      */
    2569      127830 :     while (hashtable->skewBucket[bucket] != NULL &&
    2570       10818 :            hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2571        7830 :         bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
    2572             : 
    2573             :     /*
    2574             :      * Found the desired bucket?
    2575             :      */
    2576      120000 :     if (hashtable->skewBucket[bucket] != NULL)
    2577        2988 :         return bucket;
    2578             : 
    2579             :     /*
    2580             :      * There must not be any hashtable entry for this hash value.
    2581             :      */
    2582      117012 :     return INVALID_SKEW_BUCKET_NO;
    2583             : }
    2584             : 
    2585             : /*
    2586             :  * ExecHashSkewTableInsert
    2587             :  *
    2588             :  *      Insert a tuple into the skew hashtable.
    2589             :  *
    2590             :  * This should generally match up with the current-batch case in
    2591             :  * ExecHashTableInsert.
    2592             :  */
    2593             : static void
    2594         588 : ExecHashSkewTableInsert(HashJoinTable hashtable,
    2595             :                         TupleTableSlot *slot,
    2596             :                         uint32 hashvalue,
    2597             :                         int bucketNumber)
    2598             : {
    2599             :     bool        shouldFree;
    2600         588 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    2601             :     HashJoinTuple hashTuple;
    2602             :     int         hashTupleSize;
    2603             : 
    2604             :     /* Create the HashJoinTuple */
    2605         588 :     hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2606         588 :     hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
    2607             :                                                    hashTupleSize);
    2608         588 :     hashTuple->hashvalue = hashvalue;
    2609         588 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    2610         588 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    2611             : 
    2612             :     /* Push it onto the front of the skew bucket's list */
    2613         588 :     hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
    2614         588 :     hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
    2615             :     Assert(hashTuple != hashTuple->next.unshared);
    2616             : 
    2617             :     /* Account for space used, and back off if we've used too much */
    2618         588 :     hashtable->spaceUsed += hashTupleSize;
    2619         588 :     hashtable->spaceUsedSkew += hashTupleSize;
    2620         588 :     if (hashtable->spaceUsed > hashtable->spacePeak)
    2621         432 :         hashtable->spacePeak = hashtable->spaceUsed;
    2622         690 :     while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
    2623         102 :         ExecHashRemoveNextSkewBucket(hashtable);
    2624             : 
    2625             :     /* Check we are not over the total spaceAllowed, either */
    2626         588 :     if (hashtable->spaceUsed > hashtable->spaceAllowed)
    2627           0 :         ExecHashIncreaseNumBatches(hashtable);
    2628             : 
    2629         588 :     if (shouldFree)
    2630         588 :         heap_free_minimal_tuple(tuple);
    2631         588 : }
    2632             : 
    2633             : /*
    2634             :  *      ExecHashRemoveNextSkewBucket
    2635             :  *
    2636             :  *      Remove the least valuable skew bucket by pushing its tuples into
    2637             :  *      the main hash table.
    2638             :  */
    2639             : static void
    2640         102 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
    2641             : {
    2642             :     int         bucketToRemove;
    2643             :     HashSkewBucket *bucket;
    2644             :     uint32      hashvalue;
    2645             :     int         bucketno;
    2646             :     int         batchno;
    2647             :     HashJoinTuple hashTuple;
    2648             : 
    2649             :     /* Locate the bucket to remove */
    2650         102 :     bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
    2651         102 :     bucket = hashtable->skewBucket[bucketToRemove];
    2652             : 
    2653             :     /*
    2654             :      * Calculate which bucket and batch the tuples belong to in the main
    2655             :      * hashtable.  They all have the same hash value, so it's the same for all
    2656             :      * of them.  Also note that it's not possible for nbatch to increase while
    2657             :      * we are processing the tuples.
    2658             :      */
    2659         102 :     hashvalue = bucket->hashvalue;
    2660         102 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    2661             : 
    2662             :     /* Process all tuples in the bucket */
    2663         102 :     hashTuple = bucket->tuples;
    2664         450 :     while (hashTuple != NULL)
    2665             :     {
    2666         348 :         HashJoinTuple nextHashTuple = hashTuple->next.unshared;
    2667             :         MinimalTuple tuple;
    2668             :         Size        tupleSize;
    2669             : 
    2670             :         /*
    2671             :          * This code must agree with ExecHashTableInsert.  We do not use
    2672             :          * ExecHashTableInsert directly as ExecHashTableInsert expects a
    2673             :          * TupleTableSlot while we already have HashJoinTuples.
    2674             :          */
    2675         348 :         tuple = HJTUPLE_MINTUPLE(hashTuple);
    2676         348 :         tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2677             : 
    2678             :         /* Decide whether to put the tuple in the hash table or a temp file */
    2679         348 :         if (batchno == hashtable->curbatch)
    2680             :         {
    2681             :             /* Move the tuple to the main hash table */
    2682             :             HashJoinTuple copyTuple;
    2683             : 
    2684             :             /*
    2685             :              * We must copy the tuple into the dense storage, else it will not
    2686             :              * be found by, eg, ExecHashIncreaseNumBatches.
    2687             :              */
    2688         138 :             copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
    2689         138 :             memcpy(copyTuple, hashTuple, tupleSize);
    2690         138 :             pfree(hashTuple);
    2691             : 
    2692         138 :             copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    2693         138 :             hashtable->buckets.unshared[bucketno] = copyTuple;
    2694             : 
    2695             :             /* We have reduced skew space, but overall space doesn't change */
    2696         138 :             hashtable->spaceUsedSkew -= tupleSize;
    2697             :         }
    2698             :         else
    2699             :         {
    2700             :             /* Put the tuple into a temp file for later batches */
    2701             :             Assert(batchno > hashtable->curbatch);
    2702         210 :             ExecHashJoinSaveTuple(tuple, hashvalue,
    2703         210 :                                   &hashtable->innerBatchFile[batchno],
    2704             :                                   hashtable);
    2705         210 :             pfree(hashTuple);
    2706         210 :             hashtable->spaceUsed -= tupleSize;
    2707         210 :             hashtable->spaceUsedSkew -= tupleSize;
    2708             :         }
    2709             : 
    2710         348 :         hashTuple = nextHashTuple;
    2711             : 
    2712             :         /* allow this loop to be cancellable */
    2713         348 :         CHECK_FOR_INTERRUPTS();
    2714             :     }
    2715             : 
    2716             :     /*
    2717             :      * Free the bucket struct itself and reset the hashtable entry to NULL.
    2718             :      *
    2719             :      * NOTE: this is not nearly as simple as it looks on the surface, because
    2720             :      * of the possibility of collisions in the hashtable.  Suppose that hash
    2721             :      * values A and B collide at a particular hashtable entry, and that A was
    2722             :      * entered first so B gets shifted to a different table entry.  If we were
    2723             :      * to remove A first then ExecHashGetSkewBucket would mistakenly start
    2724             :      * reporting that B is not in the hashtable, because it would hit the NULL
    2725             :      * before finding B.  However, we always remove entries in the reverse
    2726             :      * order of creation, so this failure cannot happen.
    2727             :      */
    2728         102 :     hashtable->skewBucket[bucketToRemove] = NULL;
    2729         102 :     hashtable->nSkewBuckets--;
    2730         102 :     pfree(bucket);
    2731         102 :     hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
    2732         102 :     hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
    2733             : 
    2734             :     /*
    2735             :      * If we have removed all skew buckets then give up on skew optimization.
    2736             :      * Release the arrays since they aren't useful any more.
    2737             :      */
    2738         102 :     if (hashtable->nSkewBuckets == 0)
    2739             :     {
    2740           0 :         hashtable->skewEnabled = false;
    2741           0 :         pfree(hashtable->skewBucket);
    2742           0 :         pfree(hashtable->skewBucketNums);
    2743           0 :         hashtable->skewBucket = NULL;
    2744           0 :         hashtable->skewBucketNums = NULL;
    2745           0 :         hashtable->spaceUsed -= hashtable->spaceUsedSkew;
    2746           0 :         hashtable->spaceUsedSkew = 0;
    2747             :     }
    2748         102 : }
    2749             : 
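The NOTE above deserves a concrete instance. In a four-slot toy table, hash values 5 and 13 both map to slot 1 (5 & 3 == 13 & 3); if 5 was entered first, 13 gets shifted to slot 2. Deleting 5 before 13 leaves a hole at slot 1 that makes 13 unreachable, which is exactly what removing skew buckets in reverse order of creation rules out. A hypothetical sketch (0 marks an empty slot here):

#include <assert.h>
#include <stdint.h>

#define N 4                     /* toy table: 4 slots, mask is 3 */

/* Linear-probe lookup that stops at the first empty slot. */
static int
find(const uint32_t slots[N], uint32_t h)
{
    uint32_t b = h & (N - 1);

    while (slots[b] != 0 && slots[b] != h)
        b = (b + 1) & (N - 1);
    return slots[b] == h ? (int) b : -1;
}

int
main(void)
{
    uint32_t slots[N] = {0};

    slots[1] = 5;               /* 5 lands in its home slot 1 */
    slots[2] = 13;              /* 13 collides at slot 1, probes to 2 */
    assert(find(slots, 13) == 2);

    slots[1] = 0;               /* WRONG: remove the older entry first... */
    assert(find(slots, 13) == -1);  /* ...and 13 is no longer found */
    return 0;
}
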
    2750             : /*
    2751             :  * Reserve space in the DSM segment for instrumentation data.
    2752             :  */
    2753             : void
    2754         198 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
    2755             : {
    2756             :     size_t      size;
    2757             : 
    2758             :     /* don't need this if not instrumenting or no workers */
    2759         198 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2760         114 :         return;
    2761             : 
    2762          84 :     size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
    2763          84 :     size = add_size(size, offsetof(SharedHashInfo, hinstrument));
    2764          84 :     shm_toc_estimate_chunk(&pcxt->estimator, size);
    2765          84 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
    2766             : }
    2767             : 
    2768             : /*
    2769             :  * Set up a space in the DSM for all workers to record instrumentation data
    2770             :  * about their hash table.
    2771             :  */
    2772             : void
    2773         198 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
    2774             : {
    2775             :     size_t      size;
    2776             : 
    2777             :     /* don't need this if not instrumenting or no workers */
    2778         198 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2779         114 :         return;
    2780             : 
    2781          84 :     size = offsetof(SharedHashInfo, hinstrument) +
    2782          84 :         pcxt->nworkers * sizeof(HashInstrumentation);
    2783          84 :     node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
    2784             : 
    2785             :     /* Each per-worker area must start out as zeroes. */
    2786          84 :     memset(node->shared_info, 0, size);
    2787             : 
    2788          84 :     node->shared_info->num_workers = pcxt->nworkers;
    2789          84 :     shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
    2790          84 :                    node->shared_info);
    2791             : }
    2792             : 
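The estimate and the allocation both compute the same size: the SharedHashInfo header up to its hinstrument member, plus one HashInstrumentation per worker, which is the standard C flexible-array-member idiom. A self-contained illustration with hypothetical Toy* struct shapes:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef struct ToyInstrumentation
{
    int nbuckets;
    int nbatch;
} ToyInstrumentation;

/* Mirrors the SharedHashInfo shape: fixed header + flexible array. */
typedef struct ToySharedInfo
{
    int                num_workers;
    ToyInstrumentation hinstrument[];   /* C99 flexible array member */
} ToySharedInfo;

static ToySharedInfo *
alloc_shared_info(int nworkers)
{
    size_t size = offsetof(ToySharedInfo, hinstrument)
        + (size_t) nworkers * sizeof(ToyInstrumentation);
    ToySharedInfo *info = malloc(size);

    if (info == NULL)
        abort();                /* toy error handling */
    memset(info, 0, size);      /* per-worker areas must start zeroed */
    info->num_workers = nworkers;
    return info;
}
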
    2793             : /*
    2794             :  * Locate the DSM space for hash table instrumentation data that we'll write
    2795             :  * to at shutdown time.
    2796             :  */
    2797             : void
    2798         558 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
    2799             : {
    2800             :     SharedHashInfo *shared_info;
    2801             : 
    2802             :     /* don't need this if not instrumenting */
    2803         558 :     if (!node->ps.instrument)
    2804         306 :         return;
    2805             : 
    2806             :     /*
    2807             :      * Find our entry in the shared area, and set up a pointer to it so that
    2808             :      * we'll accumulate stats there when shutting down or rebuilding the hash
    2809             :      * table.
    2810             :      */
    2811             :     shared_info = (SharedHashInfo *)
    2812         252 :         shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
    2813         252 :     node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
    2814             : }
    2815             : 
    2816             : /*
    2817             :  * Collect EXPLAIN stats if needed, saving them into DSM memory if
    2818             :  * ExecHashInitializeWorker was called, or local storage if not.  In the
    2819             :  * parallel case, this must be done in ExecShutdownHash() rather than
    2820             :  * ExecEndHash() because the latter runs after we've detached from the DSM
    2821             :  * segment.
    2822             :  */
    2823             : void
    2824       31062 : ExecShutdownHash(HashState *node)
    2825             : {
    2826             :     /* Allocate save space if EXPLAIN'ing and we didn't do so already */
    2827       31062 :     if (node->ps.instrument && !node->hinstrument)
    2828         114 :         node->hinstrument = palloc0_object(HashInstrumentation);
    2829             :     /* Now accumulate data for the current (final) hash table */
    2830       31062 :     if (node->hinstrument && node->hashtable)
    2831         332 :         ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
    2832       31062 : }
    2833             : 
    2834             : /*
    2835             :  * Retrieve instrumentation data from workers before the DSM segment is
    2836             :  * detached, so that EXPLAIN can access it.
    2837             :  */
    2838             : void
    2839          84 : ExecHashRetrieveInstrumentation(HashState *node)
    2840             : {
    2841          84 :     SharedHashInfo *shared_info = node->shared_info;
    2842             :     size_t      size;
    2843             : 
    2844          84 :     if (shared_info == NULL)
    2845           0 :         return;
    2846             : 
    2847             :     /* Replace node->shared_info with a copy in backend-local memory. */
    2848          84 :     size = offsetof(SharedHashInfo, hinstrument) +
    2849          84 :         shared_info->num_workers * sizeof(HashInstrumentation);
    2850          84 :     node->shared_info = palloc(size);
    2851          84 :     memcpy(node->shared_info, shared_info, size);
    2852             : }
    2853             : 
    2854             : /*
    2855             :  * Accumulate instrumentation data from 'hashtable' into an
    2856             :  * initially-zeroed HashInstrumentation struct.
    2857             :  *
    2858             :  * This is used to merge information across successive hash table instances
    2859             :  * within a single plan node.  We take the maximum values of each interesting
    2860             :  * number.  The largest nbuckets and largest nbatch values might have occurred
    2861             :  * in different instances, so there's some risk of confusion from reporting
    2862             :  * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
    2863             :  * issue if we don't report the largest values.  Similarly, we want to report
    2864             :  * the largest spacePeak regardless of whether it happened in the same
    2865             :  * instance as the largest nbuckets or nbatch.  All the instances should have
    2866             :  * the same nbuckets_original and nbatch_original; but there's little value
    2867             :  * in depending on that here, so handle them the same way.
    2868             :  */
    2869             : void
    2870         332 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
    2871             :                              HashJoinTable hashtable)
    2872             : {
    2873         332 :     instrument->nbuckets = Max(instrument->nbuckets,
    2874             :                                hashtable->nbuckets);
    2875         332 :     instrument->nbuckets_original = Max(instrument->nbuckets_original,
    2876             :                                         hashtable->nbuckets_original);
    2877         332 :     instrument->nbatch = Max(instrument->nbatch,
    2878             :                              hashtable->nbatch);
    2879         332 :     instrument->nbatch_original = Max(instrument->nbatch_original,
    2880             :                                       hashtable->nbatch_original);
    2881         332 :     instrument->space_peak = Max(instrument->space_peak,
    2882             :                                  hashtable->spacePeak);
    2883         332 : }
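
For example, if one hash table instance in this node peaked at nbuckets =
1024 with nbatch = 4, and a later instance at nbuckets = 4096 with nbatch =
1, the merged result reports nbuckets = 4096 and nbatch = 4, even though no
single instance exhibited both maxima; as the comment above explains, that
is the accepted trade-off.
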
    2884             : 
    2885             : /*
    2886             :  * Allocate 'size' bytes from the currently active HashMemoryChunk
    2887             :  */
    2888             : static void *
    2889     9386710 : dense_alloc(HashJoinTable hashtable, Size size)
    2890             : {
    2891             :     HashMemoryChunk newChunk;
    2892             :     char       *ptr;
    2893             : 
    2894             :     /* just in case the size is not already aligned properly */
    2895     9386710 :     size = MAXALIGN(size);
    2896             : 
    2897             :     /*
    2898             :      * If the tuple size is larger than the threshold, allocate a separate chunk.
    2899             :      */
    2900     9386710 :     if (size > HASH_CHUNK_THRESHOLD)
    2901             :     {
    2902             :         /* allocate a dedicated chunk for this oversized tuple */
    2903           0 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2904             :                                                         HASH_CHUNK_HEADER_SIZE + size);
    2905           0 :         newChunk->maxlen = size;
    2906           0 :         newChunk->used = size;
    2907           0 :         newChunk->ntuples = 1;
    2908             : 
    2909             :         /*
    2910             :          * Add this chunk to the list after the first existing chunk, so that
    2911             :          * we don't lose the remaining space in the "current" chunk.
    2912             :          */
    2913           0 :         if (hashtable->chunks != NULL)
    2914             :         {
    2915           0 :             newChunk->next = hashtable->chunks->next;
    2916           0 :             hashtable->chunks->next.unshared = newChunk;
    2917             :         }
    2918             :         else
    2919             :         {
    2920           0 :             newChunk->next.unshared = hashtable->chunks;
    2921           0 :             hashtable->chunks = newChunk;
    2922             :         }
    2923             : 
    2924           0 :         return HASH_CHUNK_DATA(newChunk);
    2925             :     }
    2926             : 
    2927             :     /*
    2928             :      * See if we have enough space for it in the current chunk (if any). If
    2929             :      * not, allocate a fresh chunk.
    2930             :      */
    2931     9386710 :     if ((hashtable->chunks == NULL) ||
    2932     9363624 :         (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
    2933             :     {
    2934             :         /* allocate new chunk and put it at the beginning of the list */
    2935       35130 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2936             :                                                         HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
    2937             : 
    2938       35130 :         newChunk->maxlen = HASH_CHUNK_SIZE;
    2939       35130 :         newChunk->used = size;
    2940       35130 :         newChunk->ntuples = 1;
    2941             : 
    2942       35130 :         newChunk->next.unshared = hashtable->chunks;
    2943       35130 :         hashtable->chunks = newChunk;
    2944             : 
    2945       35130 :         return HASH_CHUNK_DATA(newChunk);
    2946             :     }
    2947             : 
    2948             :     /* There is enough space in the current chunk; add the tuple there */
    2949     9351580 :     ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
    2950     9351580 :     hashtable->chunks->used += size;
    2951     9351580 :     hashtable->chunks->ntuples += 1;
    2952             : 
    2953             :     /* return pointer to the start of the tuple memory */
    2954     9351580 :     return ptr;
    2955             : }
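
dense_alloc() is a classic bump allocator over a linked list of chunks.  A
minimal standalone sketch of the same idea (illustrative names and
constants; the oversized-tuple path above is omitted):

    #include <stdlib.h>

    #define CHUNK_SIZE (32 * 1024)      /* payload bytes per chunk */

    typedef struct Chunk
    {
        struct Chunk *next;
        size_t        used;
        char          data[CHUNK_SIZE];
    } Chunk;

    static void *
    bump_alloc(Chunk **chunks, size_t size)
    {
        size = (size + 7) & ~(size_t) 7;    /* align to 8 bytes */

        /* start a fresh chunk if the current one can't fit the request */
        if (*chunks == NULL || CHUNK_SIZE - (*chunks)->used < size)
        {
            Chunk *c = malloc(sizeof(Chunk));

            if (c == NULL)
                return NULL;
            c->next = *chunks;
            c->used = 0;
            *chunks = c;
        }

        /* carve the next 'size' bytes off the current chunk */
        (*chunks)->used += size;
        return (*chunks)->data + (*chunks)->used - size;
    }
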
    2956             : 
    2957             : /*
    2958             :  * Allocate space for a tuple in shared dense storage.  This is equivalent to
    2959             :  * dense_alloc but for Parallel Hash using shared memory.
    2960             :  *
    2961             :  * While loading a tuple into shared memory, we might run out of memory and
    2962             :  * decide to repartition, or determine that the load factor is too high and
    2963             :  * decide to expand the bucket array, or discover that another participant has
    2964             :  * commanded us to help do that.  Return NULL if the number of buckets or
    2965             :  * batches has changed, indicating that the caller must retry (considering the
    2966             :  * possibility that the tuple no longer belongs in the same batch).
    2967             :  */
    2968             : static HashJoinTuple
    2969     2390518 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
    2970             :                            dsa_pointer *shared)
    2971             : {
    2972     2390518 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    2973             :     dsa_pointer chunk_shared;
    2974             :     HashMemoryChunk chunk;
    2975             :     Size        chunk_size;
    2976             :     HashJoinTuple result;
    2977     2390518 :     int         curbatch = hashtable->curbatch;
    2978             : 
    2979     2390518 :     size = MAXALIGN(size);
    2980             : 
    2981             :     /*
    2982             :      * Fast path: if there is enough space in this backend's current chunk,
    2983             :      * then we can allocate without any locking.
    2984             :      */
    2985     2390518 :     chunk = hashtable->current_chunk;
    2986     2390518 :     if (chunk != NULL &&
    2987     2389628 :         size <= HASH_CHUNK_THRESHOLD &&
    2988     2389628 :         chunk->maxlen - chunk->used >= size)
    2989             :     {
    2990             : 
    2991     2386764 :         chunk_shared = hashtable->current_chunk_shared;
    2992             :         Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
    2993     2386764 :         *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
    2994     2386764 :         result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
    2995     2386764 :         chunk->used += size;
    2996             : 
    2997             :         Assert(chunk->used <= chunk->maxlen);
    2998             :         Assert(result == dsa_get_address(hashtable->area, *shared));
    2999             : 
    3000     2386764 :         return result;
    3001             :     }
    3002             : 
    3003             :     /* Slow path: try to allocate a new chunk. */
    3004        3754 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3005             : 
    3006             :     /*
    3007             :      * Check if we need to help increase the number of buckets or batches.
    3008             :      */
    3009        3754 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    3010        3718 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3011             :     {
    3012         168 :         ParallelHashGrowth growth = pstate->growth;
    3013             : 
    3014         168 :         hashtable->current_chunk = NULL;
    3015         168 :         LWLockRelease(&pstate->lock);
    3016             : 
    3017             :         /* Another participant has commanded us to help grow. */
    3018         168 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    3019          36 :             ExecParallelHashIncreaseNumBatches(hashtable);
    3020         132 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3021         132 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    3022             : 
    3023             :         /* The caller must retry. */
    3024         168 :         return NULL;
    3025             :     }
    3026             : 
    3027             :     /* Oversized tuples get their own chunk. */
    3028        3586 :     if (size > HASH_CHUNK_THRESHOLD)
    3029          48 :         chunk_size = size + HASH_CHUNK_HEADER_SIZE;
    3030             :     else
    3031        3538 :         chunk_size = HASH_CHUNK_SIZE;
    3032             : 
    3033             :     /* Check if it's time to grow batches or buckets. */
    3034        3586 :     if (pstate->growth != PHJ_GROWTH_DISABLED)
    3035             :     {
    3036             :         Assert(curbatch == 0);
    3037             :         Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    3038             : 
    3039             :         /*
    3040             :          * Check if our space limit would be exceeded.  To avoid choking on
    3041             :          * very large tuples or a very low hash_mem setting, we'll always allow
    3042             :          * each backend to allocate at least one chunk.
    3043             :          */
    3044        1882 :         if (hashtable->batches[0].at_least_one_chunk &&
    3045        1466 :             hashtable->batches[0].shared->size +
    3046        1466 :             chunk_size > pstate->space_allowed)
    3047             :         {
    3048          36 :             pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3049          36 :             hashtable->batches[0].shared->space_exhausted = true;
    3050          36 :             LWLockRelease(&pstate->lock);
    3051             : 
    3052          36 :             return NULL;
    3053             :         }
    3054             : 
    3055             :         /* Check if our load factor limit would be exceeded. */
    3056        1846 :         if (hashtable->nbatch == 1)
    3057             :         {
    3058        1576 :             hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
    3059        1576 :             hashtable->batches[0].ntuples = 0;
    3060             :             /* Guard against integer overflow and alloc size overflow */
    3061        1576 :             if (hashtable->batches[0].shared->ntuples + 1 >
    3062        1576 :                 hashtable->nbuckets * NTUP_PER_BUCKET &&
    3063         108 :                 hashtable->nbuckets < (INT_MAX / 2) &&
    3064         108 :                 hashtable->nbuckets * 2 <=
    3065             :                 MaxAllocSize / sizeof(dsa_pointer_atomic))
    3066             :             {
    3067         108 :                 pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
    3068         108 :                 LWLockRelease(&pstate->lock);
    3069             : 
    3070         108 :                 return NULL;
    3071             :             }
    3072             :         }
    3073             :     }
    3074             : 
    3075             :     /* We are cleared to allocate a new chunk. */
    3076        3442 :     chunk_shared = dsa_allocate(hashtable->area, chunk_size);
    3077        3442 :     hashtable->batches[curbatch].shared->size += chunk_size;
    3078        3442 :     hashtable->batches[curbatch].at_least_one_chunk = true;
    3079             : 
    3080             :     /* Set up the chunk. */
    3081        3442 :     chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
    3082        3442 :     *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
    3083        3442 :     chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
    3084        3442 :     chunk->used = size;
    3085             : 
    3086             :     /*
    3087             :      * Push it onto the list of chunks, so that it can be found if we need to
    3088             :      * increase the number of buckets or batches (batch 0 only) and later for
    3089             :      * freeing the memory (all batches).
    3090             :      */
    3091        3442 :     chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
    3092        3442 :     hashtable->batches[curbatch].shared->chunks = chunk_shared;
    3093             : 
    3094        3442 :     if (size <= HASH_CHUNK_THRESHOLD)
    3095             :     {
    3096             :         /*
    3097             :          * Make this the current chunk so that we can use the fast path to
    3098             :          * fill the rest of it up in future calls.
    3099             :          */
    3100        3406 :         hashtable->current_chunk = chunk;
    3101        3406 :         hashtable->current_chunk_shared = chunk_shared;
    3102             :     }
    3103        3442 :     LWLockRelease(&pstate->lock);
    3104             : 
    3105             :     Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
    3106        3442 :     result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
    3107             : 
    3108        3442 :     return result;
    3109             : }
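
Because NULL here means "the layout changed, retry", callers must loop and
recompute the tuple's destination each time.  A schematic of that contract
(hypothetical names; details such as routing the tuple to another batch's
tuplestore are elided):

    #include <stddef.h>

    typedef struct Table Table;

    /* returns NULL when nbatch or nbuckets changed under the caller */
    extern void *table_alloc(Table *t, size_t size);
    extern void  recompute_destination(Table *t, unsigned hash,
                                       int *batchno, int *bucketno);

    static void *
    alloc_with_retry(Table *t, size_t size, unsigned hash,
                     int *batchno, int *bucketno)
    {
        void *p;

        /* each failed attempt may also have helped grow buckets or batches */
        while ((p = table_alloc(t, size)) == NULL)
            recompute_destination(t, hash, batchno, bucketno);
        return p;
    }
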
    3110             : 
    3111             : /*
    3112             :  * One backend needs to set up the shared batch state including tuplestores.
    3113             :  * Other backends will ensure they have correctly configured accessors by
    3114             :  * called ExecParallelHashEnsureBatchAccessors().
    3115             :  * calling ExecParallelHashEnsureBatchAccessors().
    3116             : static void
    3117         226 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
    3118             : {
    3119         226 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3120             :     ParallelHashJoinBatch *batches;
    3121             :     MemoryContext oldcxt;
    3122             :     int         i;
    3123             : 
    3124             :     Assert(hashtable->batches == NULL);
    3125             : 
    3126             :     /* Allocate space. */
    3127         226 :     pstate->batches =
    3128         226 :         dsa_allocate0(hashtable->area,
    3129             :                       EstimateParallelHashJoinBatch(hashtable) * nbatch);
    3130         226 :     pstate->nbatch = nbatch;
    3131         226 :     batches = dsa_get_address(hashtable->area, pstate->batches);
    3132             : 
    3133             :     /*
    3134             :      * Use hash join spill memory context to allocate accessors, including
    3135             :      * buffers for the temporary files.
    3136             :      */
    3137         226 :     oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    3138             : 
    3139             :     /* Allocate this backend's accessor array. */
    3140         226 :     hashtable->nbatch = nbatch;
    3141         226 :     hashtable->batches =
    3142         226 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3143             : 
    3144             :     /* Set up the shared state, tuplestores and backend-local accessors. */
    3145         954 :     for (i = 0; i < hashtable->nbatch; ++i)
    3146             :     {
    3147         728 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3148         728 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3149             :         char        name[MAXPGPATH];
    3150             : 
    3151             :         /*
    3152             :          * All members of shared were zero-initialized.  We just need to set
    3153             :          * up the Barrier.
    3154             :          */
    3155         728 :         BarrierInit(&shared->batch_barrier, 0);
    3156         728 :         if (i == 0)
    3157             :         {
    3158             :             /* Batch 0 doesn't need to be loaded. */
    3159         226 :             BarrierAttach(&shared->batch_barrier);
    3160         904 :             while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
    3161         678 :                 BarrierArriveAndWait(&shared->batch_barrier, 0);
    3162         226 :             BarrierDetach(&shared->batch_barrier);
    3163             :         }
    3164             : 
    3165             :         /* Initialize accessor state.  All members were zero-initialized. */
    3166         728 :         accessor->shared = shared;
    3167             : 
    3168             :         /* Initialize the shared tuplestores. */
    3169         728 :         snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
    3170         728 :         accessor->inner_tuples =
    3171         728 :             sts_initialize(ParallelHashJoinBatchInner(shared),
    3172             :                            pstate->nparticipants,
    3173             :                            ParallelWorkerNumber + 1,
    3174             :                            sizeof(uint32),
    3175             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3176             :                            &pstate->fileset,
    3177             :                            name);
    3178         728 :         snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
    3179         728 :         accessor->outer_tuples =
    3180         728 :             sts_initialize(ParallelHashJoinBatchOuter(shared,
    3181             :                                                       pstate->nparticipants),
    3182             :                            pstate->nparticipants,
    3183             :                            ParallelWorkerNumber + 1,
    3184             :                            sizeof(uint32),
    3185             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3186             :                            &pstate->fileset,
    3187             :                            name);
    3188             :     }
    3189             : 
    3190         226 :     MemoryContextSwitchTo(oldcxt);
    3191         226 : }
    3192             : 
    3193             : /*
    3194             :  * Free the current set of ParallelHashJoinBatchAccessor objects.
    3195             :  */
    3196             : static void
    3197          60 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
    3198             : {
    3199             :     int         i;
    3200             : 
    3201         192 :     for (i = 0; i < hashtable->nbatch; ++i)
    3202             :     {
    3203             :         /* Make sure no files are left open. */
    3204         132 :         sts_end_write(hashtable->batches[i].inner_tuples);
    3205         132 :         sts_end_write(hashtable->batches[i].outer_tuples);
    3206         132 :         sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3207         132 :         sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3208             :     }
    3209          60 :     pfree(hashtable->batches);
    3210          60 :     hashtable->batches = NULL;
    3211          60 : }
    3212             : 
    3213             : /*
    3214             :  * Make sure this backend has up-to-date accessors for the current set of
    3215             :  * batches.
    3216             :  */
    3217             : static void
    3218         936 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
    3219             : {
    3220         936 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3221             :     ParallelHashJoinBatch *batches;
    3222             :     MemoryContext oldcxt;
    3223             :     int         i;
    3224             : 
    3225         936 :     if (hashtable->batches != NULL)
    3226             :     {
    3227         690 :         if (hashtable->nbatch == pstate->nbatch)
    3228         690 :             return;
    3229           0 :         ExecParallelHashCloseBatchAccessors(hashtable);
    3230             :     }
    3231             : 
    3232             :     /*
    3233             :      * We should never see a state where the batch-tracking array is freed,
    3234             :      * because we would have given up sooner had we joined after the build
    3235             :      * barrier reached the PHJ_BUILD_FREE phase.
    3236             :      */
    3237             :     Assert(DsaPointerIsValid(pstate->batches));
    3238             : 
    3239             :     /*
    3240             :      * Use hash join spill memory context to allocate accessors, including
    3241             :      * buffers for the temporary files.
    3242             :      */
    3243         246 :     oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    3244             : 
    3245             :     /* Allocate this backend's accessor array. */
    3246         246 :     hashtable->nbatch = pstate->nbatch;
    3247         246 :     hashtable->batches =
    3248         246 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3249             : 
    3250             :     /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
    3251             :     batches = (ParallelHashJoinBatch *)
    3252         246 :         dsa_get_address(hashtable->area, pstate->batches);
    3253             : 
    3254             :     /* Set up the accessor array and attach to the tuplestores. */
    3255        1180 :     for (i = 0; i < hashtable->nbatch; ++i)
    3256             :     {
    3257         934 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3258         934 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3259             : 
    3260         934 :         accessor->shared = shared;
    3261         934 :         accessor->preallocated = 0;
    3262         934 :         accessor->done = false;
    3263         934 :         accessor->outer_eof = false;
    3264         934 :         accessor->inner_tuples =
    3265         934 :             sts_attach(ParallelHashJoinBatchInner(shared),
    3266             :                        ParallelWorkerNumber + 1,
    3267             :                        &pstate->fileset);
    3268         934 :         accessor->outer_tuples =
    3269         934 :             sts_attach(ParallelHashJoinBatchOuter(shared,
    3270             :                                                   pstate->nparticipants),
    3271             :                        ParallelWorkerNumber + 1,
    3272             :                        &pstate->fileset);
    3273             :     }
    3274             : 
    3275         246 :     MemoryContextSwitchTo(oldcxt);
    3276             : }
    3277             : 
    3278             : /*
    3279             :  * Allocate an empty shared memory hash table for a given batch.
    3280             :  */
    3281             : void
    3282         628 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
    3283             : {
    3284         628 :     ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
    3285             :     dsa_pointer_atomic *buckets;
    3286         628 :     int         nbuckets = hashtable->parallel_state->nbuckets;
    3287             :     int         i;
    3288             : 
    3289         628 :     batch->buckets =
    3290         628 :         dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
    3291             :     buckets = (dsa_pointer_atomic *)
    3292         628 :         dsa_get_address(hashtable->area, batch->buckets);
    3293     3187316 :     for (i = 0; i < nbuckets; ++i)
    3294     3186688 :         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    3295         628 : }
    3296             : 
    3297             : /*
    3298             :  * If we are currently attached to a shared hash join batch, detach.  If we
    3299             :  * are last to detach, clean up.
    3300             :  */
    3301             : void
    3302       25138 : ExecHashTableDetachBatch(HashJoinTable hashtable)
    3303             : {
    3304       25138 :     if (hashtable->parallel_state != NULL &&
    3305        1204 :         hashtable->curbatch >= 0)
    3306             :     {
    3307         790 :         int         curbatch = hashtable->curbatch;
    3308         790 :         ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    3309         790 :         bool        attached = true;
    3310             : 
    3311             :         /* Make sure any temporary files are closed. */
    3312         790 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    3313         790 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    3314             : 
    3315             :         /* After attaching we always get at least to PHJ_BATCH_PROBE. */
    3316             :         Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
    3317             :                BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    3318             : 
    3319             :         /*
    3320             :          * If we're abandoning the PHJ_BATCH_PROBE phase early without having
    3321             :          * reached the end of it, it means the plan doesn't want any more
    3322             :          * tuples, and it is happy to abandon any tuples buffered in this
    3323             :          * process's subplans.  For correctness, we can't allow any process to
    3324             :          * execute the PHJ_BATCH_SCAN phase, because we will never have the
    3325             :          * complete set of match bits.  Therefore we skip emitting unmatched
    3326             :          * tuples in all backends (if this is a full/right join), as if those
    3327             :          * tuples were all due to be emitted by this process and it has
    3328             :          * abandoned them too.
    3329             :          */
    3330         790 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
    3331         718 :             !hashtable->batches[curbatch].outer_eof)
    3332             :         {
    3333             :             /*
    3334             :              * This flag may be written to by multiple backends during
    3335             :              * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
    3336             :              * phase so requires no extra locking.
    3337             :              */
    3338           0 :             batch->skip_unmatched = true;
    3339             :         }
    3340             : 
    3341             :         /*
    3342             :          * Even if we aren't doing a full/right outer join, we'll step through
    3343             :          * the PHJ_BATCH_SCAN phase just to maintain the invariant that
    3344             :          * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
    3345             :          */
    3346         790 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
    3347         718 :             attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
    3348         790 :         if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
    3349             :         {
    3350             :             /*
    3351             :              * We are no longer attached to the batch barrier, but we're the
    3352             :              * process that was chosen to free resources and it's safe to
    3353             :              * assert the current phase.  The ParallelHashJoinBatch can't go
    3354             :              * away underneath us while we are attached to the build barrier,
    3355             :              * making this access safe.
    3356             :              */
    3357             :             Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
    3358             : 
    3359             :             /* Free shared chunks and buckets. */
    3360        3768 :             while (DsaPointerIsValid(batch->chunks))
    3361             :             {
    3362             :                 HashMemoryChunk chunk =
    3363        3140 :                     dsa_get_address(hashtable->area, batch->chunks);
    3364        3140 :                 dsa_pointer next = chunk->next.shared;
    3365             : 
    3366        3140 :                 dsa_free(hashtable->area, batch->chunks);
    3367        3140 :                 batch->chunks = next;
    3368             :             }
    3369         628 :             if (DsaPointerIsValid(batch->buckets))
    3370             :             {
    3371         628 :                 dsa_free(hashtable->area, batch->buckets);
    3372         628 :                 batch->buckets = InvalidDsaPointer;
    3373             :             }
    3374             :         }
    3375             : 
    3376             :         /*
    3377             :          * Track the largest batch we've been attached to.  Though each
    3378             :          * backend might see a different subset of batches, explain.c will
    3379             :          * scan the results from all backends to find the largest value.
    3380             :          */
    3381         790 :         hashtable->spacePeak =
    3382         790 :             Max(hashtable->spacePeak,
    3383             :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    3384             : 
    3385             :         /* Remember that we are not attached to a batch. */
    3386         790 :         hashtable->curbatch = -1;
    3387             :     }
    3388       25138 : }
    3389             : 
    3390             : /*
    3391             :  * Detach from all shared resources.  If we are last to detach, clean up.
    3392             :  */
    3393             : void
    3394       24348 : ExecHashTableDetach(HashJoinTable hashtable)
    3395             : {
    3396       24348 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3397             : 
    3398             :     /*
    3399             :      * If we're involved in a parallel query, we must either have gotten all
    3400             :      * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
    3401             :      */
    3402             :     Assert(!pstate ||
    3403             :            BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
    3404             : 
    3405       24348 :     if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
    3406             :     {
    3407             :         int         i;
    3408             : 
    3409             :         /* Make sure any temporary files are closed. */
    3410         412 :         if (hashtable->batches)
    3411             :         {
    3412        1942 :             for (i = 0; i < hashtable->nbatch; ++i)
    3413             :             {
    3414        1530 :                 sts_end_write(hashtable->batches[i].inner_tuples);
    3415        1530 :                 sts_end_write(hashtable->batches[i].outer_tuples);
    3416        1530 :                 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3417        1530 :                 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3418             :             }
    3419             :         }
    3420             : 
    3421             :         /* If we're last to detach, clean up shared memory. */
    3422         412 :         if (BarrierArriveAndDetach(&pstate->build_barrier))
    3423             :         {
    3424             :             /*
    3425             :              * Late joining processes will see this state and give up
    3426             :              * immediately.
    3427             :              */
    3428             :             Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
    3429             : 
    3430         174 :             if (DsaPointerIsValid(pstate->batches))
    3431             :             {
    3432         174 :                 dsa_free(hashtable->area, pstate->batches);
    3433         174 :                 pstate->batches = InvalidDsaPointer;
    3434             :             }
    3435             :         }
    3436             :     }
    3437       24348 :     hashtable->parallel_state = NULL;
    3438       24348 : }
    3439             : 
    3440             : /*
    3441             :  * Get the first tuple in a given bucket identified by number.
    3442             :  */
    3443             : static inline HashJoinTuple
    3444     2780430 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
    3445             : {
    3446             :     HashJoinTuple tuple;
    3447             :     dsa_pointer p;
    3448             : 
    3449             :     Assert(hashtable->parallel_state);
    3450     2780430 :     p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
    3451     2780430 :     tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
    3452             : 
    3453     2780430 :     return tuple;
    3454             : }
    3455             : 
    3456             : /*
    3457             :  * Get the next tuple in the same bucket as 'tuple'.
    3458             :  */
    3459             : static inline HashJoinTuple
    3460     3794594 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
    3461             : {
    3462             :     HashJoinTuple next;
    3463             : 
    3464             :     Assert(hashtable->parallel_state);
    3465     3794594 :     next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
    3466             : 
    3467     3794594 :     return next;
    3468             : }
    3469             : 
    3470             : /*
    3471             :  * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
    3472             :  */
    3473             : static inline void
    3474     2940774 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
    3475             :                           HashJoinTuple tuple,
    3476             :                           dsa_pointer tuple_shared)
    3477             : {
    3478             :     for (;;)
    3479             :     {
    3480     2951586 :         tuple->next.shared = dsa_pointer_atomic_read(head);
    3481     2951586 :         if (dsa_pointer_atomic_compare_exchange(head,
    3482     2951586 :                                                 &tuple->next.shared,
    3483             :                                                 tuple_shared))
    3484     2940774 :             break;
    3485             :     }
    3486     2940774 : }
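
This is the standard lock-free stack push.  The loop above re-reads the head
explicitly on every iteration; written against C11 atomics, where a failed
compare-exchange updates the expected value for you, the same idea looks
like this (illustrative, not the dsa_pointer_atomic API):

    #include <stdatomic.h>

    typedef struct Node
    {
        struct Node *next;
        int          value;
    } Node;

    static void
    push(_Atomic(Node *) *head, Node *node)
    {
        Node *old = atomic_load(head);

        do
        {
            node->next = old;
            /* on failure, 'old' is reloaded with the current head */
        } while (!atomic_compare_exchange_weak(head, &old, node));
    }
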
    3487             : 
    3488             : /*
    3489             :  * Prepare to work on a given batch.
    3490             :  */
    3491             : void
    3492        1816 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
    3493             : {
    3494             :     Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
    3495             : 
    3496        1816 :     hashtable->curbatch = batchno;
    3497        1816 :     hashtable->buckets.shared = (dsa_pointer_atomic *)
    3498        1816 :         dsa_get_address(hashtable->area,
    3499        1816 :                         hashtable->batches[batchno].shared->buckets);
    3500        1816 :     hashtable->nbuckets = hashtable->parallel_state->nbuckets;
    3501        1816 :     hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
    3502        1816 :     hashtable->current_chunk = NULL;
    3503        1816 :     hashtable->current_chunk_shared = InvalidDsaPointer;
    3504        1816 :     hashtable->batches[batchno].at_least_one_chunk = false;
    3505        1816 : }
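
pg_ceil_log2_32() yields the smallest exponent e such that 2^e >= n; since
nbuckets is a power of two here, that recovers its exact log.  An
illustrative (unoptimized) equivalent:

    #include <stdint.h>

    static inline int
    ceil_log2_32(uint32_t n)
    {
        int exp = 0;

        /* find the smallest exp with (1 << exp) >= n */
        while (((uint32_t) 1 << exp) < n)
            exp++;
        return exp;
    }
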
    3506             : 
    3507             : /*
    3508             :  * Take the next available chunk from the queue of chunks being worked on in
    3509             :  * parallel.  Return NULL if there are none left.  Otherwise return a pointer
    3510             :  * to the chunk, and set *shared to the DSA pointer to the chunk.
    3511             :  */
    3512             : static HashMemoryChunk
    3513        1168 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
    3514             : {
    3515        1168 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3516             :     HashMemoryChunk chunk;
    3517             : 
    3518        1168 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3519        1168 :     if (DsaPointerIsValid(pstate->chunk_work_queue))
    3520             :     {
    3521         976 :         *shared = pstate->chunk_work_queue;
    3522             :         chunk = (HashMemoryChunk)
    3523         976 :             dsa_get_address(hashtable->area, *shared);
    3524         976 :         pstate->chunk_work_queue = chunk->next.shared;
    3525             :     }
    3526             :     else
    3527         192 :         chunk = NULL;
    3528        1168 :     LWLockRelease(&pstate->lock);
    3529             : 
    3530        1168 :     return chunk;
    3531             : }
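
A standalone sketch of the same pop-under-lock shape using POSIX threads
(illustrative; the real code uses an LWLock and DSA pointers):

    #include <pthread.h>
    #include <stddef.h>

    typedef struct Chunk { struct Chunk *next; } Chunk;

    typedef struct WorkQueue
    {
        pthread_mutex_t lock;
        Chunk          *head;
    } WorkQueue;

    static Chunk *
    pop_chunk(WorkQueue *q)
    {
        Chunk *chunk;

        pthread_mutex_lock(&q->lock);
        chunk = q->head;
        if (chunk != NULL)
            q->head = chunk->next;  /* unlink; NULL means the queue is drained */
        pthread_mutex_unlock(&q->lock);
        return chunk;
    }
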
    3532             : 
    3533             : /*
    3534             :  * Increase the space preallocated in this backend for a given inner batch by
    3535             :  * at least a given amount.  This allows us to track whether a given batch
    3536             :  * would fit in memory when loaded back in.  Also increase the number of
    3537             :  * batches or buckets if required.
    3538             :  *
    3539             :  * This maintains a running estimate of how much space will be taken when we
    3540             :  * load the batch back into memory by simulating the way chunks will be handed
    3541             :  * out to workers.  It's not perfectly accurate because the tuples will be
    3542             :  * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
    3543             :  * it should be pretty close.  It tends to overestimate by a fraction of a
    3544             :  * chunk per worker since all workers gang up to preallocate during hashing,
    3545             :  * but workers tend to reload batches alone if there are enough to go around,
    3546             :  * leaving fewer partially filled chunks.  This effect is bounded by
    3547             :  * nparticipants.
    3548             :  *
    3549             :  * Return false if the number of batches or buckets has changed, and the
    3550             :  * caller should reconsider which batch a given tuple now belongs in and call
    3551             :  * again.
    3552             :  */
    3553             : static bool
    3554        1528 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
    3555             : {
    3556        1528 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3557        1528 :     ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
    3558        1528 :     size_t      want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
    3559             : 
    3560             :     Assert(batchno > 0);
    3561             :     Assert(batchno < hashtable->nbatch);
    3562             :     Assert(size == MAXALIGN(size));
    3563             : 
    3564        1528 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3565             : 
    3566             :     /* Has another participant commanded us to help grow? */
    3567        1528 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    3568        1504 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3569             :     {
    3570          24 :         ParallelHashGrowth growth = pstate->growth;
    3571             : 
    3572          24 :         LWLockRelease(&pstate->lock);
    3573          24 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    3574          24 :             ExecParallelHashIncreaseNumBatches(hashtable);
    3575           0 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3576           0 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    3577             : 
    3578          24 :         return false;
    3579             :     }
    3580             : 
    3581        1504 :     if (pstate->growth != PHJ_GROWTH_DISABLED &&
    3582        1276 :         batch->at_least_one_chunk &&
    3583         620 :         (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
    3584         620 :          > pstate->space_allowed))
    3585             :     {
    3586             :         /*
    3587             :          * We have determined that this batch would exceed the space budget if
    3588             :          * loaded into memory.  Command all participants to help repartition.
    3589             :          */
    3590          16 :         batch->shared->space_exhausted = true;
    3591          16 :         pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3592          16 :         LWLockRelease(&pstate->lock);
    3593             : 
    3594          16 :         return false;
    3595             :     }
    3596             : 
    3597        1488 :     batch->at_least_one_chunk = true;
    3598        1488 :     batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
    3599        1488 :     batch->preallocated = want;
    3600        1488 :     LWLockRelease(&pstate->lock);
    3601             : 
    3602        1488 :     return true;
    3603             : }
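
Note the effect of the Max() on 'want' above: every successful preallocation
reserves at least one full chunk's payload (HASH_CHUNK_SIZE minus its
header), so estimated_size grows in whole-chunk steps, mirroring how
ExecParallelHashTupleAlloc() will carve the batch into chunks when it is
later loaded back into memory.
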
    3604             : 
    3605             : /*
    3606             :  * Calculate the limit on how much memory can be used by Hash and similar
    3607             :  * plan types.  This is work_mem times hash_mem_multiplier, and is
    3608             :  * expressed in bytes.
    3609             :  *
    3610             :  * Exported for use by the planner, as well as other hash-like executor
    3611             :  * nodes.  This is a rather random place for this, but there is no better
    3612             :  * place.
    3613             :  */
    3614             : size_t
    3615     1729636 : get_hash_memory_limit(void)
    3616             : {
    3617             :     double      mem_limit;
    3618             : 
    3619             :     /* Do initial calculation in double arithmetic */
    3620     1729636 :     mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
    3621             : 
    3622             :     /* Clamp in case it doesn't fit in size_t */
    3623     1729636 :     mem_limit = Min(mem_limit, (double) SIZE_MAX);
    3624             : 
    3625     1729636 :     return (size_t) mem_limit;
    3626             : }
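
As a worked example: with the stock defaults work_mem = 4MB and
hash_mem_multiplier = 2.0, this returns 4096 * 2.0 * 1024 = 8,388,608 bytes
(8MB).  The clamp against SIZE_MAX only matters for extreme settings whose
product would not fit in a size_t.
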

Generated by: LCOV version 1.16