LCOV - code coverage report
Current view: top level - src/backend/executor - nodeHash.c (source / functions)
Test:       PostgreSQL 19devel
Test Date:  2026-03-01 08:15:02
Coverage:   Lines: 94.8 % (1051 of 1109)    Functions: 98.2 % (54 of 55)
Legend:     Lines: hit | not hit

            Line data    Source code
       1              : /*-------------------------------------------------------------------------
       2              :  *
       3              :  * nodeHash.c
       4              :  *    Routines to hash relations for hashjoin
       5              :  *
       6              :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
       7              :  * Portions Copyright (c) 1994, Regents of the University of California
       8              :  *
       9              :  *
      10              :  * IDENTIFICATION
      11              :  *    src/backend/executor/nodeHash.c
      12              :  *
      13              :  * See note on parallelism in nodeHashjoin.c.
      14              :  *
      15              :  *-------------------------------------------------------------------------
      16              :  */
      17              : /*
      18              :  * INTERFACE ROUTINES
      19              :  *      MultiExecHash   - generate an in-memory hash table of the relation
      20              :  *      ExecInitHash    - initialize node and subnodes
      21              :  *      ExecEndHash     - shutdown node and subnodes
      22              :  */
      23              : 
      24              : #include "postgres.h"
      25              : 
      26              : #include <math.h>
      27              : #include <limits.h>
      28              : 
      29              : #include "access/htup_details.h"
      30              : #include "access/parallel.h"
      31              : #include "catalog/pg_statistic.h"
      32              : #include "commands/tablespace.h"
      33              : #include "executor/executor.h"
      34              : #include "executor/hashjoin.h"
      35              : #include "executor/nodeHash.h"
      36              : #include "executor/nodeHashjoin.h"
      37              : #include "miscadmin.h"
      38              : #include "port/pg_bitutils.h"
      39              : #include "utils/lsyscache.h"
      40              : #include "utils/memutils.h"
      41              : #include "utils/syscache.h"
      42              : #include "utils/wait_event.h"
      43              : 
      44              : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
      45              : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
      46              : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
      47              : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
      48              : static void ExecHashBuildSkewHash(HashState *hashstate,
      49              :                                   HashJoinTable hashtable, Hash *node,
      50              :                                   int mcvsToUse);
      51              : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
      52              :                                     TupleTableSlot *slot,
      53              :                                     uint32 hashvalue,
      54              :                                     int bucketNumber);
      55              : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
      56              : 
      57              : static void *dense_alloc(HashJoinTable hashtable, Size size);
      58              : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
      59              :                                                 size_t size,
      60              :                                                 dsa_pointer *shared);
      61              : static void MultiExecPrivateHash(HashState *node);
      62              : static void MultiExecParallelHash(HashState *node);
      63              : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
      64              :                                                        int bucketno);
      65              : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
      66              :                                                       HashJoinTuple tuple);
      67              : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
      68              :                                              HashJoinTuple tuple,
      69              :                                              dsa_pointer tuple_shared);
      70              : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
      71              : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
      72              : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
      73              : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
      74              : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
      75              :                                                      dsa_pointer *shared);
      76              : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
      77              :                                           int batchno,
      78              :                                           size_t size);
      79              : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
      80              : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
      81              : 
      82              : 
      83              : /* ----------------------------------------------------------------
      84              :  *      ExecHash
      85              :  *
      86              :  *      stub for pro forma compliance
      87              :  * ----------------------------------------------------------------
      88              :  */
      89              : static TupleTableSlot *
      90            0 : ExecHash(PlanState *pstate)
      91              : {
      92            0 :     elog(ERROR, "Hash node does not support ExecProcNode call convention");
      93              :     return NULL;
      94              : }
      95              : 
      96              : /* ----------------------------------------------------------------
      97              :  *      MultiExecHash
      98              :  *
      99              :  *      build hash table for hashjoin, doing partitioning if more
     100              :  *      than one batch is required.
     101              :  * ----------------------------------------------------------------
     102              :  */
     103              : Node *
     104        13748 : MultiExecHash(HashState *node)
     105              : {
     106              :     /* must provide our own instrumentation support */
     107        13748 :     if (node->ps.instrument)
     108          173 :         InstrStartNode(node->ps.instrument);
     109              : 
     110        13748 :     if (node->parallel_state != NULL)
     111          207 :         MultiExecParallelHash(node);
     112              :     else
     113        13541 :         MultiExecPrivateHash(node);
     114              : 
     115              :     /* must provide our own instrumentation support */
     116        13748 :     if (node->ps.instrument)
     117          173 :         InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
     118              : 
     119              :     /*
     120              :      * We do not return the hash table directly because it's not a subtype of
     121              :      * Node, and so would violate the MultiExecProcNode API.  Instead, our
     122              :      * parent Hashjoin node is expected to know how to fish it out of our node
     123              :      * state.  Ugly but not really worth cleaning up, since Hashjoin knows
     124              :      * quite a bit more about Hash besides that.
     125              :      */
     126        13748 :     return NULL;
     127              : }
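The convention in the comment above is easiest to see from the caller's side: MultiExecProcNode() returns NULL for a Hash node, and the HashJoin parent then reads the table out of the child's node state. Below is a minimal standalone sketch of that pattern; the Fake* types and multi_exec_hash() are illustrative stand-ins, not PostgreSQL code.

#include <stdio.h>

/* illustrative stand-ins for HashJoinTable / HashState */
typedef struct FakeHashTable
{
    long        totalTuples;
} FakeHashTable;

typedef struct FakeHashState
{
    FakeHashTable *hashtable;   /* parent fishes the table out of here */
} FakeHashState;

/* counterpart of MultiExecHash(): fills in node state, returns NULL */
static void *
multi_exec_hash(FakeHashState *node)
{
    static FakeHashTable table = {42};

    node->hashtable = &table;
    return NULL;                /* the table is never returned directly */
}

int
main(void)
{
    FakeHashState hash_node = {NULL};

    (void) multi_exec_hash(&hash_node);
    /* the "HashJoin parent" reads the table from the child's state */
    printf("tuples hashed: %ld\n", hash_node.hashtable->totalTuples);
    return 0;
}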
     128              : 
     129              : /* ----------------------------------------------------------------
     130              :  *      MultiExecPrivateHash
     131              :  *
     132              :  *      parallel-oblivious version, building a backend-private
     133              :  *      hash table and (if necessary) batch files.
     134              :  * ----------------------------------------------------------------
     135              :  */
     136              : static void
     137        13541 : MultiExecPrivateHash(HashState *node)
     138              : {
     139              :     PlanState  *outerNode;
     140              :     HashJoinTable hashtable;
     141              :     TupleTableSlot *slot;
     142              :     ExprContext *econtext;
     143              : 
     144              :     /*
     145              :      * get state info from node
     146              :      */
     147        13541 :     outerNode = outerPlanState(node);
     148        13541 :     hashtable = node->hashtable;
     149              : 
     150              :     /*
     151              :      * set expression context
     152              :      */
     153        13541 :     econtext = node->ps.ps_ExprContext;
     154              : 
     155              :     /*
     156              :      * Get all tuples from the node below the Hash node and insert into the
     157              :      * hash table (or temp files).
     158              :      */
     159              :     for (;;)
     160      4439309 :     {
     161              :         bool        isnull;
     162              :         Datum       hashdatum;
     163              : 
     164      4452850 :         slot = ExecProcNode(outerNode);
     165      4452850 :         if (TupIsNull(slot))
     166              :             break;
     167              :         /* We have to compute the hash value */
     168      4439309 :         econtext->ecxt_outertuple = slot;
     169              : 
     170      4439309 :         ResetExprContext(econtext);
     171              : 
     172      4439309 :         hashdatum = ExecEvalExprSwitchContext(node->hash_expr, econtext,
     173              :                                               &isnull);
     174              : 
     175      4439309 :         if (!isnull)
     176              :         {
     177      4439265 :             uint32      hashvalue = DatumGetUInt32(hashdatum);
     178              :             int         bucketNumber;
     179              : 
     180      4439265 :             bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
     181      4439265 :             if (bucketNumber != INVALID_SKEW_BUCKET_NO)
     182              :             {
     183              :                 /* It's a skew tuple, so put it into that hash table */
     184          294 :                 ExecHashSkewTableInsert(hashtable, slot, hashvalue,
     185              :                                         bucketNumber);
     186          294 :                 hashtable->skewTuples += 1;
     187              :             }
     188              :             else
     189              :             {
     190              :                 /* Not subject to skew optimization, so insert normally */
     191      4438971 :                 ExecHashTableInsert(hashtable, slot, hashvalue);
     192              :             }
     193      4439265 :             hashtable->totalTuples += 1;
     194              :         }
     195              :     }
     196              : 
     197              :     /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
     198        13541 :     if (hashtable->nbuckets != hashtable->nbuckets_optimal)
     199           42 :         ExecHashIncreaseNumBuckets(hashtable);
     200              : 
     201              :     /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
     202        13541 :     hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
     203        13541 :     if (hashtable->spaceUsed > hashtable->spacePeak)
     204        13519 :         hashtable->spacePeak = hashtable->spaceUsed;
     205              : 
     206        13541 :     hashtable->partialTuples = hashtable->totalTuples;
     207        13541 : }
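Each hash value computed in the loop above is mapped to a bucket (and, when nbatch > 1, a batch) by simple bit masking, which is why ExecChooseHashTableSize() insists that nbuckets and nbatch be powers of two. The standalone sketch below is modeled on this file's ExecHashGetBucketAndBatch(); the rotate helper is reimplemented locally and the constants are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* same effect as PostgreSQL's pg_rotate_right32() */
static inline uint32_t
rotate_right32(uint32_t word, int n)
{
    return (word >> n) | (word << (32 - n));    /* valid for 0 < n < 32 */
}

int
main(void)
{
    uint32_t    nbuckets = 1024;        /* must be a power of 2 */
    uint32_t    nbatch = 8;             /* must be a power of 2 */
    int         log2_nbuckets = 10;     /* log2(nbuckets) */
    uint32_t    hashvalue = 0xDEADBEEFu;

    /* low bits pick the bucket, the bits above them pick the batch */
    uint32_t    bucketno = hashvalue & (nbuckets - 1);
    uint32_t    batchno = rotate_right32(hashvalue, log2_nbuckets) &
        (nbatch - 1);

    printf("hash 0x%08X -> bucket %u, batch %u\n",
           (unsigned) hashvalue, (unsigned) bucketno, (unsigned) batchno);
    return 0;
}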
     208              : 
     209              : /* ----------------------------------------------------------------
     210              :  *      MultiExecParallelHash
     211              :  *
     212              :  *      parallel-aware version, building a shared hash table and
     213              :  *      (if necessary) batch files using the combined effort of
     214              :  *      a set of co-operating backends.
     215              :  * ----------------------------------------------------------------
     216              :  */
     217              : static void
     218          207 : MultiExecParallelHash(HashState *node)
     219              : {
     220              :     ParallelHashJoinState *pstate;
     221              :     PlanState  *outerNode;
     222              :     HashJoinTable hashtable;
     223              :     TupleTableSlot *slot;
     224              :     ExprContext *econtext;
     225              :     uint32      hashvalue;
     226              :     Barrier    *build_barrier;
     227              :     int         i;
     228              : 
     229              :     /*
     230              :      * get state info from node
     231              :      */
     232          207 :     outerNode = outerPlanState(node);
     233          207 :     hashtable = node->hashtable;
     234              : 
     235              :     /*
     236              :      * set expression context
     237              :      */
     238          207 :     econtext = node->ps.ps_ExprContext;
     239              : 
     240              :     /*
     241              :      * Synchronize the parallel hash table build.  At this stage we know that
     242              :      * the shared hash table has been or is being set up by
     243              :      * ExecHashTableCreate(), but we don't know if our peers have returned
     244              :      * from there or are here in MultiExecParallelHash(), and if so how far
      245              :      * through they are.  To find out, we check the build_barrier phase and
      246              :      * then jump to the right step in the build algorithm.
     247              :      */
     248          207 :     pstate = hashtable->parallel_state;
     249          207 :     build_barrier = &pstate->build_barrier;
     250              :     Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
     251          207 :     switch (BarrierPhase(build_barrier))
     252              :     {
     253           90 :         case PHJ_BUILD_ALLOCATE:
     254              : 
     255              :             /*
     256              :              * Either I just allocated the initial hash table in
     257              :              * ExecHashTableCreate(), or someone else is doing that.  Either
     258              :              * way, wait for everyone to arrive here so we can proceed.
     259              :              */
     260           90 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
      261              :             /* Fall through. */
     262              : 
     263          168 :         case PHJ_BUILD_HASH_INNER:
     264              : 
     265              :             /*
     266              :              * It's time to begin hashing, or if we just arrived here then
     267              :              * hashing is already underway, so join in that effort.  While
     268              :              * hashing we have to be prepared to help increase the number of
     269              :              * batches or buckets at any time, and if we arrived here when
     270              :              * that was already underway we'll have to help complete that work
     271              :              * immediately so that it's safe to access batches and buckets
     272              :              * below.
     273              :              */
     274          168 :             if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
     275              :                 PHJ_GROW_BATCHES_ELECT)
     276            2 :                 ExecParallelHashIncreaseNumBatches(hashtable);
     277          168 :             if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
     278              :                 PHJ_GROW_BUCKETS_ELECT)
     279            0 :                 ExecParallelHashIncreaseNumBuckets(hashtable);
     280          168 :             ExecParallelHashEnsureBatchAccessors(hashtable);
     281          168 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
     282              :             for (;;)
     283      1080096 :             {
     284              :                 bool        isnull;
     285              : 
     286      1080264 :                 slot = ExecProcNode(outerNode);
     287      1080264 :                 if (TupIsNull(slot))
     288              :                     break;
     289      1080096 :                 econtext->ecxt_outertuple = slot;
     290              : 
     291      1080096 :                 ResetExprContext(econtext);
     292              : 
     293      1080096 :                 hashvalue = DatumGetUInt32(ExecEvalExprSwitchContext(node->hash_expr,
     294              :                                                                      econtext,
     295              :                                                                      &isnull));
     296              : 
     297      1080096 :                 if (!isnull)
     298      1080096 :                     ExecParallelHashTableInsert(hashtable, slot, hashvalue);
     299      1080096 :                 hashtable->partialTuples++;
     300              :             }
     301              : 
     302              :             /*
     303              :              * Make sure that any tuples we wrote to disk are visible to
     304              :              * others before anyone tries to load them.
     305              :              */
     306          905 :             for (i = 0; i < hashtable->nbatch; ++i)
     307          737 :                 sts_end_write(hashtable->batches[i].inner_tuples);
     308              : 
     309              :             /*
     310              :              * Update shared counters.  We need an accurate total tuple count
     311              :              * to control the empty table optimization.
     312              :              */
     313          168 :             ExecParallelHashMergeCounters(hashtable);
     314              : 
     315          168 :             BarrierDetach(&pstate->grow_buckets_barrier);
     316          168 :             BarrierDetach(&pstate->grow_batches_barrier);
     317              : 
     318              :             /*
     319              :              * Wait for everyone to finish building and flushing files and
     320              :              * counters.
     321              :              */
     322          168 :             if (BarrierArriveAndWait(build_barrier,
     323              :                                      WAIT_EVENT_HASH_BUILD_HASH_INNER))
     324              :             {
     325              :                 /*
     326              :                  * Elect one backend to disable any further growth.  Batches
     327              :                  * are now fixed.  While building them we made sure they'd fit
     328              :                  * in our memory budget when we load them back in later (or we
     329              :                  * tried to do that and gave up because we detected extreme
     330              :                  * skew).
     331              :                  */
     332           87 :                 pstate->growth = PHJ_GROWTH_DISABLED;
     333              :             }
     334              :     }
     335              : 
     336              :     /*
     337              :      * We're not yet attached to a batch.  We all agree on the dimensions and
     338              :      * number of inner tuples (for the empty table optimization).
     339              :      */
     340          207 :     hashtable->curbatch = -1;
     341          207 :     hashtable->nbuckets = pstate->nbuckets;
     342          207 :     hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
     343          207 :     hashtable->totalTuples = pstate->total_tuples;
     344              : 
     345              :     /*
     346              :      * Unless we're completely done and the batch state has been freed, make
     347              :      * sure we have accessors.
     348              :      */
     349          207 :     if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
     350          207 :         ExecParallelHashEnsureBatchAccessors(hashtable);
     351              : 
     352              :     /*
     353              :      * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
     354              :      * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
     355              :      * there already).
     356              :      */
     357              :     Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
     358              :            BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
     359              :            BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
     360          207 : }
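The build_barrier choreography above relies on two Barrier behaviors: a participant can attach at any phase and jump to the matching step, and BarrierArriveAndWait() returns true in exactly one participant, electing it for serial work. PostgreSQL's phased, detachable barriers (storage/ipc/barrier.c) have no direct libc equivalent, but the election idiom can be sketched with POSIX barriers, where pthread_barrier_wait() returns PTHREAD_BARRIER_SERIAL_THREAD in exactly one thread. Illustrative only; build with cc -pthread.

#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_barrier_t build_barrier;

static void *
worker(void *arg)
{
    (void) arg;
    /* ... each worker would hash its share of tuples here ... */
    if (pthread_barrier_wait(&build_barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
    {
        /* exactly one thread is "elected", like the backend that sets
         * pstate->growth = PHJ_GROWTH_DISABLED above */
        puts("elected: performing the serial step");
    }
    return NULL;
}

int
main(void)
{
    pthread_t   workers[NWORKERS];

    pthread_barrier_init(&build_barrier, NULL, NWORKERS);
    for (int i = 0; i < NWORKERS; i++)
        pthread_create(&workers[i], NULL, worker, NULL);
    for (int i = 0; i < NWORKERS; i++)
        pthread_join(workers[i], NULL);
    pthread_barrier_destroy(&build_barrier);
    return 0;
}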
     361              : 
     362              : /* ----------------------------------------------------------------
     363              :  *      ExecInitHash
     364              :  *
     365              :  *      Init routine for Hash node
     366              :  * ----------------------------------------------------------------
     367              :  */
     368              : HashState *
     369        18692 : ExecInitHash(Hash *node, EState *estate, int eflags)
     370              : {
     371              :     HashState  *hashstate;
     372              : 
     373              :     /* check for unsupported flags */
     374              :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
     375              : 
     376              :     /*
     377              :      * create state structure
     378              :      */
     379        18692 :     hashstate = makeNode(HashState);
     380        18692 :     hashstate->ps.plan = (Plan *) node;
     381        18692 :     hashstate->ps.state = estate;
     382        18692 :     hashstate->ps.ExecProcNode = ExecHash;
     383              :     /* delay building hashtable until ExecHashTableCreate() in executor run */
     384        18692 :     hashstate->hashtable = NULL;
     385              : 
     386              :     /*
     387              :      * Miscellaneous initialization
     388              :      *
     389              :      * create expression context for node
     390              :      */
     391        18692 :     ExecAssignExprContext(estate, &hashstate->ps);
     392              : 
     393              :     /*
     394              :      * initialize child nodes
     395              :      */
     396        18692 :     outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
     397              : 
     398              :     /*
     399              :      * initialize our result slot and type. No need to build projection
     400              :      * because this node doesn't do projections.
     401              :      */
     402        18692 :     ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
     403        18692 :     hashstate->ps.ps_ProjInfo = NULL;
     404              : 
     405              :     Assert(node->plan.qual == NIL);
     406              : 
     407              :     /*
     408              :      * Delay initialization of hash_expr until ExecInitHashJoin().  We cannot
     409              :      * build the ExprState here as we don't yet know the join type we're going
     410              :      * to be hashing values for and we need to know that before calling
     411              :      * ExecBuildHash32Expr as the keep_nulls parameter depends on the join
     412              :      * type.
     413              :      */
     414        18692 :     hashstate->hash_expr = NULL;
     415              : 
     416        18692 :     return hashstate;
     417              : }
     418              : 
     419              : /* ---------------------------------------------------------------
     420              :  *      ExecEndHash
     421              :  *
     422              :  *      clean up routine for Hash node
     423              :  * ----------------------------------------------------------------
     424              :  */
     425              : void
     426        18636 : ExecEndHash(HashState *node)
     427              : {
     428              :     PlanState  *outerPlan;
     429              : 
     430              :     /*
     431              :      * shut down the subplan
     432              :      */
     433        18636 :     outerPlan = outerPlanState(node);
     434        18636 :     ExecEndNode(outerPlan);
     435        18636 : }
     436              : 
     437              : 
     438              : /* ----------------------------------------------------------------
     439              :  *      ExecHashTableCreate
     440              :  *
     441              :  *      create an empty hashtable data structure for hashjoin.
     442              :  * ----------------------------------------------------------------
     443              :  */
     444              : HashJoinTable
     445        13748 : ExecHashTableCreate(HashState *state)
     446              : {
     447              :     Hash       *node;
     448              :     HashJoinTable hashtable;
     449              :     Plan       *outerNode;
     450              :     size_t      space_allowed;
     451              :     int         nbuckets;
     452              :     int         nbatch;
     453              :     double      rows;
     454              :     int         num_skew_mcvs;
     455              :     int         log2_nbuckets;
     456              :     MemoryContext oldcxt;
     457              : 
     458              :     /*
     459              :      * Get information about the size of the relation to be hashed (it's the
     460              :      * "outer" subtree of this node, but the inner relation of the hashjoin).
     461              :      * Compute the appropriate size of the hash table.
     462              :      */
     463        13748 :     node = (Hash *) state->ps.plan;
     464        13748 :     outerNode = outerPlan(node);
     465              : 
     466              :     /*
      467              :      * If this is a shared hash table with a partial plan, then we can't use
     468              :      * outerNode->plan_rows to estimate its size.  We need an estimate of the
     469              :      * total number of rows across all copies of the partial plan.
     470              :      */
     471        13748 :     rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
     472              : 
     473        13541 :     ExecChooseHashTableSize(rows, outerNode->plan_width,
     474        13748 :                             OidIsValid(node->skewTable),
     475        13748 :                             state->parallel_state != NULL,
     476        13748 :                             state->parallel_state != NULL ?
     477          207 :                             state->parallel_state->nparticipants - 1 : 0,
     478              :                             &space_allowed,
     479              :                             &nbuckets, &nbatch, &num_skew_mcvs);
     480              : 
     481              :     /* nbuckets must be a power of 2 */
     482        13748 :     log2_nbuckets = pg_ceil_log2_32(nbuckets);
     483              :     Assert(nbuckets == (1 << log2_nbuckets));
     484              : 
     485              :     /*
     486              :      * Initialize the hash table control block.
     487              :      *
     488              :      * The hashtable control block is just palloc'd from the executor's
     489              :      * per-query memory context.  Everything else should be kept inside the
     490              :      * subsidiary hashCxt, batchCxt or spillCxt.
     491              :      */
     492        13748 :     hashtable = palloc_object(HashJoinTableData);
     493        13748 :     hashtable->nbuckets = nbuckets;
     494        13748 :     hashtable->nbuckets_original = nbuckets;
     495        13748 :     hashtable->nbuckets_optimal = nbuckets;
     496        13748 :     hashtable->log2_nbuckets = log2_nbuckets;
     497        13748 :     hashtable->log2_nbuckets_optimal = log2_nbuckets;
     498        13748 :     hashtable->buckets.unshared = NULL;
     499        13748 :     hashtable->skewEnabled = false;
     500        13748 :     hashtable->skewBucket = NULL;
     501        13748 :     hashtable->skewBucketLen = 0;
     502        13748 :     hashtable->nSkewBuckets = 0;
     503        13748 :     hashtable->skewBucketNums = NULL;
     504        13748 :     hashtable->nbatch = nbatch;
     505        13748 :     hashtable->curbatch = 0;
     506        13748 :     hashtable->nbatch_original = nbatch;
     507        13748 :     hashtable->nbatch_outstart = nbatch;
     508        13748 :     hashtable->growEnabled = true;
     509        13748 :     hashtable->totalTuples = 0;
     510        13748 :     hashtable->partialTuples = 0;
     511        13748 :     hashtable->skewTuples = 0;
     512        13748 :     hashtable->innerBatchFile = NULL;
     513        13748 :     hashtable->outerBatchFile = NULL;
     514        13748 :     hashtable->spaceUsed = 0;
     515        13748 :     hashtable->spacePeak = 0;
     516        13748 :     hashtable->spaceAllowed = space_allowed;
     517        13748 :     hashtable->spaceUsedSkew = 0;
     518        13748 :     hashtable->spaceAllowedSkew =
     519        13748 :         hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
     520        13748 :     hashtable->chunks = NULL;
     521        13748 :     hashtable->current_chunk = NULL;
     522        13748 :     hashtable->parallel_state = state->parallel_state;
     523        13748 :     hashtable->area = state->ps.state->es_query_dsa;
     524        13748 :     hashtable->batches = NULL;
     525              : 
     526              : #ifdef HJDEBUG
     527              :     printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
     528              :            hashtable, nbatch, nbuckets);
     529              : #endif
     530              : 
     531              :     /*
     532              :      * Create temporary memory contexts in which to keep the hashtable working
     533              :      * storage.  See notes in executor/hashjoin.h.
     534              :      */
     535        13748 :     hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
     536              :                                                "HashTableContext",
     537              :                                                ALLOCSET_DEFAULT_SIZES);
     538              : 
     539        13748 :     hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
     540              :                                                 "HashBatchContext",
     541              :                                                 ALLOCSET_DEFAULT_SIZES);
     542              : 
     543        13748 :     hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
     544              :                                                 "HashSpillContext",
     545              :                                                 ALLOCSET_DEFAULT_SIZES);
     546              : 
     547              :     /* Allocate data that will live for the life of the hashjoin */
     548              : 
     549        13748 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
     550              : 
     551        13748 :     if (nbatch > 1 && hashtable->parallel_state == NULL)
     552              :     {
     553              :         MemoryContext oldctx;
     554              : 
     555              :         /*
      556              :          * allocate and initialize the file arrays in spillCxt (not needed for
      557              :          * parallel case which uses shared tuplestores instead of raw files)
     558              :          */
     559           63 :         oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
     560              : 
     561           63 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
     562           63 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
     563              : 
     564           63 :         MemoryContextSwitchTo(oldctx);
     565              : 
     566              :         /* The files will not be opened until needed... */
     567              :         /* ... but make sure we have temp tablespaces established for them */
     568           63 :         PrepareTempTablespaces();
     569              :     }
     570              : 
     571        13748 :     MemoryContextSwitchTo(oldcxt);
     572              : 
     573        13748 :     if (hashtable->parallel_state)
     574              :     {
     575          207 :         ParallelHashJoinState *pstate = hashtable->parallel_state;
     576              :         Barrier    *build_barrier;
     577              : 
     578              :         /*
     579              :          * Attach to the build barrier.  The corresponding detach operation is
     580              :          * in ExecHashTableDetach.  Note that we won't attach to the
     581              :          * batch_barrier for batch 0 yet.  We'll attach later and start it out
     582              :          * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
     583              :          * then loaded while hashing (the standard hybrid hash join
     584              :          * algorithm), and we'll coordinate that using build_barrier.
     585              :          */
     586          207 :         build_barrier = &pstate->build_barrier;
     587          207 :         BarrierAttach(build_barrier);
     588              : 
     589              :         /*
     590              :          * So far we have no idea whether there are any other participants,
     591              :          * and if so, what phase they are working on.  The only thing we care
     592              :          * about at this point is whether someone has already created the
     593              :          * SharedHashJoinBatch objects and the hash table for batch 0.  One
     594              :          * backend will be elected to do that now if necessary.
     595              :          */
     596          294 :         if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
     597           87 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
     598              :         {
     599           87 :             pstate->nbatch = nbatch;
     600           87 :             pstate->space_allowed = space_allowed;
     601           87 :             pstate->growth = PHJ_GROWTH_OK;
     602              : 
     603              :             /* Set up the shared state for coordinating batches. */
     604           87 :             ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
     605              : 
     606              :             /*
     607              :              * Allocate batch 0's hash table up front so we can load it
     608              :              * directly while hashing.
     609              :              */
     610           87 :             pstate->nbuckets = nbuckets;
     611           87 :             ExecParallelHashTableAlloc(hashtable, 0);
     612              :         }
     613              : 
     614              :         /*
     615              :          * The next Parallel Hash synchronization point is in
     616              :          * MultiExecParallelHash(), which will progress it all the way to
     617              :          * PHJ_BUILD_RUN.  The caller must not return control from this
     618              :          * executor node between now and then.
     619              :          */
     620              :     }
     621              :     else
     622              :     {
     623              :         /*
     624              :          * Prepare context for the first-scan space allocations; allocate the
     625              :          * hashbucket array therein, and set each bucket "empty".
     626              :          */
     627        13541 :         MemoryContextSwitchTo(hashtable->batchCxt);
     628              : 
     629        13541 :         hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
     630              : 
     631              :         /*
     632              :          * Set up for skew optimization, if possible and there's a need for
     633              :          * more than one batch.  (In a one-batch join, there's no point in
     634              :          * it.)
     635              :          */
     636        13541 :         if (nbatch > 1)
     637           63 :             ExecHashBuildSkewHash(state, hashtable, node, num_skew_mcvs);
     638              : 
     639        13541 :         MemoryContextSwitchTo(oldcxt);
     640              :     }
     641              : 
     642        13748 :     return hashtable;
     643              : }
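The skew budget set near the top of this function is a fixed percentage of the overall memory allowance. Assuming SKEW_HASH_MEM_PERCENT is 2 (its long-standing value in executor/hashjoin.h; treat that as an assumption here), the arithmetic works out as in this standalone sketch:

#include <stddef.h>
#include <stdio.h>

/* assumed to match SKEW_HASH_MEM_PERCENT in executor/hashjoin.h */
#define SKEW_HASH_MEM_PERCENT 2

int
main(void)
{
    /* e.g. work_mem = 4MB with hash_mem_multiplier = 2.0 */
    size_t      space_allowed = 8UL * 1024 * 1024;
    size_t      space_allowed_skew =
        space_allowed * SKEW_HASH_MEM_PERCENT / 100;

    /* 8MB * 2 / 100 = 167772 bytes (~164kB) reserved for skew buckets */
    printf("skew budget: %zu bytes\n", space_allowed_skew);
    return 0;
}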
     644              : 
     645              : 
     646              : /*
     647              :  * Compute appropriate size for hashtable given the estimated size of the
     648              :  * relation to be hashed (number of rows and average row width).
     649              :  *
     650              :  * This is exported so that the planner's costsize.c can use it.
     651              :  */
     652              : 
     653              : /* Target bucket loading (tuples per bucket) */
     654              : #define NTUP_PER_BUCKET         1
     655              : 
     656              : void
     657       500589 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
     658              :                         bool try_combined_hash_mem,
     659              :                         int parallel_workers,
     660              :                         size_t *space_allowed,
     661              :                         int *numbuckets,
     662              :                         int *numbatches,
     663              :                         int *num_skew_mcvs)
     664              : {
     665              :     int         tupsize;
     666              :     double      inner_rel_bytes;
     667              :     size_t      hash_table_bytes;
     668              :     size_t      bucket_bytes;
     669              :     size_t      max_pointers;
     670       500589 :     int         nbatch = 1;
     671              :     int         nbuckets;
     672              :     double      dbuckets;
     673              : 
     674              :     /* Force a plausible relation size if no info */
     675       500589 :     if (ntuples <= 0.0)
     676           75 :         ntuples = 1000.0;
     677              : 
     678              :     /*
     679              :      * Estimate tupsize based on footprint of tuple in hashtable... note this
     680              :      * does not allow for any palloc overhead.  The manipulations of spaceUsed
     681              :      * don't count palloc overhead either.
     682              :      */
     683       500589 :     tupsize = HJTUPLE_OVERHEAD +
     684       500589 :         MAXALIGN(SizeofMinimalTupleHeader) +
     685       500589 :         MAXALIGN(tupwidth);
     686       500589 :     inner_rel_bytes = ntuples * tupsize;
     687              : 
     688              :     /*
     689              :      * Compute in-memory hashtable size limit from GUCs.
     690              :      */
     691       500589 :     hash_table_bytes = get_hash_memory_limit();
     692              : 
     693              :     /*
     694              :      * Parallel Hash tries to use the combined hash_mem of all workers to
     695              :      * avoid the need to batch.  If that won't work, it falls back to hash_mem
     696              :      * per worker and tries to process batches in parallel.
     697              :      */
     698       500589 :     if (try_combined_hash_mem)
     699              :     {
     700              :         /* Careful, this could overflow size_t */
     701              :         double      newlimit;
     702              : 
     703        37800 :         newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
     704        37800 :         newlimit = Min(newlimit, (double) SIZE_MAX);
     705        37800 :         hash_table_bytes = (size_t) newlimit;
     706              :     }
     707              : 
     708       500589 :     *space_allowed = hash_table_bytes;
     709              : 
     710              :     /*
     711              :      * If skew optimization is possible, estimate the number of skew buckets
     712              :      * that will fit in the memory allowed, and decrement the assumed space
     713              :      * available for the main hash table accordingly.
     714              :      *
     715              :      * We make the optimistic assumption that each skew bucket will contain
     716              :      * one inner-relation tuple.  If that turns out to be low, we will recover
     717              :      * at runtime by reducing the number of skew buckets.
     718              :      *
     719              :      * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
     720              :      * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
     721              :      * will round up to the next power of 2 and then multiply by 4 to reduce
     722              :      * collisions.
     723              :      */
     724       500589 :     if (useskew)
     725              :     {
     726              :         size_t      bytes_per_mcv;
     727              :         size_t      skew_mcvs;
     728              : 
     729              :         /*----------
     730              :          * Compute number of MCVs we could hold in hash_table_bytes
     731              :          *
     732              :          * Divisor is:
     733              :          * size of a hash tuple +
     734              :          * worst-case size of skewBucket[] per MCV +
     735              :          * size of skewBucketNums[] entry +
     736              :          * size of skew bucket struct itself
     737              :          *----------
     738              :          */
     739       497598 :         bytes_per_mcv = tupsize +
     740              :             (8 * sizeof(HashSkewBucket *)) +
     741       497598 :             sizeof(int) +
     742              :             SKEW_BUCKET_OVERHEAD;
     743       497598 :         skew_mcvs = hash_table_bytes / bytes_per_mcv;
     744              : 
     745              :         /*
     746              :          * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
     747              :          * not to worry about size_t overflow in the multiplication)
     748              :          */
     749       497598 :         skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
     750              : 
     751              :         /* Now clamp to integer range */
     752       497598 :         skew_mcvs = Min(skew_mcvs, INT_MAX);
     753              : 
     754       497598 :         *num_skew_mcvs = (int) skew_mcvs;
     755              : 
     756              :         /* Reduce hash_table_bytes by the amount needed for the skew table */
     757       497598 :         if (skew_mcvs > 0)
     758       497598 :             hash_table_bytes -= skew_mcvs * bytes_per_mcv;
     759              :     }
     760              :     else
     761         2991 :         *num_skew_mcvs = 0;
     762              : 
     763              :     /*
     764              :      * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
     765              :      * memory is filled, assuming a single batch; but limit the value so that
     766              :      * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
     767              :      * nor MaxAllocSize.
     768              :      *
     769              :      * Note that both nbuckets and nbatch must be powers of 2 to make
     770              :      * ExecHashGetBucketAndBatch fast.
     771              :      */
     772       500589 :     max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
     773       500589 :     max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
     774              :     /* If max_pointers isn't a power of 2, must round it down to one */
     775       500589 :     max_pointers = pg_prevpower2_size_t(max_pointers);
     776              : 
     777              :     /* Also ensure we avoid integer overflow in nbatch and nbuckets */
     778              :     /* (this step is redundant given the current value of MaxAllocSize) */
     779       500589 :     max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
     780              : 
     781       500589 :     dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
     782       500589 :     dbuckets = Min(dbuckets, max_pointers);
     783       500589 :     nbuckets = (int) dbuckets;
     784              :     /* don't let nbuckets be really small, though ... */
     785       500589 :     nbuckets = Max(nbuckets, 1024);
     786              :     /* ... and force it to be a power of 2. */
     787       500589 :     nbuckets = pg_nextpower2_32(nbuckets);
     788              : 
     789              :     /*
     790              :      * If there's not enough space to store the projected number of tuples and
     791              :      * the required bucket headers, we will need multiple batches.
     792              :      */
     793       500589 :     bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
     794       500589 :     if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
     795              :     {
     796              :         /* We'll need multiple batches */
     797              :         size_t      sbuckets;
     798              :         double      dbatch;
     799              :         int         minbatch;
     800              :         size_t      bucket_size;
     801              : 
     802              :         /*
     803              :          * If Parallel Hash with combined hash_mem would still need multiple
     804              :          * batches, we'll have to fall back to regular hash_mem budget.
     805              :          */
     806         3043 :         if (try_combined_hash_mem)
     807              :         {
     808          123 :             ExecChooseHashTableSize(ntuples, tupwidth, useskew,
     809              :                                     false, parallel_workers,
     810              :                                     space_allowed,
     811              :                                     numbuckets,
     812              :                                     numbatches,
     813              :                                     num_skew_mcvs);
     814          123 :             return;
     815              :         }
     816              : 
     817              :         /*
     818              :          * Estimate the number of buckets we'll want to have when hash_mem is
     819              :          * entirely full.  Each bucket will contain a bucket pointer plus
     820              :          * NTUP_PER_BUCKET tuples, whose projected size already includes
     821              :          * overhead for the hash code, pointer to the next tuple, etc.
     822              :          */
     823         2920 :         bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
     824         2920 :         if (hash_table_bytes <= bucket_size)
     825            0 :             sbuckets = 1;       /* avoid pg_nextpower2_size_t(0) */
     826              :         else
     827         2920 :             sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
     828         2920 :         sbuckets = Min(sbuckets, max_pointers);
     829         2920 :         nbuckets = (int) sbuckets;
     830         2920 :         nbuckets = pg_nextpower2_32(nbuckets);
     831         2920 :         bucket_bytes = nbuckets * sizeof(HashJoinTuple);
     832              : 
     833              :         /*
     834              :          * Buckets are simple pointers to hashjoin tuples, while tupsize
     835              :          * includes the pointer, hash code, and MinimalTupleData.  So buckets
     836              :          * should never really exceed 25% of hash_mem (even for
     837              :          * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
     838              :          * 2^N bytes, where we might get more because of doubling. So let's
     839              :          * look for 50% here.
     840              :          */
     841              :         Assert(bucket_bytes <= hash_table_bytes / 2);
     842              : 
     843              :         /* Calculate required number of batches. */
     844         2920 :         dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
     845         2920 :         dbatch = Min(dbatch, max_pointers);
     846         2920 :         minbatch = (int) dbatch;
     847         2920 :         nbatch = pg_nextpower2_32(Max(2, minbatch));
     848              :     }
     849              : 
     850              :     /*
     851              :      * Optimize the total amount of memory consumed by the hash node.
     852              :      *
     853              :      * The nbatch calculation above focuses on the in-memory hash table,
     854              :      * assuming no per-batch overhead. But each batch may have two files, each
     855              :      * with a BLCKSZ buffer. For large nbatch values these buffers may use
     856              :      * significantly more memory than the hash table.
     857              :      *
     858              :      * The total memory usage may be expressed by this formula:
     859              :      *
     860              :      * (inner_rel_bytes / nbatch) + (2 * nbatch * BLCKSZ)
     861              :      *
     862              :      * where (inner_rel_bytes / nbatch) is the size of the in-memory hash
     863              :      * table and (2 * nbatch * BLCKSZ) is the amount of memory used by file
     864              :      * buffers.
     865              :      *
      866              :      * The nbatch calculation, however, ignores the second part.  And for very
     867              :      * large inner_rel_bytes, there may be no nbatch that keeps total memory
     868              :      * usage under the budget (work_mem * hash_mem_multiplier). To deal with
     869              :      * that, we will adjust nbatch to minimize total memory consumption across
     870              :      * both the hashtable and file buffers.
     871              :      *
     872              :      * As we increase the size of the hashtable, the number of batches
     873              :      * decreases, and the total memory usage follows a U-shaped curve. We find
     874              :      * the minimum nbatch by "walking back" -- checking if halving nbatch
     875              :      * would lower the total memory usage. We stop when it no longer helps.
     876              :      *
     877              :      * We only reduce the number of batches. Adding batches reduces memory
     878              :      * usage only when most of the memory is used by the hash table, with
     879              :      * total memory usage within the limit or not far from it. We don't want
     880              :      * to start batching when not needed, even if that would reduce memory
     881              :      * usage.
     882              :      *
     883              :      * While growing the hashtable, we also adjust the number of buckets to
     884              :      * maintain a load factor of NTUP_PER_BUCKET while squeezing tuples back
     885              :      * from batches into the hashtable.
     886              :      *
     887              :      * Note that we can only change nbuckets during initial hashtable sizing.
     888              :      * Once we start building the hash, nbuckets is fixed (we may still grow
     889              :      * the hash table).
     890              :      *
     891              :      * We double several parameters (space_allowed, nbuckets, num_skew_mcvs),
     892              :      * which introduces a risk of overflow. We avoid this by exiting the loop.
      893              :      * We could do something smarter (e.g. capping nbuckets and continuing), but
     894              :      * the complexity is not worth it. Such cases are extremely rare, and this
     895              :      * is a best-effort attempt to reduce memory usage.
     896              :      */
     897       500898 :     while (nbatch > 1)
     898              :     {
     899              :         /* Check that buckets won't overflow MaxAllocSize */
     900         3352 :         if (nbuckets > (MaxAllocSize / sizeof(HashJoinTuple) / 2))
     901            0 :             break;
     902              : 
     903              :         /* num_skew_mcvs should be less than nbuckets */
     904              :         Assert((*num_skew_mcvs) < (INT_MAX / 2));
     905              : 
     906              :         /*
     907              :          * Check that space_allowed won't overflow SIZE_MAX.
     908              :          *
     909              :          * We don't use hash_table_bytes here, because it does not include the
      910              :      * skew buckets, and it is the overall memory limit that we want to cap.
     911              :          */
     912         3352 :         if ((*space_allowed) > (SIZE_MAX / 2))
     913            0 :             break;
     914              : 
     915              :         /*
     916              :          * Will halving the number of batches and doubling the size of the
     917              :          * hashtable reduce overall memory usage?
     918              :          *
     919              :          * This is the same as (S = space_allowed):
     920              :          *
     921              :          * (S + 2 * nbatch * BLCKSZ) < (S * 2 + nbatch * BLCKSZ)
     922              :          *
     923              :          * but avoiding intermediate overflow.
     924              :          */
     925         3352 :         if (nbatch < (*space_allowed) / BLCKSZ)
     926         2920 :             break;
     927              : 
     928              :         /*
     929              :          * MaxAllocSize is sufficiently small that we are not worried about
     930              :          * overflowing nbuckets.
     931              :          */
     932          432 :         nbuckets *= 2;
     933              : 
     934          432 :         *num_skew_mcvs = (*num_skew_mcvs) * 2;
     935          432 :         *space_allowed = (*space_allowed) * 2;
     936              : 
     937          432 :         nbatch /= 2;
     938              :     }
     939              : 
     940              :     Assert(nbuckets > 0);
     941              :     Assert(nbatch > 0);
     942              : 
     943       500466 :     *numbuckets = nbuckets;
     944       500466 :     *numbatches = nbatch;
     945              : }
     946              : 
     947              : 
     948              : /* ----------------------------------------------------------------
     949              :  *      ExecHashTableDestroy
     950              :  *
     951              :  *      destroy a hash table
     952              :  * ----------------------------------------------------------------
     953              :  */
     954              : void
     955        13693 : ExecHashTableDestroy(HashJoinTable hashtable)
     956              : {
     957              :     int         i;
     958              : 
     959              :     /*
     960              :      * Make sure all the temp files are closed.  We skip batch 0, since it
     961              :      * can't have any temp files (and the arrays might not even exist if
     962              :      * nbatch is only 1).  Parallel hash joins don't use these files.
     963              :      */
     964        13693 :     if (hashtable->innerBatchFile != NULL)
     965              :     {
     966          728 :         for (i = 1; i < hashtable->nbatch; i++)
     967              :         {
     968          619 :             if (hashtable->innerBatchFile[i])
     969            0 :                 BufFileClose(hashtable->innerBatchFile[i]);
     970          619 :             if (hashtable->outerBatchFile[i])
     971            0 :                 BufFileClose(hashtable->outerBatchFile[i]);
     972              :         }
     973              :     }
     974              : 
     975              :     /* Release working memory (batchCxt is a child, so it goes away too) */
     976        13693 :     MemoryContextDelete(hashtable->hashCxt);
     977              : 
     978              :     /* And drop the control block */
     979        13693 :     pfree(hashtable);
     980        13693 : }
     981              : 
     982              : /*
     983              :  * Consider adjusting the allowed hash table size, depending on the number
     984              :  * of batches, to minimize the overall memory usage (for both the hashtable
     985              :  * and batch files).
     986              :  *
     987              :  * We're adjusting the size of the hash table, not the (optimal) number of
     988              :  * buckets. We can't change that once we start building the hash, due to how
     989              :  * ExecHashGetBucketAndBatch calculates batchno/bucketno from the hash. This
     990              :  * means the load factor may not be optimal, but we're in damage control so
      991              :  * means the load factor may not be optimal, but we're in damage control,
      992              :  * so we accept slower lookups. It's still much better than batch explosion.
      993              :  * Returns true if we chose to increase the batch size limit (and thus we
      994              :  * don't need to add batches), and false if the caller should double nbatch.
     995              :  */
     996              : static bool
     997           97 : ExecHashIncreaseBatchSize(HashJoinTable hashtable)
     998              : {
     999              :     /*
    1000              :      * How much additional memory would doubling nbatch use? Each batch may
    1001              :      * require two buffered files (inner/outer), with a BLCKSZ buffer.
    1002              :      */
    1003           97 :     size_t      batchSpace = (hashtable->nbatch * 2 * (size_t) BLCKSZ);
    1004              : 
    1005              :     /*
     1006              :      * Compare the extra space needed for doubling nbatch with the space needed
     1007              :      * for enlarging the in-memory hash table. If doubling the hash table needs
     1008              :      * less memory, just do that. Otherwise, continue with doubling nbatch.
     1009              :      *
     1010              :      * We're going to double either spaceAllowed or batchSpace, so asking which
     1011              :      * doubling adds less memory is the same as comparing the two values
     1012              :      * directly.
    1013              :      */
    1014           97 :     if (hashtable->spaceAllowed <= batchSpace)
    1015              :     {
    1016            0 :         hashtable->spaceAllowed *= 2;
    1017            0 :         return true;
    1018              :     }
    1019              : 
    1020           97 :     return false;
    1021              : }
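                       : /*
                       :  * A worked example of the trade-off above, assuming the default BLCKSZ
                       :  * of 8192: at nbatch = 512 the batch buffers need batchSpace = 8MB
                       :  * (2 * 512 * 8192 bytes). With spaceAllowed = 4MB, doubling spaceAllowed
                       :  * costs 4MB, while doubling nbatch would cost another 8MB of buffers, so
                       :  * we double spaceAllowed and return true. Once spaceAllowed exceeds
                       :  * batchSpace, adding batches becomes the cheaper option instead.
                       :  */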
    1022              : 
    1023              : /*
    1024              :  * ExecHashIncreaseNumBatches
    1025              :  *      increase the original number of batches in order to reduce
    1026              :  *      current memory consumption
    1027              :  */
    1028              : static void
    1029       380037 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
    1030              : {
    1031       380037 :     int         oldnbatch = hashtable->nbatch;
    1032       380037 :     int         curbatch = hashtable->curbatch;
    1033              :     int         nbatch;
    1034              :     long        ninmemory;
    1035              :     long        nfreed;
    1036              :     HashMemoryChunk oldchunks;
    1037              : 
    1038              :     /* do nothing if we've decided to shut off growth */
    1039       380037 :     if (!hashtable->growEnabled)
    1040       379940 :         return;
    1041              : 
    1042              :     /* safety check to avoid overflow */
    1043           97 :     if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
    1044            0 :         return;
    1045              : 
    1046              :     /* consider increasing size of the in-memory hash table instead */
    1047           97 :     if (ExecHashIncreaseBatchSize(hashtable))
    1048            0 :         return;
    1049              : 
    1050           97 :     nbatch = oldnbatch * 2;
    1051              :     Assert(nbatch > 1);
    1052              : 
    1053              : #ifdef HJDEBUG
    1054              :     printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
    1055              :            hashtable, nbatch, hashtable->spaceUsed);
    1056              : #endif
    1057              : 
    1058           97 :     if (hashtable->innerBatchFile == NULL)
    1059              :     {
    1060           46 :         MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    1061              : 
    1062              :         /* we had no file arrays before */
    1063           46 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
    1064           46 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
    1065              : 
    1066           46 :         MemoryContextSwitchTo(oldcxt);
    1067              : 
    1068              :         /* time to establish the temp tablespaces, too */
    1069           46 :         PrepareTempTablespaces();
    1070              :     }
    1071              :     else
    1072              :     {
    1073              :         /* enlarge arrays and zero out added entries */
    1074           51 :         hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
    1075           51 :         hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
    1076              :     }
    1077              : 
    1078           97 :     hashtable->nbatch = nbatch;
    1079              : 
    1080              :     /*
    1081              :      * Scan through the existing hash table entries and dump out any that are
    1082              :      * no longer of the current batch.
    1083              :      */
    1084           97 :     ninmemory = nfreed = 0;
    1085              : 
     1086              :     /* If we know we need to resize nbuckets, we can do it while rebatching. */
    1087           97 :     if (hashtable->nbuckets_optimal != hashtable->nbuckets)
    1088              :     {
    1089              :         /* we never decrease the number of buckets */
    1090              :         Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
    1091              : 
    1092           46 :         hashtable->nbuckets = hashtable->nbuckets_optimal;
    1093           46 :         hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
    1094              : 
    1095           46 :         hashtable->buckets.unshared =
    1096           46 :             repalloc_array(hashtable->buckets.unshared,
    1097              :                            HashJoinTuple, hashtable->nbuckets);
    1098              :     }
    1099              : 
    1100              :     /*
    1101              :      * We will scan through the chunks directly, so that we can reset the
     1102              :      * buckets now and not have to keep track of which tuples in the buckets have
    1103              :      * already been processed. We will free the old chunks as we go.
    1104              :      */
    1105           97 :     memset(hashtable->buckets.unshared, 0,
    1106           97 :            sizeof(HashJoinTuple) * hashtable->nbuckets);
    1107           97 :     oldchunks = hashtable->chunks;
    1108           97 :     hashtable->chunks = NULL;
    1109              : 
    1110              :     /* so, let's scan through the old chunks, and all tuples in each chunk */
    1111          485 :     while (oldchunks != NULL)
    1112              :     {
    1113          388 :         HashMemoryChunk nextchunk = oldchunks->next.unshared;
    1114              : 
    1115              :         /* position within the buffer (up to oldchunks->used) */
    1116          388 :         size_t      idx = 0;
    1117              : 
    1118              :         /* process all tuples stored in this chunk (and then free it) */
    1119       265079 :         while (idx < oldchunks->used)
    1120              :         {
    1121       264691 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
    1122       264691 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1123       264691 :             int         hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
    1124              :             int         bucketno;
    1125              :             int         batchno;
    1126              : 
    1127       264691 :             ninmemory++;
    1128       264691 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1129              :                                       &bucketno, &batchno);
    1130              : 
    1131       264691 :             if (batchno == curbatch)
    1132              :             {
    1133              :                 /* keep tuple in memory - copy it into the new chunk */
    1134              :                 HashJoinTuple copyTuple;
    1135              : 
    1136       101433 :                 copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1137       101433 :                 memcpy(copyTuple, hashTuple, hashTupleSize);
    1138              : 
    1139              :                 /* and add it back to the appropriate bucket */
    1140       101433 :                 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1141       101433 :                 hashtable->buckets.unshared[bucketno] = copyTuple;
    1142              :             }
    1143              :             else
    1144              :             {
    1145              :                 /* dump it out */
    1146              :                 Assert(batchno > curbatch);
    1147       163258 :                 ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
    1148              :                                       hashTuple->hashvalue,
    1149       163258 :                                       &hashtable->innerBatchFile[batchno],
    1150              :                                       hashtable);
    1151              : 
    1152       163258 :                 hashtable->spaceUsed -= hashTupleSize;
    1153       163258 :                 nfreed++;
    1154              :             }
    1155              : 
    1156              :             /* next tuple in this chunk */
    1157       264691 :             idx += MAXALIGN(hashTupleSize);
    1158              : 
    1159              :             /* allow this loop to be cancellable */
    1160       264691 :             CHECK_FOR_INTERRUPTS();
    1161              :         }
    1162              : 
    1163              :         /* we're done with this chunk - free it and proceed to the next one */
    1164          388 :         pfree(oldchunks);
    1165          388 :         oldchunks = nextchunk;
    1166              :     }
    1167              : 
    1168              : #ifdef HJDEBUG
    1169              :     printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
    1170              :            hashtable, nfreed, ninmemory, hashtable->spaceUsed);
    1171              : #endif
    1172              : 
    1173              :     /*
    1174              :      * If we dumped out either all or none of the tuples in the table, disable
    1175              :      * further expansion of nbatch.  This situation implies that we have
    1176              :      * enough tuples of identical hashvalues to overflow spaceAllowed.
    1177              :      * Increasing nbatch will not fix it since there's no way to subdivide the
    1178              :      * group any more finely. We have to just gut it out and hope the server
    1179              :      * has enough RAM.
    1180              :      */
    1181           97 :     if (nfreed == 0 || nfreed == ninmemory)
    1182              :     {
    1183           22 :         hashtable->growEnabled = false;
    1184              : #ifdef HJDEBUG
    1185              :         printf("Hashjoin %p: disabling further increase of nbatch\n",
    1186              :                hashtable);
    1187              : #endif
    1188              :     }
    1189              : }
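                       : /*
                       :  * A simplified sketch of how ExecHashGetBucketAndBatch splits a hash
                       :  * value (the real function may permute the bits differently, but the
                       :  * principle is the same):
                       :  *
                       :  *    bucketno = hashvalue & (nbuckets - 1);
                       :  *    batchno = (hashvalue >> log2_nbuckets) & (nbatch - 1);
                       :  *
                       :  * Doubling nbatch merely exposes one more (higher) hash bit in batchno,
                       :  * so a tuple either keeps its batch number or moves to batchno +
                       :  * oldnbatch. Tuples can only move "forward" to later batches, which is
                       :  * what the Assert(batchno > curbatch) above relies on.
                       :  */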
    1190              : 
    1191              : /*
    1192              :  * ExecParallelHashIncreaseNumBatches
    1193              :  *      Every participant attached to grow_batches_barrier must run this
    1194              :  *      function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
    1195              :  */
    1196              : static void
    1197           34 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
    1198              : {
    1199           34 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1200              : 
    1201              :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1202              : 
    1203              :     /*
    1204              :      * It's unlikely, but we need to be prepared for new participants to show
     1205              :      * up while we're in the middle of this operation, so we need to switch
     1206              :      * on the barrier phase here.
    1207              :      */
    1208           34 :     switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
    1209              :     {
    1210           32 :         case PHJ_GROW_BATCHES_ELECT:
    1211              : 
    1212              :             /*
    1213              :              * Elect one participant to prepare to grow the number of batches.
    1214              :              * This involves reallocating or resetting the buckets of batch 0
    1215              :              * in preparation for all participants to begin repartitioning the
    1216              :              * tuples.
    1217              :              */
    1218           32 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1219              :                                      WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
    1220              :             {
    1221              :                 dsa_pointer_atomic *buckets;
    1222              :                 ParallelHashJoinBatch *old_batch0;
    1223              :                 int         new_nbatch;
    1224              :                 int         i;
    1225              : 
    1226              :                 /* Move the old batch out of the way. */
    1227           24 :                 old_batch0 = hashtable->batches[0].shared;
    1228           24 :                 pstate->old_batches = pstate->batches;
    1229           24 :                 pstate->old_nbatch = hashtable->nbatch;
    1230           24 :                 pstate->batches = InvalidDsaPointer;
    1231              : 
    1232              :                 /* Free this backend's old accessors. */
    1233           24 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1234              : 
    1235              :                 /* Figure out how many batches to use. */
    1236           24 :                 if (hashtable->nbatch == 1)
    1237              :                 {
    1238              :                     /*
    1239              :                      * We are going from single-batch to multi-batch.  We need
    1240              :                      * to switch from one large combined memory budget to the
    1241              :                      * regular hash_mem budget.
    1242              :                      */
    1243           18 :                     pstate->space_allowed = get_hash_memory_limit();
    1244              : 
    1245              :                     /*
    1246              :                      * The combined hash_mem of all participants wasn't
    1247              :                      * enough. Therefore one batch per participant would be
    1248              :                      * approximately equivalent and would probably also be
    1249              :                      * insufficient.  So try two batches per participant,
    1250              :                      * rounded up to a power of two.
    1251              :                      */
    1252           18 :                     new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
    1253              :                 }
    1254              :                 else
    1255              :                 {
    1256              :                     /*
    1257              :                      * We were already multi-batched.  Try doubling the number
    1258              :                      * of batches.
    1259              :                      */
    1260            6 :                     new_nbatch = hashtable->nbatch * 2;
    1261              :                 }
    1262              : 
    1263              :                 /* Allocate new larger generation of batches. */
    1264              :                 Assert(hashtable->nbatch == pstate->nbatch);
    1265           24 :                 ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
    1266              :                 Assert(hashtable->nbatch == pstate->nbatch);
    1267              : 
    1268              :                 /* Replace or recycle batch 0's bucket array. */
    1269           24 :                 if (pstate->old_nbatch == 1)
    1270              :                 {
    1271              :                     double      dtuples;
    1272              :                     double      dbuckets;
    1273              :                     int         new_nbuckets;
    1274              :                     uint32      max_buckets;
    1275              : 
    1276              :                     /*
    1277              :                      * We probably also need a smaller bucket array.  How many
    1278              :                      * tuples do we expect per batch, assuming we have only
    1279              :                      * half of them so far?  Normally we don't need to change
    1280              :                      * the bucket array's size, because the size of each batch
    1281              :                      * stays the same as we add more batches, but in this
    1282              :                      * special case we move from a large batch to many smaller
    1283              :                      * batches and it would be wasteful to keep the large
    1284              :                      * array.
    1285              :                      */
    1286           18 :                     dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
    1287              : 
    1288              :                     /*
    1289              :                      * We need to calculate the maximum number of buckets to
    1290              :                      * stay within the MaxAllocSize boundary.  Round the
    1291              :                      * maximum number to the previous power of 2 given that
    1292              :                      * later we round the number to the next power of 2.
    1293              :                      */
    1294           18 :                     max_buckets = pg_prevpower2_32((uint32)
    1295              :                                                    (MaxAllocSize / sizeof(dsa_pointer_atomic)));
    1296           18 :                     dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
    1297           18 :                     dbuckets = Min(dbuckets, max_buckets);
    1298           18 :                     new_nbuckets = (int) dbuckets;
    1299           18 :                     new_nbuckets = Max(new_nbuckets, 1024);
    1300           18 :                     new_nbuckets = pg_nextpower2_32(new_nbuckets);
    1301           18 :                     dsa_free(hashtable->area, old_batch0->buckets);
    1302           36 :                     hashtable->batches[0].shared->buckets =
    1303           18 :                         dsa_allocate(hashtable->area,
    1304              :                                      sizeof(dsa_pointer_atomic) * new_nbuckets);
    1305              :                     buckets = (dsa_pointer_atomic *)
    1306           18 :                         dsa_get_address(hashtable->area,
    1307           18 :                                         hashtable->batches[0].shared->buckets);
    1308        53266 :                     for (i = 0; i < new_nbuckets; ++i)
    1309        53248 :                         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1310           18 :                     pstate->nbuckets = new_nbuckets;
    1311              :                 }
    1312              :                 else
    1313              :                 {
    1314              :                     /* Recycle the existing bucket array. */
    1315            6 :                     hashtable->batches[0].shared->buckets = old_batch0->buckets;
    1316              :                     buckets = (dsa_pointer_atomic *)
    1317            6 :                         dsa_get_address(hashtable->area, old_batch0->buckets);
    1318        24582 :                     for (i = 0; i < hashtable->nbuckets; ++i)
    1319        24576 :                         dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
    1320              :                 }
    1321              : 
    1322              :                 /* Move all chunks to the work queue for parallel processing. */
    1323           24 :                 pstate->chunk_work_queue = old_batch0->chunks;
    1324              : 
    1325              :                 /* Disable further growth temporarily while we're growing. */
    1326           24 :                 pstate->growth = PHJ_GROWTH_DISABLED;
    1327              :             }
    1328              :             else
    1329              :             {
    1330              :                 /* All other participants just flush their tuples to disk. */
    1331            8 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1332              :             }
    1333              :             pg_fallthrough;
    1334              : 
    1335              :         case PHJ_GROW_BATCHES_REALLOCATE:
    1336              :             /* Wait for the above to be finished. */
    1337           32 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1338              :                                  WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
    1339              :             pg_fallthrough;
    1340              : 
    1341           34 :         case PHJ_GROW_BATCHES_REPARTITION:
    1342              :             /* Make sure that we have the current dimensions and buckets. */
    1343           34 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1344           34 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1345              :             /* Then partition, flush counters. */
    1346           34 :             ExecParallelHashRepartitionFirst(hashtable);
    1347           34 :             ExecParallelHashRepartitionRest(hashtable);
    1348           34 :             ExecParallelHashMergeCounters(hashtable);
    1349              :             /* Wait for the above to be finished. */
    1350           34 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1351              :                                  WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
    1352              :             pg_fallthrough;
    1353              : 
    1354           34 :         case PHJ_GROW_BATCHES_DECIDE:
    1355              : 
    1356              :             /*
    1357              :              * Elect one participant to clean up and decide whether further
    1358              :              * repartitioning is needed, or should be disabled because it's
    1359              :              * not helping.
    1360              :              */
    1361           34 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1362              :                                      WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
    1363              :             {
    1364              :                 ParallelHashJoinBatch *old_batches;
    1365           24 :                 bool        space_exhausted = false;
    1366           24 :                 bool        extreme_skew_detected = false;
    1367              : 
    1368              :                 /* Make sure that we have the current dimensions and buckets. */
    1369           24 :                 ExecParallelHashEnsureBatchAccessors(hashtable);
    1370           24 :                 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1371              : 
    1372           24 :                 old_batches = dsa_get_address(hashtable->area, pstate->old_batches);
    1373              : 
    1374              :                 /* Are any of the new generation of batches exhausted? */
    1375          168 :                 for (int i = 0; i < hashtable->nbatch; ++i)
    1376              :                 {
    1377              :                     ParallelHashJoinBatch *batch;
    1378              :                     ParallelHashJoinBatch *old_batch;
    1379              :                     int         parent;
    1380              : 
    1381          144 :                     batch = hashtable->batches[i].shared;
    1382          144 :                     if (batch->space_exhausted ||
    1383          144 :                         batch->estimated_size > pstate->space_allowed)
    1384           12 :                         space_exhausted = true;
    1385              : 
    1386          144 :                     parent = i % pstate->old_nbatch;
    1387          144 :                     old_batch = NthParallelHashJoinBatch(old_batches, parent);
    1388          144 :                     if (old_batch->space_exhausted ||
    1389           36 :                         batch->estimated_size > pstate->space_allowed)
    1390              :                     {
    1391              :                         /*
    1392              :                          * Did this batch receive ALL of the tuples from its
    1393              :                          * parent batch?  That would indicate that further
    1394              :                          * repartitioning isn't going to help (the hash values
    1395              :                          * are probably all the same).
    1396              :                          */
    1397          108 :                         if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
    1398           12 :                             extreme_skew_detected = true;
    1399              :                     }
    1400              :                 }
    1401              : 
    1402              :                 /* Don't keep growing if it's not helping or we'd overflow. */
    1403           24 :                 if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
    1404           12 :                     pstate->growth = PHJ_GROWTH_DISABLED;
    1405           12 :                 else if (space_exhausted)
    1406            0 :                     pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    1407              :                 else
    1408           12 :                     pstate->growth = PHJ_GROWTH_OK;
    1409              : 
    1410              :                 /* Free the old batches in shared memory. */
    1411           24 :                 dsa_free(hashtable->area, pstate->old_batches);
    1412           24 :                 pstate->old_batches = InvalidDsaPointer;
    1413              :             }
    1414              :             pg_fallthrough;
    1415              : 
    1416              :         case PHJ_GROW_BATCHES_FINISH:
    1417              :             /* Wait for the above to complete. */
    1418           34 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1419              :                                  WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
    1420              :     }
    1421           34 : }
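                       : /*
                       :  * Two worked examples for the code above: when going from one batch to
                       :  * many with nparticipants = 3, we pick new_nbatch =
                       :  * pg_nextpower2_32(3 * 2) = 8. And because batch numbers are taken from
                       :  * the low hash bits just above the bucket bits (see the sketch after
                       :  * ExecHashIncreaseNumBatches), new batch i inherits its tuples from old
                       :  * batch i % old_nbatch: with old_nbatch = 4 and nbatch = 8, old batch 2
                       :  * splits into new batches 2 and 6, which is the parent relationship the
                       :  * PHJ_GROW_BATCHES_DECIDE phase checks for extreme skew.
                       :  */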
    1422              : 
    1423              : /*
    1424              :  * Repartition the tuples currently loaded into memory for inner batch 0
    1425              :  * because the number of batches has been increased.  Some tuples are retained
    1426              :  * in memory and some are written out to a later batch.
    1427              :  */
    1428              : static void
    1429           34 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
    1430              : {
    1431              :     dsa_pointer chunk_shared;
    1432              :     HashMemoryChunk chunk;
    1433              : 
    1434              :     Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
    1435              : 
    1436          212 :     while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
    1437              :     {
    1438          144 :         size_t      idx = 0;
    1439              : 
    1440              :         /* Repartition all tuples in this chunk. */
    1441       110021 :         while (idx < chunk->used)
    1442              :         {
    1443       109877 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1444       109877 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1445              :             HashJoinTuple copyTuple;
    1446              :             dsa_pointer shared;
    1447              :             int         bucketno;
    1448              :             int         batchno;
    1449              : 
    1450       109877 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1451              :                                       &bucketno, &batchno);
    1452              : 
    1453              :             Assert(batchno < hashtable->nbatch);
    1454       109877 :             if (batchno == 0)
    1455              :             {
    1456              :                 /* It still belongs in batch 0.  Copy to a new chunk. */
    1457              :                 copyTuple =
    1458        25120 :                     ExecParallelHashTupleAlloc(hashtable,
    1459        25120 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1460              :                                                &shared);
    1461        25120 :                 copyTuple->hashvalue = hashTuple->hashvalue;
    1462        25120 :                 memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
    1463        25120 :                 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1464              :                                           copyTuple, shared);
    1465              :             }
    1466              :             else
    1467              :             {
    1468        84757 :                 size_t      tuple_size =
    1469        84757 :                     MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1470              : 
    1471              :                 /* It belongs in a later batch. */
    1472        84757 :                 hashtable->batches[batchno].estimated_size += tuple_size;
    1473        84757 :                 sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1474        84757 :                              &hashTuple->hashvalue, tuple);
    1475              :             }
    1476              : 
    1477              :             /* Count this tuple. */
    1478       109877 :             ++hashtable->batches[0].old_ntuples;
    1479       109877 :             ++hashtable->batches[batchno].ntuples;
    1480              : 
    1481       109877 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1482              :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1483              :         }
    1484              : 
    1485              :         /* Free this chunk. */
    1486          144 :         dsa_free(hashtable->area, chunk_shared);
    1487              : 
    1488          144 :         CHECK_FOR_INTERRUPTS();
    1489              :     }
    1490           34 : }
    1491              : 
    1492              : /*
    1493              :  * Help repartition inner batches 1..n.
    1494              :  */
    1495              : static void
    1496           34 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
    1497              : {
    1498           34 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1499           34 :     int         old_nbatch = pstate->old_nbatch;
    1500              :     SharedTuplestoreAccessor **old_inner_tuples;
    1501              :     ParallelHashJoinBatch *old_batches;
    1502              :     int         i;
    1503              : 
    1504              :     /* Get our hands on the previous generation of batches. */
    1505              :     old_batches = (ParallelHashJoinBatch *)
    1506           34 :         dsa_get_address(hashtable->area, pstate->old_batches);
    1507           34 :     old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
    1508           70 :     for (i = 1; i < old_nbatch; ++i)
    1509              :     {
    1510           36 :         ParallelHashJoinBatch *shared =
    1511           36 :             NthParallelHashJoinBatch(old_batches, i);
    1512              : 
    1513           36 :         old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
    1514              :                                          ParallelWorkerNumber + 1,
    1515              :                                          &pstate->fileset);
    1516              :     }
    1517              : 
    1518              :     /* Join in the effort to repartition them. */
    1519           70 :     for (i = 1; i < old_nbatch; ++i)
    1520              :     {
    1521              :         MinimalTuple tuple;
    1522              :         uint32      hashvalue;
    1523              : 
    1524              :         /* Scan one partition from the previous generation. */
    1525           36 :         sts_begin_parallel_scan(old_inner_tuples[i]);
    1526        79562 :         while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
    1527              :         {
    1528        79526 :             size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1529              :             int         bucketno;
    1530              :             int         batchno;
    1531              : 
    1532              :             /* Decide which partition it goes to in the new generation. */
    1533        79526 :             ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
    1534              :                                       &batchno);
    1535              : 
    1536        79526 :             hashtable->batches[batchno].estimated_size += tuple_size;
    1537        79526 :             ++hashtable->batches[batchno].ntuples;
    1538        79526 :             ++hashtable->batches[i].old_ntuples;
    1539              : 
     1540              :             /* Store the tuple in its new batch. */
    1541        79526 :             sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1542              :                          &hashvalue, tuple);
    1543              : 
    1544        79526 :             CHECK_FOR_INTERRUPTS();
    1545              :         }
    1546           36 :         sts_end_parallel_scan(old_inner_tuples[i]);
    1547              :     }
    1548              : 
    1549           34 :     pfree(old_inner_tuples);
    1550           34 : }
    1551              : 
    1552              : /*
    1553              :  * Transfer the backend-local per-batch counters to the shared totals.
    1554              :  */
    1555              : static void
    1556          202 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
    1557              : {
    1558          202 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1559              :     int         i;
    1560              : 
    1561          202 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    1562          202 :     pstate->total_tuples = 0;
    1563         1147 :     for (i = 0; i < hashtable->nbatch; ++i)
    1564              :     {
    1565          945 :         ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
    1566              : 
    1567          945 :         batch->shared->size += batch->size;
    1568          945 :         batch->shared->estimated_size += batch->estimated_size;
    1569          945 :         batch->shared->ntuples += batch->ntuples;
    1570          945 :         batch->shared->old_ntuples += batch->old_ntuples;
    1571          945 :         batch->size = 0;
    1572          945 :         batch->estimated_size = 0;
    1573          945 :         batch->ntuples = 0;
    1574          945 :         batch->old_ntuples = 0;
    1575          945 :         pstate->total_tuples += batch->shared->ntuples;
    1576              :     }
    1577          202 :     LWLockRelease(&pstate->lock);
    1578          202 : }
    1579              : 
    1580              : /*
    1581              :  * ExecHashIncreaseNumBuckets
    1582              :  *      increase the original number of buckets in order to reduce
    1583              :  *      number of tuples per bucket
    1584              :  */
    1585              : static void
    1586           42 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
    1587              : {
    1588              :     HashMemoryChunk chunk;
    1589              : 
    1590              :     /* do nothing if not an increase (it's called increase for a reason) */
    1591           42 :     if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
    1592            0 :         return;
    1593              : 
    1594              : #ifdef HJDEBUG
    1595              :     printf("Hashjoin %p: increasing nbuckets %d => %d\n",
    1596              :            hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
    1597              : #endif
    1598              : 
    1599           42 :     hashtable->nbuckets = hashtable->nbuckets_optimal;
    1600           42 :     hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
    1601              : 
    1602              :     Assert(hashtable->nbuckets > 1);
    1603              :     Assert(hashtable->nbuckets <= (INT_MAX / 2));
    1604              :     Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
    1605              : 
    1606              :     /*
    1607              :      * Just reallocate the proper number of buckets - we don't need to walk
    1608              :      * through them - we can walk the dense-allocated chunks (just like in
    1609              :      * ExecHashIncreaseNumBatches, but without all the copying into new
    1610              :      * chunks)
    1611              :      */
    1612           42 :     hashtable->buckets.unshared =
    1613           42 :         repalloc_array(hashtable->buckets.unshared,
    1614              :                        HashJoinTuple, hashtable->nbuckets);
    1615              : 
    1616           42 :     memset(hashtable->buckets.unshared, 0,
    1617           42 :            hashtable->nbuckets * sizeof(HashJoinTuple));
    1618              : 
    1619              :     /* scan through all tuples in all chunks to rebuild the hash table */
    1620          202 :     for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
    1621              :     {
    1622              :         /* process all tuples stored in this chunk */
    1623          160 :         size_t      idx = 0;
    1624              : 
    1625        87612 :         while (idx < chunk->used)
    1626              :         {
    1627        87452 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1628              :             int         bucketno;
    1629              :             int         batchno;
    1630              : 
    1631        87452 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1632              :                                       &bucketno, &batchno);
    1633              : 
    1634              :             /* add the tuple to the proper bucket */
    1635        87452 :             hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1636        87452 :             hashtable->buckets.unshared[bucketno] = hashTuple;
    1637              : 
    1638              :             /* advance index past the tuple */
    1639        87452 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1640              :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1641              :         }
    1642              : 
    1643              :         /* allow this loop to be cancellable */
    1644          160 :         CHECK_FOR_INTERRUPTS();
    1645              :     }
    1646              : }
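                       : /*
                       :  * A small example of why the full rebuild above is needed: with
                       :  * bucketno taken from the low hash bits (see the sketch after
                       :  * ExecHashIncreaseNumBatches), hashvalue 45 (0b101101) lands in bucket
                       :  * 5 of 8 buckets but in bucket 13 of 16, so doubling nbuckets can move
                       :  * any tuple; zeroing and re-inserting is simpler than any incremental
                       :  * fixup of the old bucket array.
                       :  */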
    1647              : 
    1648              : static void
    1649           36 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
    1650              : {
    1651           36 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1652              :     int         i;
    1653              :     HashMemoryChunk chunk;
    1654              :     dsa_pointer chunk_s;
    1655              : 
    1656              :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1657              : 
    1658              :     /*
    1659              :      * It's unlikely, but we need to be prepared for new participants to show
     1660              :      * up while we're in the middle of this operation, so we need to switch
     1661              :      * on the barrier phase here.
    1662              :      */
    1663           36 :     switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
    1664              :     {
    1665           36 :         case PHJ_GROW_BUCKETS_ELECT:
    1666              :             /* Elect one participant to prepare to increase nbuckets. */
    1667           36 :             if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1668              :                                      WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
    1669              :             {
    1670              :                 size_t      size;
    1671              :                 dsa_pointer_atomic *buckets;
    1672              : 
    1673              :                 /* Double the size of the bucket array. */
    1674           35 :                 pstate->nbuckets *= 2;
    1675           35 :                 size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
    1676           35 :                 hashtable->batches[0].shared->size += size / 2;
    1677           35 :                 dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
    1678           70 :                 hashtable->batches[0].shared->buckets =
    1679           35 :                     dsa_allocate(hashtable->area, size);
    1680              :                 buckets = (dsa_pointer_atomic *)
    1681           35 :                     dsa_get_address(hashtable->area,
    1682           35 :                                     hashtable->batches[0].shared->buckets);
    1683       163875 :                 for (i = 0; i < pstate->nbuckets; ++i)
    1684       163840 :                     dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1685              : 
    1686              :                 /* Put the chunk list onto the work queue. */
    1687           35 :                 pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
    1688              : 
    1689              :                 /* Clear the flag. */
    1690           35 :                 pstate->growth = PHJ_GROWTH_OK;
    1691              :             }
    1692              :             pg_fallthrough;
    1693              : 
    1694              :         case PHJ_GROW_BUCKETS_REALLOCATE:
    1695              :             /* Wait for the above to complete. */
    1696           36 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1697              :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
    1698              :             pg_fallthrough;
    1699              : 
    1700           36 :         case PHJ_GROW_BUCKETS_REINSERT:
    1701              :             /* Reinsert all tuples into the hash table. */
    1702           36 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1703           36 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1704          199 :             while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
    1705              :             {
    1706          127 :                 size_t      idx = 0;
    1707              : 
    1708       104013 :                 while (idx < chunk->used)
    1709              :                 {
    1710       103886 :                     HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1711       103886 :                     dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
    1712              :                     int         bucketno;
    1713              :                     int         batchno;
    1714              : 
    1715       103886 :                     ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1716              :                                               &bucketno, &batchno);
    1717              :                     Assert(batchno == 0);
    1718              : 
    1719              :                     /* add the tuple to the proper bucket */
    1720       103886 :                     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1721              :                                               hashTuple, shared);
    1722              : 
    1723              :                     /* advance index past the tuple */
    1724       103886 :                     idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1725              :                                     HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1726              :                 }
    1727              : 
    1728              :                 /* allow this loop to be cancellable */
    1729          127 :                 CHECK_FOR_INTERRUPTS();
    1730              :             }
    1731           36 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1732              :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
    1733              :     }
    1734           36 : }
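                       : /*
                       :  * Note the accounting in the elected participant's branch above: after
                       :  * pstate->nbuckets has been doubled, the new bucket array needs "size"
                       :  * bytes, but the old array (size / 2 bytes) was already counted in the
                       :  * batch's size, so only the increment (size / 2) is added.
                       :  */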
    1735              : 
    1736              : /*
    1737              :  * ExecHashTableInsert
    1738              :  *      insert a tuple into the hash table depending on the hash value
    1739              :  *      it may just go to a temp file for later batches
    1740              :  *
    1741              :  * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
    1742              :  * tuple; the minimal case in particular is certain to happen while reloading
    1743              :  * tuples from batch files.  We could save some cycles in the regular-tuple
    1744              :  * case by not forcing the slot contents into minimal form; not clear if it's
    1745              :  * worth the messiness required.
    1746              :  */
    1747              : void
    1748      6176558 : ExecHashTableInsert(HashJoinTable hashtable,
    1749              :                     TupleTableSlot *slot,
    1750              :                     uint32 hashvalue)
    1751              : {
    1752              :     bool        shouldFree;
    1753      6176558 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1754              :     int         bucketno;
    1755              :     int         batchno;
    1756              : 
    1757      6176558 :     ExecHashGetBucketAndBatch(hashtable, hashvalue,
    1758              :                               &bucketno, &batchno);
    1759              : 
    1760              :     /*
    1761              :      * decide whether to put the tuple in the hash table or a temp file
    1762              :      */
    1763      6176558 :     if (batchno == hashtable->curbatch)
    1764              :     {
    1765              :         /*
    1766              :          * put the tuple in hash table
    1767              :          */
    1768              :         HashJoinTuple hashTuple;
    1769              :         int         hashTupleSize;
    1770      4602334 :         double      ntuples = (hashtable->totalTuples - hashtable->skewTuples);
    1771              : 
    1772              :         /* Create the HashJoinTuple */
    1773      4602334 :         hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    1774      4602334 :         hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1775              : 
    1776      4602334 :         hashTuple->hashvalue = hashvalue;
    1777      4602334 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1778              : 
    1779              :         /*
    1780              :          * We always reset the tuple-matched flag on insertion.  This is okay
    1781              :          * even when reloading a tuple from a batch file, since the tuple
    1782              :          * could not possibly have been matched to an outer tuple before it
    1783              :          * went into the batch file.
    1784              :          */
    1785      4602334 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1786              : 
    1787              :         /* Push it onto the front of the bucket's list */
    1788      4602334 :         hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1789      4602334 :         hashtable->buckets.unshared[bucketno] = hashTuple;
    1790              : 
    1791              :         /*
    1792              :          * Increase the (optimal) number of buckets if we just exceeded the
    1793              :          * NTUP_PER_BUCKET threshold, but only when there's still a single
    1794              :          * batch.
    1795              :          */
    1796      4602334 :         if (hashtable->nbatch == 1 &&
    1797      2774891 :             ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
    1798              :         {
    1799              :             /* Guard against integer overflow and alloc size overflow */
    1800          134 :             if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
    1801          134 :                 hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
    1802              :             {
    1803          134 :                 hashtable->nbuckets_optimal *= 2;
    1804          134 :                 hashtable->log2_nbuckets_optimal += 1;
    1805              :             }
    1806              :         }
    1807              : 
    1808              :         /* Account for space used, and back off if we've used too much */
    1809      4602334 :         hashtable->spaceUsed += hashTupleSize;
    1810      4602334 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    1811      3267672 :             hashtable->spacePeak = hashtable->spaceUsed;
    1812      4602334 :         if (hashtable->spaceUsed +
    1813      4602334 :             hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
    1814      4602334 :             > hashtable->spaceAllowed)
    1815       380037 :             ExecHashIncreaseNumBatches(hashtable);
    1816              :     }
    1817              :     else
    1818              :     {
    1819              :         /*
    1820              :          * put the tuple into a temp file for later batches
    1821              :          */
    1822              :         Assert(batchno > hashtable->curbatch);
    1823      1574224 :         ExecHashJoinSaveTuple(tuple,
    1824              :                               hashvalue,
    1825      1574224 :                               &hashtable->innerBatchFile[batchno],
    1826              :                               hashtable);
    1827              :     }
    1828              : 
    1829      6176558 :     if (shouldFree)
    1830      4394036 :         heap_free_minimal_tuple(tuple);
    1831      6176558 : }
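                       : /*
                       :  * For example, assuming NTUP_PER_BUCKET is 1: with nbuckets_optimal =
                       :  * 1024 in a single-batch join, the insertion that pushes the tuple
                       :  * count past 1024 doubles nbuckets_optimal to 2048. Only the optimal
                       :  * count is bumped here; the bucket array itself is resized later (see
                       :  * ExecHashIncreaseNumBuckets), but the pending growth is already
                       :  * charged against spaceAllowed by the check above that may call
                       :  * ExecHashIncreaseNumBatches.
                       :  */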
    1832              : 
    1833              : /*
    1834              :  * ExecParallelHashTableInsert
    1835              :  *      insert a tuple into a shared hash table or shared batch tuplestore
    1836              :  */
    1837              : void
    1838      1080096 : ExecParallelHashTableInsert(HashJoinTable hashtable,
    1839              :                             TupleTableSlot *slot,
    1840              :                             uint32 hashvalue)
    1841              : {
    1842              :     bool        shouldFree;
    1843      1080096 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1844              :     dsa_pointer shared;
    1845              :     int         bucketno;
    1846              :     int         batchno;
    1847              : 
    1848          127 : retry:
    1849      1080223 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1850              : 
    1851      1080223 :     if (batchno == 0)
    1852              :     {
    1853              :         HashJoinTuple hashTuple;
    1854              : 
    1855              :         /* Try to load it into memory. */
    1856              :         Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
    1857              :                PHJ_BUILD_HASH_INNER);
    1858       624132 :         hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1859       624132 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1860              :                                                &shared);
    1861       624132 :         if (hashTuple == NULL)
    1862          113 :             goto retry;
    1863              : 
    1864              :         /* Store the hash value in the HashJoinTuple header. */
    1865       624019 :         hashTuple->hashvalue = hashvalue;
    1866       624019 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1867       624019 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1868              : 
    1869              :         /* Push it onto the front of the bucket's list */
    1870       624019 :         ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1871              :                                   hashTuple, shared);
    1872              :     }
    1873              :     else
    1874              :     {
    1875       456091 :         size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1876              : 
    1877              :         Assert(batchno > 0);
    1878              : 
    1879              :         /* Try to preallocate space in the batch if necessary. */
    1880       456091 :         if (hashtable->batches[batchno].preallocated < tuple_size)
    1881              :         {
    1882          868 :             if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
    1883           14 :                 goto retry;
    1884              :         }
    1885              : 
    1886              :         Assert(hashtable->batches[batchno].preallocated >= tuple_size);
    1887       456077 :         hashtable->batches[batchno].preallocated -= tuple_size;
    1888       456077 :         sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
    1889              :                      tuple);
    1890              :     }
    1891      1080096 :     ++hashtable->batches[batchno].ntuples;
    1892              : 
    1893      1080096 :     if (shouldFree)
    1894      1080096 :         heap_free_minimal_tuple(tuple);
    1895      1080096 : }
    1896              : 
    1897              : /*
    1898              :  * Insert a tuple into the current hash table.  Unlike
    1899              :  * ExecParallelHashTableInsert, this version is not prepared to send the tuple
    1900              :  * to other batches or to run out of memory, and should only be called with
    1901              :  * tuples that belong in the current batch once growth has been disabled.
    1902              :  */
    1903              : void
    1904       540834 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
    1905              :                                         TupleTableSlot *slot,
    1906              :                                         uint32 hashvalue)
    1907              : {
    1908              :     bool        shouldFree;
    1909       540834 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1910              :     HashJoinTuple hashTuple;
    1911              :     dsa_pointer shared;
    1912              :     int         batchno;
    1913              :     int         bucketno;
    1914              : 
    1915       540834 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1916              :     Assert(batchno == hashtable->curbatch);
    1917       540834 :     hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1918       540834 :                                            HJTUPLE_OVERHEAD + tuple->t_len,
    1919              :                                            &shared);
    1920       540834 :     hashTuple->hashvalue = hashvalue;
    1921       540834 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1922       540834 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1923       540834 :     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1924              :                               hashTuple, shared);
    1925              : 
    1926       540834 :     if (shouldFree)
    1927            0 :         heap_free_minimal_tuple(tuple);
    1928       540834 : }
    1929              : 
    1930              : 
    1931              : /*
    1932              :  * ExecHashGetBucketAndBatch
    1933              :  *      Determine the bucket number and batch number for a hash value
    1934              :  *
    1935              :  * Note: on-the-fly increases of nbatch must not change the bucket number
    1936              :  * for a given hash code (since we don't move tuples to different hash
    1937              :  * chains), and must only cause the batch number to remain the same or
    1938              :  * increase.  Our algorithm is
    1939              :  *      bucketno = hashvalue MOD nbuckets
    1940              :  *      batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
    1941              :  * where nbuckets and nbatch are both expected to be powers of 2, so we can
    1942              :  * do the computations by shifting and masking.  (This assumes that all hash
    1943              :  * functions are good about randomizing all their output bits, else we are
    1944              :  * likely to have very skewed bucket or batch occupancy.)
    1945              :  *
    1946              :  * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
     1947              :  * bucket count growth.  Once we start batching, these values are fixed and do
     1948              :  * not change over the course of the join (making it possible to compute batch
    1949              :  * number the way we do here).
    1950              :  *
    1951              :  * nbatch is always a power of 2; we increase it only by doubling it.  This
    1952              :  * effectively adds one more bit to the top of the batchno.  In very large
    1953              :  * joins, we might run out of bits to add, so we do this by rotating the hash
    1954              :  * value.  This causes batchno to steal bits from bucketno when the number of
    1955              :  * virtual buckets exceeds 2^32.  It's better to have longer bucket chains
    1956              :  * than to lose the ability to divide batches.
    1957              :  */
    1958              : void
    1959     19934772 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
    1960              :                           uint32 hashvalue,
    1961              :                           int *bucketno,
    1962              :                           int *batchno)
    1963              : {
    1964     19934772 :     uint32      nbuckets = (uint32) hashtable->nbuckets;
    1965     19934772 :     uint32      nbatch = (uint32) hashtable->nbatch;
    1966              : 
    1967     19934772 :     if (nbatch > 1)
    1968              :     {
    1969      7762399 :         *bucketno = hashvalue & (nbuckets - 1);
    1970      7762399 :         *batchno = pg_rotate_right32(hashvalue,
    1971      7762399 :                                      hashtable->log2_nbuckets) & (nbatch - 1);
    1972              :     }
    1973              :     else
    1974              :     {
    1975     12172373 :         *bucketno = hashvalue & (nbuckets - 1);
    1976     12172373 :         *batchno = 0;
    1977              :     }
    1978     19934772 : }
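/*
 * [Editor's illustrative sketch, standalone] The bucketno/batchno arithmetic
 * above, with a worked example.  rotate_right32 reimplements the behavior of
 * pg_rotate_right32 for 0 < n < 32 so no PostgreSQL headers are needed; the
 * table geometry is made up.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t
rotate_right32(uint32_t word, int n)            /* caller ensures 0 < n < 32 */
{
    return (word >> n) | (word << (32 - n));
}

int
main(void)
{
    uint32_t    hashvalue = 0xDEADBEEF;
    uint32_t    nbuckets = 1024;                /* power of 2; log2 = 10 */
    uint32_t    nbatch = 4;                     /* power of 2 */
    uint32_t    bucketno = hashvalue & (nbuckets - 1);
    uint32_t    batchno = rotate_right32(hashvalue, 10) & (nbatch - 1);

    /* the low 10 bits pick the bucket (751); after rotating them away, the
     * next 2 bits pick the batch (3) */
    printf("bucketno=%u batchno=%u\n", (unsigned) bucketno, (unsigned) batchno);
    return 0;
}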
    1979              : 
    1980              : /*
    1981              :  * ExecScanHashBucket
    1982              :  *      scan a hash bucket for matches to the current outer tuple
    1983              :  *
    1984              :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    1985              :  *
    1986              :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    1987              :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    1988              :  * for the latter.
    1989              :  */
    1990              : bool
    1991     11670805 : ExecScanHashBucket(HashJoinState *hjstate,
    1992              :                    ExprContext *econtext)
    1993              : {
    1994     11670805 :     ExprState  *hjclauses = hjstate->hashclauses;
    1995     11670805 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    1996     11670805 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    1997     11670805 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    1998              : 
    1999              :     /*
    2000              :      * hj_CurTuple is the address of the tuple last returned from the current
    2001              :      * bucket, or NULL if it's time to start scanning a new bucket.
    2002              :      *
     2003              :      * If the tuple hashed to a skew bucket then scan the skew bucket;
     2004              :      * otherwise scan the standard hashtable bucket.
    2005              :      */
    2006     11670805 :     if (hashTuple != NULL)
    2007      2581300 :         hashTuple = hashTuple->next.unshared;
    2008      9089505 :     else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
    2009         1200 :         hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
    2010              :     else
    2011      9088305 :         hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    2012              : 
    2013     13824141 :     while (hashTuple != NULL)
    2014              :     {
    2015      7520791 :         if (hashTuple->hashvalue == hashvalue)
    2016              :         {
    2017              :             TupleTableSlot *inntuple;
    2018              : 
    2019              :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    2020      5367467 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2021              :                                              hjstate->hj_HashTupleSlot,
    2022              :                                              false);    /* do not pfree */
    2023      5367467 :             econtext->ecxt_innertuple = inntuple;
    2024              : 
    2025      5367467 :             if (ExecQualAndReset(hjclauses, econtext))
    2026              :             {
    2027      5367455 :                 hjstate->hj_CurTuple = hashTuple;
    2028      5367455 :                 return true;
    2029              :             }
    2030              :         }
    2031              : 
    2032      2153336 :         hashTuple = hashTuple->next.unshared;
    2033              :     }
    2034              : 
    2035              :     /*
    2036              :      * no match
    2037              :      */
    2038      6303350 :     return false;
    2039              : }
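/*
 * [Editor's illustrative sketch, standalone] The bucket-scan pattern above:
 * the full 32-bit hash stored in each tuple is compared first, so the
 * potentially expensive join qualifier runs only on genuine hash collisions.
 * ChainTuple is a hypothetical reduction of HashJoinTuple.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct ChainTuple
{
    struct ChainTuple *next;
    uint32_t    hashvalue;
    int         key;                    /* stand-in for the tuple payload */
} ChainTuple;

static ChainTuple *
scan_bucket(ChainTuple *chain, uint32_t hashvalue, int probe_key)
{
    for (ChainTuple *t = chain; t != NULL; t = t->next)
    {
        /* cheap filter: skip tuples whose hash cannot match */
        if (t->hashvalue != hashvalue)
            continue;
        /* stand-in for ExecQualAndReset(): the real join clause */
        if (t->key == probe_key)
            return t;
    }
    return NULL;                        /* no match in this bucket */
}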
    2040              : 
    2041              : /*
    2042              :  * ExecParallelScanHashBucket
    2043              :  *      scan a hash bucket for matches to the current outer tuple
    2044              :  *
    2045              :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    2046              :  *
    2047              :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2048              :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2049              :  * for the latter.
    2050              :  */
    2051              : bool
    2052      2103054 : ExecParallelScanHashBucket(HashJoinState *hjstate,
    2053              :                            ExprContext *econtext)
    2054              : {
    2055      2103054 :     ExprState  *hjclauses = hjstate->hashclauses;
    2056      2103054 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2057      2103054 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2058      2103054 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    2059              : 
    2060              :     /*
    2061              :      * hj_CurTuple is the address of the tuple last returned from the current
    2062              :      * bucket, or NULL if it's time to start scanning a new bucket.
    2063              :      */
    2064      2103054 :     if (hashTuple != NULL)
    2065      1020039 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2066              :     else
    2067      1083015 :         hashTuple = ExecParallelHashFirstTuple(hashtable,
    2068              :                                                hjstate->hj_CurBucketNo);
    2069              : 
    2070      2769365 :     while (hashTuple != NULL)
    2071              :     {
    2072      1686350 :         if (hashTuple->hashvalue == hashvalue)
    2073              :         {
    2074              :             TupleTableSlot *inntuple;
    2075              : 
    2076              :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    2077      1020039 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2078              :                                              hjstate->hj_HashTupleSlot,
    2079              :                                              false);    /* do not pfree */
    2080      1020039 :             econtext->ecxt_innertuple = inntuple;
    2081              : 
    2082      1020039 :             if (ExecQualAndReset(hjclauses, econtext))
    2083              :             {
    2084      1020039 :                 hjstate->hj_CurTuple = hashTuple;
    2085      1020039 :                 return true;
    2086              :             }
    2087              :         }
    2088              : 
    2089       666311 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2090              :     }
    2091              : 
    2092              :     /*
    2093              :      * no match
    2094              :      */
    2095      1083015 :     return false;
    2096              : }
    2097              : 
    2098              : /*
    2099              :  * ExecPrepHashTableForUnmatched
    2100              :  *      set up for a series of ExecScanHashTableForUnmatched calls
    2101              :  */
    2102              : void
    2103         2099 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
    2104              : {
    2105              :     /*----------
    2106              :      * During this scan we use the HashJoinState fields as follows:
    2107              :      *
    2108              :      * hj_CurBucketNo: next regular bucket to scan
    2109              :      * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
    2110              :      * hj_CurTuple: last tuple returned, or NULL to start next bucket
    2111              :      *----------
    2112              :      */
    2113         2099 :     hjstate->hj_CurBucketNo = 0;
    2114         2099 :     hjstate->hj_CurSkewBucketNo = 0;
    2115         2099 :     hjstate->hj_CurTuple = NULL;
    2116         2099 : }
    2117              : 
    2118              : /*
    2119              :  * Decide if this process is allowed to run the unmatched scan.  If so, the
    2120              :  * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
    2121              :  * Otherwise the batch is detached and false is returned.
    2122              :  */
    2123              : bool
    2124           50 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
    2125              : {
    2126           50 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2127           50 :     int         curbatch = hashtable->curbatch;
    2128           50 :     ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    2129              : 
    2130              :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
    2131              : 
    2132              :     /*
    2133              :      * It would not be deadlock-free to wait on the batch barrier, because it
    2134              :      * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
    2135              :      * already emitted tuples.  Therefore, we'll hold a wait-free election:
    2136              :      * only one process can continue to the next phase, and all others detach
     2137              :      * from this batch.  They can still do any work on other batches, if there
    2138              :      * are any.
    2139              :      */
    2140           50 :     if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
    2141              :     {
    2142              :         /* This process considers the batch to be done. */
    2143           17 :         hashtable->batches[hashtable->curbatch].done = true;
    2144              : 
    2145              :         /* Make sure any temporary files are closed. */
    2146           17 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    2147           17 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    2148              : 
    2149              :         /*
    2150              :          * Track largest batch we've seen, which would normally happen in
    2151              :          * ExecHashTableDetachBatch().
    2152              :          */
    2153           17 :         hashtable->spacePeak =
    2154           17 :             Max(hashtable->spacePeak,
    2155              :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    2156           17 :         hashtable->curbatch = -1;
    2157           17 :         return false;
    2158              :     }
    2159              : 
    2160              :     /* Now we are alone with this batch. */
    2161              :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    2162              : 
    2163              :     /*
    2164              :      * Has another process decided to give up early and command all processes
    2165              :      * to skip the unmatched scan?
    2166              :      */
    2167           33 :     if (batch->skip_unmatched)
    2168              :     {
    2169            0 :         hashtable->batches[hashtable->curbatch].done = true;
    2170            0 :         ExecHashTableDetachBatch(hashtable);
    2171            0 :         return false;
    2172              :     }
    2173              : 
    2174              :     /* Now prepare the process local state, just as for non-parallel join. */
    2175           33 :     ExecPrepHashTableForUnmatched(hjstate);
    2176              : 
    2177           33 :     return true;
    2178              : }
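/*
 * [Editor's conceptual sketch, standalone C11] The "wait-free election" idea
 * above, reduced to its core: every participant detaches by decrementing a
 * counter, and the one that was last attached wins and runs the serial scan.
 * No caller ever blocks, so processes that have already emitted tuples
 * cannot deadlock.  This only sketches the concept behind
 * BarrierArriveAndDetachExceptLast, not its implementation.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool
elect_or_detach(atomic_int *attached)
{
    /* fetch_sub returns the old value; old == 1 means we were the last */
    return atomic_fetch_sub(attached, 1) == 1;
}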
    2179              : 
    2180              : /*
    2181              :  * ExecScanHashTableForUnmatched
    2182              :  *      scan the hash table for unmatched inner tuples
    2183              :  *
    2184              :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2185              :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2186              :  * for the latter.
    2187              :  */
    2188              : bool
    2189       217395 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
    2190              : {
    2191       217395 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2192       217395 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2193              : 
    2194              :     for (;;)
    2195              :     {
    2196              :         /*
    2197              :          * hj_CurTuple is the address of the tuple last returned from the
    2198              :          * current bucket, or NULL if it's time to start scanning a new
    2199              :          * bucket.
    2200              :          */
    2201      2906661 :         if (hashTuple != NULL)
    2202       215329 :             hashTuple = hashTuple->next.unshared;
    2203      2691332 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2204              :         {
    2205      2689272 :             hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    2206      2689272 :             hjstate->hj_CurBucketNo++;
    2207              :         }
    2208         2060 :         else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
    2209              :         {
    2210            0 :             int         j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
    2211              : 
    2212            0 :             hashTuple = hashtable->skewBucket[j]->tuples;
    2213            0 :             hjstate->hj_CurSkewBucketNo++;
    2214              :         }
    2215              :         else
    2216         2060 :             break;              /* finished all buckets */
    2217              : 
    2218      3108653 :         while (hashTuple != NULL)
    2219              :         {
    2220       419387 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2221              :             {
    2222              :                 TupleTableSlot *inntuple;
    2223              : 
    2224              :                 /* insert hashtable's tuple into exec slot */
    2225       215335 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2226              :                                                  hjstate->hj_HashTupleSlot,
    2227              :                                                  false);    /* do not pfree */
    2228       215335 :                 econtext->ecxt_innertuple = inntuple;
    2229              : 
    2230              :                 /*
    2231              :                  * Reset temp memory each time; although this function doesn't
    2232              :                  * do any qual eval, the caller will, so let's keep it
    2233              :                  * parallel to ExecScanHashBucket.
    2234              :                  */
    2235       215335 :                 ResetExprContext(econtext);
    2236              : 
    2237       215335 :                 hjstate->hj_CurTuple = hashTuple;
    2238       215335 :                 return true;
    2239              :             }
    2240              : 
    2241       204052 :             hashTuple = hashTuple->next.unshared;
    2242              :         }
    2243              : 
    2244              :         /* allow this loop to be cancellable */
    2245      2689266 :         CHECK_FOR_INTERRUPTS();
    2246              :     }
    2247              : 
    2248              :     /*
    2249              :      * no more unmatched tuples
    2250              :      */
    2251         2060 :     return false;
    2252              : }
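/*
 * [Editor's illustrative sketch, standalone] The unmatched-scan pattern
 * above: probing marks each inner tuple that joined, and a final sweep over
 * every bucket chain visits the tuples that never matched, which is what a
 * right or full join emits null-extended.  FlaggedTuple is a hypothetical
 * reduction of HashJoinTuple with its match flag made explicit.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct FlaggedTuple
{
    struct FlaggedTuple *next;
    bool        matched;        /* set when the tuple joined an outer row */
    int         payload;
} FlaggedTuple;

/* Invoke cb() once per unmatched tuple, across all buckets. */
static void
scan_unmatched(FlaggedTuple **buckets, int nbuckets,
               void (*cb) (FlaggedTuple *))
{
    for (int i = 0; i < nbuckets; i++)
        for (FlaggedTuple *t = buckets[i]; t != NULL; t = t->next)
            if (!t->matched)
                cb(t);
}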
    2253              : 
    2254              : /*
    2255              :  * ExecParallelScanHashTableForUnmatched
    2256              :  *      scan the hash table for unmatched inner tuples, in parallel join
    2257              :  *
    2258              :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2259              :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2260              :  * for the latter.
    2261              :  */
    2262              : bool
    2263        60036 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
    2264              :                                       ExprContext *econtext)
    2265              : {
    2266        60036 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2267        60036 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2268              : 
    2269              :     for (;;)
    2270              :     {
    2271              :         /*
    2272              :          * hj_CurTuple is the address of the tuple last returned from the
    2273              :          * current bucket, or NULL if it's time to start scanning a new
    2274              :          * bucket.
    2275              :          */
    2276       367236 :         if (hashTuple != NULL)
    2277        60003 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2278       307233 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2279       307200 :             hashTuple = ExecParallelHashFirstTuple(hashtable,
    2280       307200 :                                                    hjstate->hj_CurBucketNo++);
    2281              :         else
    2282           33 :             break;              /* finished all buckets */
    2283              : 
    2284       487203 :         while (hashTuple != NULL)
    2285              :         {
    2286       180003 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2287              :             {
    2288              :                 TupleTableSlot *inntuple;
    2289              : 
    2290              :                 /* insert hashtable's tuple into exec slot */
    2291        60003 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2292              :                                                  hjstate->hj_HashTupleSlot,
    2293              :                                                  false);    /* do not pfree */
    2294        60003 :                 econtext->ecxt_innertuple = inntuple;
    2295              : 
    2296              :                 /*
    2297              :                  * Reset temp memory each time; although this function doesn't
    2298              :                  * do any qual eval, the caller will, so let's keep it
    2299              :                  * parallel to ExecScanHashBucket.
    2300              :                  */
    2301        60003 :                 ResetExprContext(econtext);
    2302              : 
    2303        60003 :                 hjstate->hj_CurTuple = hashTuple;
    2304        60003 :                 return true;
    2305              :             }
    2306              : 
    2307       120000 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2308              :         }
    2309              : 
    2310              :         /* allow this loop to be cancellable */
    2311       307200 :         CHECK_FOR_INTERRUPTS();
    2312              :     }
    2313              : 
    2314              :     /*
    2315              :      * no more unmatched tuples
    2316              :      */
    2317           33 :     return false;
    2318              : }
    2319              : 
    2320              : /*
    2321              :  * ExecHashTableReset
    2322              :  *
    2323              :  *      reset hash table header for new batch
    2324              :  */
    2325              : void
    2326          619 : ExecHashTableReset(HashJoinTable hashtable)
    2327              : {
    2328              :     MemoryContext oldcxt;
    2329          619 :     int         nbuckets = hashtable->nbuckets;
    2330              : 
    2331              :     /*
    2332              :      * Release all the hash buckets and tuples acquired in the prior pass, and
    2333              :      * reinitialize the context for a new pass.
    2334              :      */
    2335          619 :     MemoryContextReset(hashtable->batchCxt);
    2336          619 :     oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
    2337              : 
    2338              :     /* Reallocate and reinitialize the hash bucket headers. */
    2339          619 :     hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
    2340              : 
    2341          619 :     hashtable->spaceUsed = 0;
    2342              : 
    2343          619 :     MemoryContextSwitchTo(oldcxt);
    2344              : 
    2345              :     /* Forget the chunks (the memory was freed by the context reset above). */
    2346          619 :     hashtable->chunks = NULL;
    2347          619 : }
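/*
 * [Editor's conceptual sketch, standalone] The batch-reset idea above: all
 * per-batch tuples live in one allocation region, so resetting the region
 * releases everything at once and only the bucket array needs reallocating.
 * Region is a hypothetical bump allocator, far simpler than a real
 * PostgreSQL memory context (alignment and growth are ignored).
 */
#include <stddef.h>

typedef struct Region
{
    char       *base;
    size_t      used;
    size_t      cap;
} Region;

static void *
region_alloc(Region *r, size_t n)
{
    if (r->used + n > r->cap)
        return NULL;            /* a real context would grow instead */
    r->used += n;
    return r->base + r->used - n;
}

static void
region_reset(Region *r)
{
    r->used = 0;                /* every prior allocation is released */
}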
    2348              : 
    2349              : /*
    2350              :  * ExecHashTableResetMatchFlags
    2351              :  *      Clear all the HeapTupleHeaderHasMatch flags in the table
    2352              :  */
    2353              : void
    2354           62 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
    2355              : {
    2356              :     HashJoinTuple tuple;
    2357              :     int         i;
    2358              : 
    2359              :     /* Reset all flags in the main table ... */
    2360        63550 :     for (i = 0; i < hashtable->nbuckets; i++)
    2361              :     {
    2362        63694 :         for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
    2363          206 :              tuple = tuple->next.unshared)
    2364          206 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2365              :     }
    2366              : 
    2367              :     /* ... and the same for the skew buckets, if any */
    2368           62 :     for (i = 0; i < hashtable->nSkewBuckets; i++)
    2369              :     {
    2370            0 :         int         j = hashtable->skewBucketNums[i];
    2371            0 :         HashSkewBucket *skewBucket = hashtable->skewBucket[j];
    2372              : 
    2373            0 :         for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
    2374            0 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2375              :     }
    2376           62 : }
    2377              : 
    2378              : 
    2379              : void
    2380          945 : ExecReScanHash(HashState *node)
    2381              : {
    2382          945 :     PlanState  *outerPlan = outerPlanState(node);
    2383              : 
    2384              :     /*
    2385              :      * if chgParam of subnode is not null then plan will be re-scanned by
    2386              :      * first ExecProcNode.
    2387              :      */
    2388          945 :     if (outerPlan->chgParam == NULL)
    2389           15 :         ExecReScan(outerPlan);
    2390          945 : }
    2391              : 
    2392              : 
    2393              : /*
    2394              :  * ExecHashBuildSkewHash
    2395              :  *
    2396              :  *      Set up for skew optimization if we can identify the most common values
    2397              :  *      (MCVs) of the outer relation's join key.  We make a skew hash bucket
    2398              :  *      for the hash value of each MCV, up to the number of slots allowed
    2399              :  *      based on available memory.
    2400              :  */
    2401              : static void
    2402           63 : ExecHashBuildSkewHash(HashState *hashstate, HashJoinTable hashtable,
    2403              :                       Hash *node, int mcvsToUse)
    2404              : {
    2405              :     HeapTupleData *statsTuple;
    2406              :     AttStatsSlot sslot;
    2407              : 
    2408              :     /* Do nothing if planner didn't identify the outer relation's join key */
    2409           63 :     if (!OidIsValid(node->skewTable))
    2410            0 :         return;
    2411              :     /* Also, do nothing if we don't have room for at least one skew bucket */
    2412           63 :     if (mcvsToUse <= 0)
    2413            0 :         return;
    2414              : 
    2415              :     /*
    2416              :      * Try to find the MCV statistics for the outer relation's join key.
    2417              :      */
    2418           63 :     statsTuple = SearchSysCache3(STATRELATTINH,
    2419              :                                  ObjectIdGetDatum(node->skewTable),
    2420           63 :                                  Int16GetDatum(node->skewColumn),
    2421           63 :                                  BoolGetDatum(node->skewInherit));
    2422           63 :     if (!HeapTupleIsValid(statsTuple))
    2423            0 :         return;
    2424              : 
    2425           63 :     if (get_attstatsslot(&sslot, statsTuple,
    2426              :                          STATISTIC_KIND_MCV, InvalidOid,
    2427              :                          ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
    2428              :     {
    2429              :         double      frac;
    2430              :         int         nbuckets;
    2431              :         int         i;
    2432              : 
    2433            3 :         if (mcvsToUse > sslot.nvalues)
    2434            0 :             mcvsToUse = sslot.nvalues;
    2435              : 
    2436              :         /*
    2437              :          * Calculate the expected fraction of outer relation that will
    2438              :          * participate in the skew optimization.  If this isn't at least
    2439              :          * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
    2440              :          */
    2441            3 :         frac = 0;
    2442           66 :         for (i = 0; i < mcvsToUse; i++)
    2443           63 :             frac += sslot.numbers[i];
    2444            3 :         if (frac < SKEW_MIN_OUTER_FRACTION)
    2445              :         {
    2446            0 :             free_attstatsslot(&sslot);
    2447            0 :             ReleaseSysCache(statsTuple);
    2448            0 :             return;
    2449              :         }
    2450              : 
    2451              :         /*
    2452              :          * Okay, set up the skew hashtable.
    2453              :          *
    2454              :          * skewBucket[] is an open addressing hashtable with a power of 2 size
    2455              :          * that is greater than the number of MCV values.  (This ensures there
    2456              :          * will be at least one null entry, so searches will always
    2457              :          * terminate.)
    2458              :          *
    2459              :          * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
    2460              :          * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
    2461              :          * since we limit pg_statistic entries to much less than that.
    2462              :          */
    2463            3 :         nbuckets = pg_nextpower2_32(mcvsToUse + 1);
    2464              :         /* use two more bits just to help avoid collisions */
    2465            3 :         nbuckets <<= 2;
    2466              : 
    2467            3 :         hashtable->skewEnabled = true;
    2468            3 :         hashtable->skewBucketLen = nbuckets;
    2469              : 
    2470              :         /*
    2471              :          * We allocate the bucket memory in the hashtable's batch context. It
    2472              :          * is only needed during the first batch, and this ensures it will be
    2473              :          * automatically removed once the first batch is done.
    2474              :          */
    2475            3 :         hashtable->skewBucket = (HashSkewBucket **)
    2476            3 :             MemoryContextAllocZero(hashtable->batchCxt,
    2477              :                                    nbuckets * sizeof(HashSkewBucket *));
    2478            3 :         hashtable->skewBucketNums = (int *)
    2479            3 :             MemoryContextAllocZero(hashtable->batchCxt,
    2480              :                                    mcvsToUse * sizeof(int));
    2481              : 
    2482            3 :         hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
    2483            3 :             + mcvsToUse * sizeof(int);
    2484            3 :         hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
    2485            3 :             + mcvsToUse * sizeof(int);
    2486            3 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    2487            3 :             hashtable->spacePeak = hashtable->spaceUsed;
    2488              : 
    2489              :         /*
    2490              :          * Create a skew bucket for each MCV hash value.
    2491              :          *
    2492              :          * Note: it is very important that we create the buckets in order of
    2493              :          * decreasing MCV frequency.  If we have to remove some buckets, they
    2494              :          * must be removed in reverse order of creation (see notes in
    2495              :          * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
    2496              :          * be removed first.
    2497              :          */
    2498              : 
    2499           66 :         for (i = 0; i < mcvsToUse; i++)
    2500              :         {
    2501              :             uint32      hashvalue;
    2502              :             int         bucket;
    2503              : 
    2504           63 :             hashvalue = DatumGetUInt32(FunctionCall1Coll(hashstate->skew_hashfunction,
    2505              :                                                          hashstate->skew_collation,
    2506           63 :                                                          sslot.values[i]));
    2507              : 
    2508              :             /*
    2509              :              * While we have not hit a hole in the hashtable and have not hit
    2510              :              * the desired bucket, we have collided with some previous hash
    2511              :              * value, so try the next bucket location.  NB: this code must
    2512              :              * match ExecHashGetSkewBucket.
    2513              :              */
    2514           63 :             bucket = hashvalue & (nbuckets - 1);
    2515           63 :             while (hashtable->skewBucket[bucket] != NULL &&
    2516            0 :                    hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2517            0 :                 bucket = (bucket + 1) & (nbuckets - 1);
    2518              : 
    2519              :             /*
    2520              :              * If we found an existing bucket with the same hashvalue, leave
    2521              :              * it alone.  It's okay for two MCVs to share a hashvalue.
    2522              :              */
    2523           63 :             if (hashtable->skewBucket[bucket] != NULL)
    2524            0 :                 continue;
    2525              : 
    2526              :             /* Okay, create a new skew bucket for this hashvalue. */
    2527          126 :             hashtable->skewBucket[bucket] = (HashSkewBucket *)
    2528           63 :                 MemoryContextAlloc(hashtable->batchCxt,
    2529              :                                    sizeof(HashSkewBucket));
    2530           63 :             hashtable->skewBucket[bucket]->hashvalue = hashvalue;
    2531           63 :             hashtable->skewBucket[bucket]->tuples = NULL;
    2532           63 :             hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
    2533           63 :             hashtable->nSkewBuckets++;
    2534           63 :             hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
    2535           63 :             hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
    2536           63 :             if (hashtable->spaceUsed > hashtable->spacePeak)
    2537           63 :                 hashtable->spacePeak = hashtable->spaceUsed;
    2538              :         }
    2539              : 
    2540            3 :         free_attstatsslot(&sslot);
    2541              :     }
    2542              : 
    2543           63 :     ReleaseSysCache(statsTuple);
    2544              : }
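/*
 * [Editor's illustrative sketch, standalone] The skew-table sizing rule
 * above: round mcvsToUse + 1 up to a power of 2, then multiply by 4 (the
 * "two more bits"), guaranteeing NULL holes so linear-probe searches always
 * terminate.  next_power_of_2 stands in for pg_nextpower2_32.
 */
#include <stdint.h>

static uint32_t
next_power_of_2(uint32_t n)     /* smallest power of 2 >= n, for n >= 1 */
{
    uint32_t    p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

static uint32_t
skew_bucket_len(int mcvsToUse)
{
    /* e.g. mcvsToUse = 100: next_power_of_2(101) = 128, so 512 slots */
    return next_power_of_2((uint32_t) mcvsToUse + 1) << 2;
}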
    2545              : 
    2546              : /*
    2547              :  * ExecHashGetSkewBucket
    2548              :  *
    2549              :  *      Returns the index of the skew bucket for this hashvalue,
    2550              :  *      or INVALID_SKEW_BUCKET_NO if the hashvalue is not
    2551              :  *      associated with any active skew bucket.
    2552              :  */
    2553              : int
    2554     15330927 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
    2555              : {
    2556              :     int         bucket;
    2557              : 
    2558              :     /*
    2559              :      * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
    2560              :      * particular, this happens after the initial batch is done).
    2561              :      */
    2562     15330927 :     if (!hashtable->skewEnabled)
    2563     15270927 :         return INVALID_SKEW_BUCKET_NO;
    2564              : 
    2565              :     /*
    2566              :      * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
    2567              :      */
    2568        60000 :     bucket = hashvalue & (hashtable->skewBucketLen - 1);
    2569              : 
    2570              :     /*
    2571              :      * While we have not hit a hole in the hashtable and have not hit the
    2572              :      * desired bucket, we have collided with some other hash value, so try the
    2573              :      * next bucket location.
    2574              :      */
    2575        63915 :     while (hashtable->skewBucket[bucket] != NULL &&
    2576         5409 :            hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2577         3915 :         bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
    2578              : 
    2579              :     /*
    2580              :      * Found the desired bucket?
    2581              :      */
    2582        60000 :     if (hashtable->skewBucket[bucket] != NULL)
    2583         1494 :         return bucket;
    2584              : 
    2585              :     /*
    2586              :      * There must not be any hashtable entry for this hash value.
    2587              :      */
    2588        58506 :     return INVALID_SKEW_BUCKET_NO;
    2589              : }
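/*
 * [Editor's illustrative sketch, standalone] The open-addressing lookup
 * above: because the table length is a power of 2 and at least one slot is
 * always NULL, probing ends either at the wanted hash value or at a hole
 * proving it is absent.  SkewSlot and NO_SKEW_BUCKET are hypothetical
 * reductions of HashSkewBucket and INVALID_SKEW_BUCKET_NO.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct SkewSlot
{
    uint32_t    hashvalue;
    /* tuple chain elided */
} SkewSlot;

#define NO_SKEW_BUCKET (-1)

static int
skew_lookup(SkewSlot **table, uint32_t table_len, uint32_t hashvalue)
{
    uint32_t    bucket = hashvalue & (table_len - 1);   /* modulo by ANDing */

    /* step past collisions until we find our hash or a hole */
    while (table[bucket] != NULL && table[bucket]->hashvalue != hashvalue)
        bucket = (bucket + 1) & (table_len - 1);

    return table[bucket] != NULL ? (int) bucket : NO_SKEW_BUCKET;
}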
    2590              : 
    2591              : /*
    2592              :  * ExecHashSkewTableInsert
    2593              :  *
    2594              :  *      Insert a tuple into the skew hashtable.
    2595              :  *
    2596              :  * This should generally match up with the current-batch case in
    2597              :  * ExecHashTableInsert.
    2598              :  */
    2599              : static void
    2600          294 : ExecHashSkewTableInsert(HashJoinTable hashtable,
    2601              :                         TupleTableSlot *slot,
    2602              :                         uint32 hashvalue,
    2603              :                         int bucketNumber)
    2604              : {
    2605              :     bool        shouldFree;
    2606          294 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    2607              :     HashJoinTuple hashTuple;
    2608              :     int         hashTupleSize;
    2609              : 
    2610              :     /* Create the HashJoinTuple */
    2611          294 :     hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2612          294 :     hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
    2613              :                                                    hashTupleSize);
    2614          294 :     hashTuple->hashvalue = hashvalue;
    2615          294 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    2616          294 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    2617              : 
    2618              :     /* Push it onto the front of the skew bucket's list */
    2619          294 :     hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
    2620          294 :     hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
    2621              :     Assert(hashTuple != hashTuple->next.unshared);
    2622              : 
    2623              :     /* Account for space used, and back off if we've used too much */
    2624          294 :     hashtable->spaceUsed += hashTupleSize;
    2625          294 :     hashtable->spaceUsedSkew += hashTupleSize;
    2626          294 :     if (hashtable->spaceUsed > hashtable->spacePeak)
    2627          216 :         hashtable->spacePeak = hashtable->spaceUsed;
    2628          345 :     while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
    2629           51 :         ExecHashRemoveNextSkewBucket(hashtable);
    2630              : 
    2631              :     /* Check we are not over the total spaceAllowed, either */
    2632          294 :     if (hashtable->spaceUsed > hashtable->spaceAllowed)
    2633            0 :         ExecHashIncreaseNumBatches(hashtable);
    2634              : 
    2635          294 :     if (shouldFree)
    2636          294 :         heap_free_minimal_tuple(tuple);
    2637          294 : }
    2638              : 
    2639              : /*
    2640              :  *      ExecHashRemoveNextSkewBucket
    2641              :  *
    2642              :  *      Remove the least valuable skew bucket by pushing its tuples into
    2643              :  *      the main hash table.
    2644              :  */
    2645              : static void
    2646           51 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
    2647              : {
    2648              :     int         bucketToRemove;
    2649              :     HashSkewBucket *bucket;
    2650              :     uint32      hashvalue;
    2651              :     int         bucketno;
    2652              :     int         batchno;
    2653              :     HashJoinTuple hashTuple;
    2654              : 
    2655              :     /* Locate the bucket to remove */
    2656           51 :     bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
    2657           51 :     bucket = hashtable->skewBucket[bucketToRemove];
    2658              : 
    2659              :     /*
    2660              :      * Calculate which bucket and batch the tuples belong to in the main
    2661              :      * hashtable.  They all have the same hash value, so it's the same for all
    2662              :      * of them.  Also note that it's not possible for nbatch to increase while
    2663              :      * we are processing the tuples.
    2664              :      */
    2665           51 :     hashvalue = bucket->hashvalue;
    2666           51 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    2667              : 
    2668              :     /* Process all tuples in the bucket */
    2669           51 :     hashTuple = bucket->tuples;
    2670          225 :     while (hashTuple != NULL)
    2671              :     {
    2672          174 :         HashJoinTuple nextHashTuple = hashTuple->next.unshared;
    2673              :         MinimalTuple tuple;
    2674              :         Size        tupleSize;
    2675              : 
    2676              :         /*
    2677              :          * This code must agree with ExecHashTableInsert.  We do not use
    2678              :          * ExecHashTableInsert directly as ExecHashTableInsert expects a
    2679              :          * TupleTableSlot while we already have HashJoinTuples.
    2680              :          */
    2681          174 :         tuple = HJTUPLE_MINTUPLE(hashTuple);
    2682          174 :         tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2683              : 
    2684              :         /* Decide whether to put the tuple in the hash table or a temp file */
    2685          174 :         if (batchno == hashtable->curbatch)
    2686              :         {
    2687              :             /* Move the tuple to the main hash table */
    2688              :             HashJoinTuple copyTuple;
    2689              : 
    2690              :             /*
    2691              :              * We must copy the tuple into the dense storage, else it will not
    2692              :              * be found by, eg, ExecHashIncreaseNumBatches.
    2693              :              */
    2694           69 :             copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
    2695           69 :             memcpy(copyTuple, hashTuple, tupleSize);
    2696           69 :             pfree(hashTuple);
    2697              : 
    2698           69 :             copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    2699           69 :             hashtable->buckets.unshared[bucketno] = copyTuple;
    2700              : 
    2701              :             /* We have reduced skew space, but overall space doesn't change */
    2702           69 :             hashtable->spaceUsedSkew -= tupleSize;
    2703              :         }
    2704              :         else
    2705              :         {
    2706              :             /* Put the tuple into a temp file for later batches */
    2707              :             Assert(batchno > hashtable->curbatch);
    2708          105 :             ExecHashJoinSaveTuple(tuple, hashvalue,
    2709          105 :                                   &hashtable->innerBatchFile[batchno],
    2710              :                                   hashtable);
    2711          105 :             pfree(hashTuple);
    2712          105 :             hashtable->spaceUsed -= tupleSize;
    2713          105 :             hashtable->spaceUsedSkew -= tupleSize;
    2714              :         }
    2715              : 
    2716          174 :         hashTuple = nextHashTuple;
    2717              : 
    2718              :         /* allow this loop to be cancellable */
    2719          174 :         CHECK_FOR_INTERRUPTS();
    2720              :     }
    2721              : 
    2722              :     /*
    2723              :      * Free the bucket struct itself and reset the hashtable entry to NULL.
    2724              :      *
    2725              :      * NOTE: this is not nearly as simple as it looks on the surface, because
    2726              :      * of the possibility of collisions in the hashtable.  Suppose that hash
    2727              :      * values A and B collide at a particular hashtable entry, and that A was
    2728              :      * entered first so B gets shifted to a different table entry.  If we were
    2729              :      * to remove A first then ExecHashGetSkewBucket would mistakenly start
    2730              :      * reporting that B is not in the hashtable, because it would hit the NULL
    2731              :      * before finding B.  However, we always remove entries in the reverse
    2732              :      * order of creation, so this failure cannot happen.
    2733              :      */
    2734           51 :     hashtable->skewBucket[bucketToRemove] = NULL;
    2735           51 :     hashtable->nSkewBuckets--;
    2736           51 :     pfree(bucket);
    2737           51 :     hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
    2738           51 :     hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
    2739              : 
    2740              :     /*
    2741              :      * If we have removed all skew buckets then give up on skew optimization.
    2742              :      * Release the arrays since they aren't useful any more.
    2743              :      */
    2744           51 :     if (hashtable->nSkewBuckets == 0)
    2745              :     {
    2746            0 :         hashtable->skewEnabled = false;
    2747            0 :         pfree(hashtable->skewBucket);
    2748            0 :         pfree(hashtable->skewBucketNums);
    2749            0 :         hashtable->skewBucket = NULL;
    2750            0 :         hashtable->skewBucketNums = NULL;
    2751            0 :         hashtable->spaceUsed -= hashtable->spaceUsedSkew;
    2752            0 :         hashtable->spaceUsedSkew = 0;
    2753              :     }
    2754           51 : }
    2755              : 
    2756              : /*
    2757              :  * Reserve space in the DSM segment for instrumentation data.
    2758              :  */
    2759              : void
    2760           99 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
    2761              : {
    2762              :     size_t      size;
    2763              : 
    2764              :     /* don't need this if not instrumenting or no workers */
    2765           99 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2766           57 :         return;
    2767              : 
    2768           42 :     size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
    2769           42 :     size = add_size(size, offsetof(SharedHashInfo, hinstrument));
    2770           42 :     shm_toc_estimate_chunk(&pcxt->estimator, size);
    2771           42 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
    2772              : }
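/*
 * [Editor's illustrative sketch, standalone] Overflow-checked size
 * arithmetic in the spirit of the mul_size()/add_size() calls above; the
 * real functions raise a PostgreSQL error instead of aborting.
 */
#include <stdint.h>
#include <stdlib.h>

static size_t
mul_size_checked(size_t a, size_t b)
{
    if (b != 0 && a > SIZE_MAX / b)
        abort();                /* real code: ereport(ERROR, ...) */
    return a * b;
}

static size_t
add_size_checked(size_t a, size_t b)
{
    if (a > SIZE_MAX - b)
        abort();
    return a + b;
}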
    2773              : 
    2774              : /*
    2775              :  * Set up a space in the DSM for all workers to record instrumentation data
    2776              :  * about their hash table.
    2777              :  */
    2778              : void
    2779           99 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
    2780              : {
    2781              :     size_t      size;
    2782              : 
    2783              :     /* don't need this if not instrumenting or no workers */
    2784           99 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2785           57 :         return;
    2786              : 
    2787           42 :     size = offsetof(SharedHashInfo, hinstrument) +
    2788           42 :         pcxt->nworkers * sizeof(HashInstrumentation);
    2789           42 :     node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
    2790              : 
    2791              :     /* Each per-worker area must start out as zeroes. */
    2792           42 :     memset(node->shared_info, 0, size);
    2793              : 
    2794           42 :     node->shared_info->num_workers = pcxt->nworkers;
    2795           42 :     shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
    2796           42 :                    node->shared_info);
    2797              : }
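/*
 * [Editor's illustrative sketch, standalone] The flexible-array-member
 * sizing used above: offsetof gives the bytes preceding the per-worker
 * array, so the total is offsetof + nworkers * element size, and the whole
 * area starts zeroed.  SharedStats and WorkerStats are hypothetical
 * stand-ins for SharedHashInfo and HashInstrumentation.
 */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef struct WorkerStats
{
    long        ntuples;
} WorkerStats;

typedef struct SharedStats
{
    int         num_workers;
    WorkerStats hinstrument[];  /* flexible array member, one per worker */
} SharedStats;

static SharedStats *
alloc_shared_stats(int nworkers)
{
    size_t      size = offsetof(SharedStats, hinstrument) +
        nworkers * sizeof(WorkerStats);
    SharedStats *s = malloc(size);

    if (s == NULL)
        abort();
    memset(s, 0, size);         /* per-worker areas must start as zeroes */
    s->num_workers = nworkers;
    return s;
}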
    2798              : 
    2799              : /*
    2800              :  * Locate the DSM space for hash table instrumentation data that we'll write
    2801              :  * to at shutdown time.
    2802              :  */
    2803              : void
    2804          279 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
    2805              : {
    2806              :     SharedHashInfo *shared_info;
    2807              : 
    2808              :     /* don't need this if not instrumenting */
    2809          279 :     if (!node->ps.instrument)
    2810          153 :         return;
    2811              : 
    2812              :     /*
    2813              :      * Find our entry in the shared area, and set up a pointer to it so that
    2814              :      * we'll accumulate stats there when shutting down or rebuilding the hash
    2815              :      * table.
    2816              :      */
    2817              :     shared_info = (SharedHashInfo *)
    2818          126 :         shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
    2819          126 :     node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
    2820              : }
    2821              : 
    2822              : /*
    2823              :  * Collect EXPLAIN stats if needed, saving them into DSM memory if
    2824              :  * ExecHashInitializeWorker was called, or local storage if not.  In the
    2825              :  * parallel case, this must be done in ExecShutdownHash() rather than
    2826              :  * ExecEndHash() because the latter runs after we've detached from the DSM
    2827              :  * segment.
    2828              :  */
    2829              : void
    2830        16541 : ExecShutdownHash(HashState *node)
    2831              : {
    2832              :     /* Allocate save space if EXPLAIN'ing and we didn't do so already */
    2833        16541 :     if (node->ps.instrument && !node->hinstrument)
    2834           60 :         node->hinstrument = palloc0_object(HashInstrumentation);
    2835              :     /* Now accumulate data for the current (final) hash table */
    2836        16541 :     if (node->hinstrument && node->hashtable)
    2837          171 :         ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
    2838        16541 : }
    2839              : 
    2840              : /*
    2841              :  * Retrieve instrumentation data from workers before the DSM segment is
    2842              :  * detached, so that EXPLAIN can access it.
    2843              :  */
    2844              : void
    2845           42 : ExecHashRetrieveInstrumentation(HashState *node)
    2846              : {
    2847           42 :     SharedHashInfo *shared_info = node->shared_info;
    2848              :     size_t      size;
    2849              : 
    2850           42 :     if (shared_info == NULL)
    2851            0 :         return;
    2852              : 
    2853              :     /* Replace node->shared_info with a copy in backend-local memory. */
    2854           42 :     size = offsetof(SharedHashInfo, hinstrument) +
    2855           42 :         shared_info->num_workers * sizeof(HashInstrumentation);
    2856           42 :     node->shared_info = palloc(size);
    2857           42 :     memcpy(node->shared_info, shared_info, size);
    2858              : }
    2859              : 
    2860              : /*
    2861              :  * Accumulate instrumentation data from 'hashtable' into an
    2862              :  * initially-zeroed HashInstrumentation struct.
    2863              :  *
    2864              :  * This is used to merge information across successive hash table instances
    2865              :  * within a single plan node.  We take the maximum values of each interesting
    2866              :  * number.  The largest nbuckets and largest nbatch values might have occurred
    2867              :  * in different instances, so there's some risk of confusion from reporting
    2868              :  * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
    2869              :  * issue if we don't report the largest values.  Similarly, we want to report
    2870              :  * the largest spacePeak regardless of whether it happened in the same
    2871              :  * instance as the largest nbuckets or nbatch.  All the instances should have
    2872              :  * the same nbuckets_original and nbatch_original; but there's little value
    2873              :  * in depending on that here, so handle them the same way.
    2874              :  */
    2875              : void
    2876          171 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
    2877              :                              HashJoinTable hashtable)
    2878              : {
    2879          171 :     instrument->nbuckets = Max(instrument->nbuckets,
    2880              :                                hashtable->nbuckets);
    2881          171 :     instrument->nbuckets_original = Max(instrument->nbuckets_original,
    2882              :                                         hashtable->nbuckets_original);
    2883          171 :     instrument->nbatch = Max(instrument->nbatch,
    2884              :                              hashtable->nbatch);
    2885          171 :     instrument->nbatch_original = Max(instrument->nbatch_original,
    2886              :                                       hashtable->nbatch_original);
    2887          171 :     instrument->space_peak = Max(instrument->space_peak,
    2888              :                                  hashtable->spacePeak);
    2889          171 : }
    2890              : 
    2891              : /*
    2892              :  * Allocate 'size' bytes from the currently active HashMemoryChunk
    2893              :  */
    2894              : static void *
    2895      4703836 : dense_alloc(HashJoinTable hashtable, Size size)
    2896              : {
    2897              :     HashMemoryChunk newChunk;
    2898              :     char       *ptr;
    2899              : 
    2900              :     /* just in case the size is not already aligned properly */
    2901      4703836 :     size = MAXALIGN(size);
    2902              : 
    2903              :     /*
    2904              :      * If tuple size is larger than threshold, allocate a separate chunk.
    2905              :      */
    2906      4703836 :     if (size > HASH_CHUNK_THRESHOLD)
    2907              :     {
    2908              :         /* allocate new chunk and put it at the beginning of the list */
    2909            0 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2910              :                                                         HASH_CHUNK_HEADER_SIZE + size);
    2911            0 :         newChunk->maxlen = size;
    2912            0 :         newChunk->used = size;
    2913            0 :         newChunk->ntuples = 1;
    2914              : 
    2915              :         /*
    2916              :          * Add this chunk to the list after the first existing chunk, so that
    2917              :          * we don't lose the remaining space in the "current" chunk.
    2918              :          */
    2919            0 :         if (hashtable->chunks != NULL)
    2920              :         {
    2921            0 :             newChunk->next = hashtable->chunks->next;
    2922            0 :             hashtable->chunks->next.unshared = newChunk;
    2923              :         }
    2924              :         else
    2925              :         {
    2926            0 :             newChunk->next.unshared = hashtable->chunks;
    2927            0 :             hashtable->chunks = newChunk;
    2928              :         }
    2929              : 
    2930            0 :         return HASH_CHUNK_DATA(newChunk);
    2931              :     }
    2932              : 
    2933              :     /*
    2934              :      * See if we have enough space for it in the current chunk (if any). If
    2935              :      * not, allocate a fresh chunk.
    2936              :      */
    2937      4703836 :     if ((hashtable->chunks == NULL) ||
    2938      4691349 :         (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
    2939              :     {
    2940              :         /* allocate new chunk and put it at the beginning of the list */
    2941        18367 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2942              :                                                         HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
    2943              : 
    2944        18367 :         newChunk->maxlen = HASH_CHUNK_SIZE;
    2945        18367 :         newChunk->used = size;
    2946        18367 :         newChunk->ntuples = 1;
    2947              : 
    2948        18367 :         newChunk->next.unshared = hashtable->chunks;
    2949        18367 :         hashtable->chunks = newChunk;
    2950              : 
    2951        18367 :         return HASH_CHUNK_DATA(newChunk);
    2952              :     }
    2953              : 
    2954              :     /* There is enough space in the current chunk, so add the tuple */
    2955      4685469 :     ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
    2956      4685469 :     hashtable->chunks->used += size;
    2957      4685469 :     hashtable->chunks->ntuples += 1;
    2958              : 
    2959              :     /* return pointer to the start of the tuple memory */
    2960      4685469 :     return ptr;
    2961              : }
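
dense_alloc() is a bump allocator: tuples are packed end-to-end into large chunks, trading the ability to free individual tuples for much lower per-tuple overhead. A standalone sketch of the core technique (the real function also counts tuples per chunk and gives oversized tuples dedicated chunks):

#include <stddef.h>
#include <stdlib.h>

#define CHUNK_DATA_SIZE 32768
#define ALIGN8(x) (((x) + (size_t) 7) & ~(size_t) 7)

typedef struct Chunk
{
    struct Chunk *next;         /* newest chunk is kept at the list head */
    size_t      used;
    char        data[CHUNK_DATA_SIZE];
} Chunk;

static void *
bump_alloc(Chunk **chunks, size_t size)
{
    void       *ptr;

    size = ALIGN8(size);
    if (size > CHUNK_DATA_SIZE)
        return NULL;            /* real code allocates a dedicated chunk */

    /* Start a fresh chunk if there is none, or the current one is full. */
    if (*chunks == NULL || CHUNK_DATA_SIZE - (*chunks)->used < size)
    {
        Chunk      *c = malloc(sizeof(Chunk));

        if (c == NULL)
            return NULL;
        c->used = 0;
        c->next = *chunks;
        *chunks = c;
    }

    ptr = (*chunks)->data + (*chunks)->used;
    (*chunks)->used += size;
    return ptr;
}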
    2962              : 
    2963              : /*
    2964              :  * Allocate space for a tuple in shared dense storage.  This is equivalent to
    2965              :  * dense_alloc but for Parallel Hash using shared memory.
    2966              :  *
    2967              :  * While loading a tuple into shared memory, we might run out of memory and
    2968              :  * decide to repartition, or determine that the load factor is too high and
    2969              :  * decide to expand the bucket array, or discover that another participant has
    2970              :  * commanded us to help do that.  Return NULL if the number of buckets or batches
    2971              :  * has changed, indicating that the caller must retry (considering the
    2972              :  * possibility that the tuple no longer belongs in the same batch).
    2973              :  */
    2974              : static HashJoinTuple
    2975      1190086 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
    2976              :                            dsa_pointer *shared)
    2977              : {
    2978      1190086 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    2979              :     dsa_pointer chunk_shared;
    2980              :     HashMemoryChunk chunk;
    2981              :     Size        chunk_size;
    2982              :     HashJoinTuple result;
    2983      1190086 :     int         curbatch = hashtable->curbatch;
    2984              : 
    2985      1190086 :     size = MAXALIGN(size);
    2986              : 
    2987              :     /*
    2988              :      * Fast path: if there is enough space in this backend's current chunk,
    2989              :      * then we can allocate without any locking.
    2990              :      */
    2991      1190086 :     chunk = hashtable->current_chunk;
    2992      1190086 :     if (chunk != NULL &&
    2993      1189590 :         size <= HASH_CHUNK_THRESHOLD &&
    2994      1189590 :         chunk->maxlen - chunk->used >= size)
    2995              :     {
    2996              : 
    2997      1188248 :         chunk_shared = hashtable->current_chunk_shared;
    2998              :         Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
    2999      1188248 :         *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
    3000      1188248 :         result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
    3001      1188248 :         chunk->used += size;
    3002              : 
    3003              :         Assert(chunk->used <= chunk->maxlen);
    3004              :         Assert(result == dsa_get_address(hashtable->area, *shared));
    3005              : 
    3006      1188248 :         return result;
    3007              :     }
    3008              : 
    3009              :     /* Slow path: try to allocate a new chunk. */
    3010         1838 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3011              : 
    3012              :     /*
    3013              :      * Check if we need to help increase the number of buckets or batches.
    3014              :      */
    3015         1838 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    3016         1814 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3017              :     {
    3018           60 :         ParallelHashGrowth growth = pstate->growth;
    3019              : 
    3020           60 :         hashtable->current_chunk = NULL;
    3021           60 :         LWLockRelease(&pstate->lock);
    3022              : 
    3023              :         /* Another participant has commanded us to help grow. */
    3024           60 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    3025           24 :             ExecParallelHashIncreaseNumBatches(hashtable);
    3026           36 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3027           36 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    3028              : 
    3029              :         /* The caller must retry. */
    3030           60 :         return NULL;
    3031              :     }
    3032              : 
    3033              :     /* Oversized tuples get their own chunk. */
    3034         1778 :     if (size > HASH_CHUNK_THRESHOLD)
    3035           24 :         chunk_size = size + HASH_CHUNK_HEADER_SIZE;
    3036              :     else
    3037         1754 :         chunk_size = HASH_CHUNK_SIZE;
    3038              : 
    3039              :     /* Check if it's time to grow batches or buckets. */
    3040         1778 :     if (pstate->growth != PHJ_GROWTH_DISABLED)
    3041              :     {
    3042              :         Assert(curbatch == 0);
    3043              :         Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    3044              : 
    3045              :         /*
    3046              :          * Check if our space limit would be exceeded.  To avoid choking on
    3047              :          * very large tuples or a very low hash_mem setting, we'll always allow
    3048              :          * each backend to allocate at least one chunk.
    3049              :          */
    3050          905 :         if (hashtable->batches[0].at_least_one_chunk &&
    3051          692 :             hashtable->batches[0].shared->size +
    3052          692 :             chunk_size > pstate->space_allowed)
    3053              :         {
    3054           18 :             pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3055           18 :             hashtable->batches[0].shared->space_exhausted = true;
    3056           18 :             LWLockRelease(&pstate->lock);
    3057              : 
    3058           18 :             return NULL;
    3059              :         }
    3060              : 
    3061              :         /* Check if our load factor limit would be exceeded. */
    3062          887 :         if (hashtable->nbatch == 1)
    3063              :         {
    3064          774 :             hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
    3065          774 :             hashtable->batches[0].ntuples = 0;
    3066              :             /* Guard against integer overflow and alloc size overflow */
    3067          774 :             if (hashtable->batches[0].shared->ntuples + 1 >
    3068          774 :                 hashtable->nbuckets * NTUP_PER_BUCKET &&
    3069           35 :                 hashtable->nbuckets < (INT_MAX / 2) &&
    3070           35 :                 hashtable->nbuckets * 2 <=
    3071              :                 MaxAllocSize / sizeof(dsa_pointer_atomic))
    3072              :             {
    3073           35 :                 pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
    3074           35 :                 LWLockRelease(&pstate->lock);
    3075              : 
    3076           35 :                 return NULL;
    3077              :             }
    3078              :         }
    3079              :     }
    3080              : 
    3081              :     /* We are cleared to allocate a new chunk. */
    3082         1725 :     chunk_shared = dsa_allocate(hashtable->area, chunk_size);
    3083         1725 :     hashtable->batches[curbatch].shared->size += chunk_size;
    3084         1725 :     hashtable->batches[curbatch].at_least_one_chunk = true;
    3085              : 
    3086              :     /* Set up the chunk. */
    3087         1725 :     chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
    3088         1725 :     *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
    3089         1725 :     chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
    3090         1725 :     chunk->used = size;
    3091              : 
    3092              :     /*
    3093              :      * Push it onto the list of chunks, so that it can be found if we need to
    3094              :      * increase the number of buckets or batches (batch 0 only) and later for
    3095              :      * freeing the memory (all batches).
    3096              :      */
    3097         1725 :     chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
    3098         1725 :     hashtable->batches[curbatch].shared->chunks = chunk_shared;
    3099              : 
    3100         1725 :     if (size <= HASH_CHUNK_THRESHOLD)
    3101              :     {
    3102              :         /*
    3103              :          * Make this the current chunk so that we can use the fast path to
    3104              :          * fill the rest of it up in future calls.
    3105              :          */
    3106         1707 :         hashtable->current_chunk = chunk;
    3107         1707 :         hashtable->current_chunk_shared = chunk_shared;
    3108              :     }
    3109         1725 :     LWLockRelease(&pstate->lock);
    3110              : 
    3111              :     Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
    3112         1725 :     result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
    3113              : 
    3114         1725 :     return result;
    3115              : }
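
The shape of this function is a two-tier allocator: a lock-free fast path that bumps within a chunk this backend already owns, and a locked slow path that does the shared bookkeeping needed to obtain a fresh chunk. A standalone pthreads sketch of that shape, assuming size never exceeds the chunk size and omitting the grow/retry logic:

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

#define CHUNK_DATA_SIZE 32768

typedef struct
{
    pthread_mutex_t lock;
    size_t      total_size;     /* shared accounting, protected by lock */
} SharedAlloc;

typedef struct
{
    char       *base;           /* this thread's current chunk, if any */
    size_t      used;
} LocalChunk;

static void *
two_tier_alloc(SharedAlloc *shared, LocalChunk *local, size_t size)
{
    void       *ptr;

    /* Fast path: bump within our own chunk, no locking required. */
    if (local->base != NULL && CHUNK_DATA_SIZE - local->used >= size)
    {
        ptr = local->base + local->used;
        local->used += size;
        return ptr;
    }

    /* Slow path: update shared accounting under the lock, then refill. */
    pthread_mutex_lock(&shared->lock);
    shared->total_size += CHUNK_DATA_SIZE;
    pthread_mutex_unlock(&shared->lock);

    local->base = malloc(CHUNK_DATA_SIZE);
    if (local->base == NULL)
        return NULL;
    local->used = size;
    return local->base;
}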
    3116              : 
    3117              : /*
    3118              :  * One backend needs to set up the shared batch state including tuplestores.
    3119              :  * Other backends will ensure they have correctly configured accessors by
    3120              :  * calling ExecParallelHashEnsureBatchAccessors().
    3121              :  */
    3122              : static void
    3123          111 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
    3124              : {
    3125          111 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3126              :     ParallelHashJoinBatch *batches;
    3127              :     MemoryContext oldcxt;
    3128              :     int         i;
    3129              : 
    3130              :     Assert(hashtable->batches == NULL);
    3131              : 
    3132              :     /* Allocate space. */
    3133          111 :     pstate->batches =
    3134          111 :         dsa_allocate0(hashtable->area,
    3135              :                       EstimateParallelHashJoinBatch(hashtable) * nbatch);
    3136          111 :     pstate->nbatch = nbatch;
    3137          111 :     batches = dsa_get_address(hashtable->area, pstate->batches);
    3138              : 
    3139              :     /*
    3140              :      * Use hash join spill memory context to allocate accessors, including
    3141              :      * buffers for the temporary files.
    3142              :      */
    3143          111 :     oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    3144              : 
    3145              :     /* Allocate this backend's accessor array. */
    3146          111 :     hashtable->nbatch = nbatch;
    3147          111 :     hashtable->batches =
    3148          111 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3149              : 
    3150              :     /* Set up the shared state, tuplestores and backend-local accessors. */
    3151          531 :     for (i = 0; i < hashtable->nbatch; ++i)
    3152              :     {
    3153          420 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3154          420 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3155              :         char        name[MAXPGPATH];
    3156              : 
    3157              :         /*
    3158              :          * All members of shared were zero-initialized.  We just need to set
    3159              :          * up the Barrier.
    3160              :          */
    3161          420 :         BarrierInit(&shared->batch_barrier, 0);
    3162          420 :         if (i == 0)
    3163              :         {
    3164              :             /* Batch 0 doesn't need to be loaded. */
    3165          111 :             BarrierAttach(&shared->batch_barrier);
    3166          444 :             while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
    3167          333 :                 BarrierArriveAndWait(&shared->batch_barrier, 0);
    3168          111 :             BarrierDetach(&shared->batch_barrier);
    3169              :         }
    3170              : 
    3171              :         /* Initialize accessor state.  All members were zero-initialized. */
    3172          420 :         accessor->shared = shared;
    3173              : 
    3174              :         /* Initialize the shared tuplestores. */
    3175          420 :         snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
    3176          420 :         accessor->inner_tuples =
    3177          420 :             sts_initialize(ParallelHashJoinBatchInner(shared),
    3178              :                            pstate->nparticipants,
    3179              :                            ParallelWorkerNumber + 1,
    3180              :                            sizeof(uint32),
    3181              :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3182              :                            &pstate->fileset,
    3183              :                            name);
    3184          420 :         snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
    3185          420 :         accessor->outer_tuples =
    3186          420 :             sts_initialize(ParallelHashJoinBatchOuter(shared,
    3187              :                                                       pstate->nparticipants),
    3188              :                            pstate->nparticipants,
    3189              :                            ParallelWorkerNumber + 1,
    3190              :                            sizeof(uint32),
    3191              :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3192              :                            &pstate->fileset,
    3193              :                            name);
    3194              :     }
    3195              : 
    3196          111 :     MemoryContextSwitchTo(oldcxt);
    3197          111 : }
    3198              : 
    3199              : /*
    3200              :  * Free the current set of ParallelHashJoinBatchAccessor objects.
    3201              :  */
    3202              : static void
    3203           32 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
    3204              : {
    3205              :     int         i;
    3206              : 
    3207           97 :     for (i = 0; i < hashtable->nbatch; ++i)
    3208              :     {
    3209              :         /* Make sure no files are left open. */
    3210           65 :         sts_end_write(hashtable->batches[i].inner_tuples);
    3211           65 :         sts_end_write(hashtable->batches[i].outer_tuples);
    3212           65 :         sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3213           65 :         sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3214              :     }
    3215           32 :     pfree(hashtable->batches);
    3216           32 :     hashtable->batches = NULL;
    3217           32 : }
    3218              : 
    3219              : /*
    3220              :  * Make sure this backend has up-to-date accessors for the current set of
    3221              :  * batches.
    3222              :  */
    3223              : static void
    3224          469 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
    3225              : {
    3226          469 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3227              :     ParallelHashJoinBatch *batches;
    3228              :     MemoryContext oldcxt;
    3229              :     int         i;
    3230              : 
    3231          469 :     if (hashtable->batches != NULL)
    3232              :     {
    3233          341 :         if (hashtable->nbatch == pstate->nbatch)
    3234          341 :             return;
    3235            0 :         ExecParallelHashCloseBatchAccessors(hashtable);
    3236              :     }
    3237              : 
    3238              :     /*
    3239              :      * We should never see a state where the batch-tracking array is freed,
    3240              :      * because we should have given up sooner if we join when the build
    3241              :      * barrier has reached the PHJ_BUILD_FREE phase.
    3242              :      */
    3243              :     Assert(DsaPointerIsValid(pstate->batches));
    3244              : 
    3245              :     /*
    3246              :      * Use hash join spill memory context to allocate accessors, including
    3247              :      * buffers for the temporary files.
    3248              :      */
    3249          128 :     oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    3250              : 
    3251              :     /* Allocate this backend's accessor array. */
    3252          128 :     hashtable->nbatch = pstate->nbatch;
    3253          128 :     hashtable->batches =
    3254          128 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3255              : 
    3256              :     /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
    3257              :     batches = (ParallelHashJoinBatch *)
    3258          128 :         dsa_get_address(hashtable->area, pstate->batches);
    3259              : 
    3260              :     /* Set up the accessor array and attach to the tuplestores. */
    3261          667 :     for (i = 0; i < hashtable->nbatch; ++i)
    3262              :     {
    3263          539 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3264          539 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3265              : 
    3266          539 :         accessor->shared = shared;
    3267          539 :         accessor->preallocated = 0;
    3268          539 :         accessor->done = false;
    3269          539 :         accessor->outer_eof = false;
    3270          539 :         accessor->inner_tuples =
    3271          539 :             sts_attach(ParallelHashJoinBatchInner(shared),
    3272              :                        ParallelWorkerNumber + 1,
    3273              :                        &pstate->fileset);
    3274          539 :         accessor->outer_tuples =
    3275          539 :             sts_attach(ParallelHashJoinBatchOuter(shared,
    3276              :                                                   pstate->nparticipants),
    3277              :                        ParallelWorkerNumber + 1,
    3278              :                        &pstate->fileset);
    3279              :     }
    3280              : 
    3281          128 :     MemoryContextSwitchTo(oldcxt);
    3282              : }
    3283              : 
    3284              : /*
    3285              :  * Allocate an empty shared memory hash table for a given batch.
    3286              :  */
    3287              : void
    3288          378 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
    3289              : {
    3290          378 :     ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
    3291              :     dsa_pointer_atomic *buckets;
    3292          378 :     int         nbuckets = hashtable->parallel_state->nbuckets;
    3293              :     int         i;
    3294              : 
    3295          378 :     batch->buckets =
    3296          378 :         dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
    3297              :     buckets = (dsa_pointer_atomic *)
    3298          378 :         dsa_get_address(hashtable->area, batch->buckets);
    3299      1997178 :     for (i = 0; i < nbuckets; ++i)
    3300      1996800 :         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    3301          378 : }
    3302              : 
    3303              : /*
    3304              :  * If we are currently attached to a shared hash join batch, detach.  If we
    3305              :  * are last to detach, clean up.
    3306              :  */
    3307              : void
    3308        13323 : ExecHashTableDetachBatch(HashJoinTable hashtable)
    3309              : {
    3310        13323 :     if (hashtable->parallel_state != NULL &&
    3311          665 :         hashtable->curbatch >= 0)
    3312              :     {
    3313          458 :         int         curbatch = hashtable->curbatch;
    3314          458 :         ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    3315          458 :         bool        attached = true;
    3316              : 
    3317              :         /* Make sure any temporary files are closed. */
    3318          458 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    3319          458 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    3320              : 
    3321              :         /* After attaching we always get at least to PHJ_BATCH_PROBE. */
    3322              :         Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
    3323              :                BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    3324              : 
    3325              :         /*
    3326              :          * If we're abandoning the PHJ_BATCH_PROBE phase early without having
    3327              :          * reached the end of it, it means the plan doesn't want any more
    3328              :          * tuples, and it is happy to abandon any tuples buffered in this
    3329              :          * process's subplans.  For correctness, we can't allow any process to
    3330              :          * execute the PHJ_BATCH_SCAN phase, because we will never have the
    3331              :          * complete set of match bits.  Therefore we skip emitting unmatched
    3332              :          * tuples in all backends (if this is a full/right join), as if those
    3333              :          * tuples were all due to be emitted by this process and it has
    3334              :          * abandoned them too.
    3335              :          */
    3336          458 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
    3337          425 :             !hashtable->batches[curbatch].outer_eof)
    3338              :         {
    3339              :             /*
    3340              :              * This flag may be written to by multiple backends during
    3341              :              * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
    3342              :              * phase so requires no extra locking.
    3343              :              */
    3344            0 :             batch->skip_unmatched = true;
    3345              :         }
    3346              : 
    3347              :         /*
    3348              :          * Even if we aren't doing a full/right outer join, we'll step through
    3349              :          * the PHJ_BATCH_SCAN phase just to maintain the invariant that
    3350              :          * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
    3351              :          */
    3352          458 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
    3353          425 :             attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
    3354          458 :         if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
    3355              :         {
    3356              :             /*
    3357              :              * We are no longer attached to the batch barrier, but we're the
    3358              :              * process that was chosen to free resources and it's safe to
    3359              :              * assert the current phase.  The ParallelHashJoinBatch can't go
    3360              :              * away underneath us while we are attached to the build barrier,
    3361              :              * making this access safe.
    3362              :              */
    3363              :             Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
    3364              : 
    3365              :             /* Free shared chunks and buckets. */
    3366         1959 :             while (DsaPointerIsValid(batch->chunks))
    3367              :             {
    3368              :                 HashMemoryChunk chunk =
    3369         1581 :                     dsa_get_address(hashtable->area, batch->chunks);
    3370         1581 :                 dsa_pointer next = chunk->next.shared;
    3371              : 
    3372         1581 :                 dsa_free(hashtable->area, batch->chunks);
    3373         1581 :                 batch->chunks = next;
    3374              :             }
    3375          378 :             if (DsaPointerIsValid(batch->buckets))
    3376              :             {
    3377          378 :                 dsa_free(hashtable->area, batch->buckets);
    3378          378 :                 batch->buckets = InvalidDsaPointer;
    3379              :             }
    3380              :         }
    3381              : 
    3382              :         /*
    3383              :          * Track the largest batch we've been attached to.  Though each
    3384              :          * backend might see a different subset of batches, explain.c will
    3385              :          * scan the results from all backends to find the largest value.
    3386              :          */
    3387          458 :         hashtable->spacePeak =
    3388          458 :             Max(hashtable->spacePeak,
    3389              :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    3390              : 
    3391              :         /* Remember that we are not attached to a batch. */
    3392          458 :         hashtable->curbatch = -1;
    3393              :     }
    3394        13323 : }
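
BarrierArriveAndDetach() returns true in exactly one participant, electing it to free the shared state. The same last-one-out election can be sketched with a bare atomic reference count (standalone C11; the real barrier additionally tracks phases):

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Detach from a shared resource.  Returns true for exactly one caller:
 * the last participant, which is thereby elected to clean up.
 */
static bool
detach_last(atomic_int *nattached)
{
    return atomic_fetch_sub(nattached, 1) == 1;
}

A caller would run the cleanup path only when detach_last() returns true, mirroring the if (BarrierArriveAndDetach(...)) blocks above.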
    3395              : 
    3396              : /*
    3397              :  * Detach from all shared resources.  If we are last to detach, clean up.
    3398              :  */
    3399              : void
    3400        12865 : ExecHashTableDetach(HashJoinTable hashtable)
    3401              : {
    3402        12865 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3403              : 
    3404              :     /*
    3405              :      * If we're involved in a parallel query, we must either have gotten all
    3406              :      * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
    3407              :      */
    3408              :     Assert(!pstate ||
    3409              :            BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
    3410              : 
    3411        12865 :     if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
    3412              :     {
    3413              :         int         i;
    3414              : 
    3415              :         /* Make sure any temporary files are closed. */
    3416          207 :         if (hashtable->batches)
    3417              :         {
    3418         1101 :             for (i = 0; i < hashtable->nbatch; ++i)
    3419              :             {
    3420          894 :                 sts_end_write(hashtable->batches[i].inner_tuples);
    3421          894 :                 sts_end_write(hashtable->batches[i].outer_tuples);
    3422          894 :                 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3423          894 :                 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3424              :             }
    3425              :         }
    3426              : 
    3427              :         /* If we're last to detach, clean up shared memory. */
    3428          207 :         if (BarrierArriveAndDetach(&pstate->build_barrier))
    3429              :         {
    3430              :             /*
    3431              :              * Late joining processes will see this state and give up
    3432              :              * immediately.
    3433              :              */
    3434              :             Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
    3435              : 
    3436           87 :             if (DsaPointerIsValid(pstate->batches))
    3437              :             {
    3438           87 :                 dsa_free(hashtable->area, pstate->batches);
    3439           87 :                 pstate->batches = InvalidDsaPointer;
    3440              :             }
    3441              :         }
    3442              :     }
    3443        12865 :     hashtable->parallel_state = NULL;
    3444        12865 : }
    3445              : 
    3446              : /*
    3447              :  * Get the first tuple in a given bucket identified by number.
    3448              :  */
    3449              : static inline HashJoinTuple
    3450      1390215 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
    3451              : {
    3452              :     HashJoinTuple tuple;
    3453              :     dsa_pointer p;
    3454              : 
    3455              :     Assert(hashtable->parallel_state);
    3456      1390215 :     p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
    3457      1390215 :     tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
    3458              : 
    3459      1390215 :     return tuple;
    3460              : }
    3461              : 
    3462              : /*
    3463              :  * Get the next tuple in the same bucket as 'tuple'.
    3464              :  */
    3465              : static inline HashJoinTuple
    3466      1866353 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
    3467              : {
    3468              :     HashJoinTuple next;
    3469              : 
    3470              :     Assert(hashtable->parallel_state);
    3471      1866353 :     next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
    3472              : 
    3473      1866353 :     return next;
    3474              : }
    3475              : 
    3476              : /*
    3477              :  * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
    3478              :  */
    3479              : static inline void
    3480      1293859 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
    3481              :                           HashJoinTuple tuple,
    3482              :                           dsa_pointer tuple_shared)
    3483              : {
    3484              :     for (;;)
    3485              :     {
    3486      1293859 :         tuple->next.shared = dsa_pointer_atomic_read(head);
    3487      1293859 :         if (dsa_pointer_atomic_compare_exchange(head,
    3488      1293859 :                                                 &tuple->next.shared,
    3489              :                                                 tuple_shared))
    3490      1293859 :             break;
    3491              :     }
    3492      1293859 : }
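
This is a lock-free push onto an intrusive linked list (a Treiber-stack insert). On compare-exchange failure the expected value, tuple->next.shared, is reloaded with the current head, so the loop simply retries with fresh state. The same idiom in standalone C11 atomics:

#include <stdatomic.h>

typedef struct Node
{
    struct Node *next;
    int         value;
} Node;

static void
push(_Atomic(Node *) *head, Node *node)
{
    node->next = atomic_load(head);
    /* On failure, node->next is refreshed with the current head value. */
    while (!atomic_compare_exchange_weak(head, &node->next, node))
        ;
}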
    3493              : 
    3494              : /*
    3495              :  * Prepare to work on a given batch.
    3496              :  */
    3497              : void
    3498         1040 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
    3499              : {
    3500              :     Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
    3501              : 
    3502         1040 :     hashtable->curbatch = batchno;
    3503         1040 :     hashtable->buckets.shared = (dsa_pointer_atomic *)
    3504         1040 :         dsa_get_address(hashtable->area,
    3505         1040 :                         hashtable->batches[batchno].shared->buckets);
    3506         1040 :     hashtable->nbuckets = hashtable->parallel_state->nbuckets;
    3507         1040 :     hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
    3508         1040 :     hashtable->current_chunk = NULL;
    3509         1040 :     hashtable->current_chunk_shared = InvalidDsaPointer;
    3510         1040 :     hashtable->batches[batchno].at_least_one_chunk = false;
    3511         1040 : }
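
pg_ceil_log2_32() yields the number of bits needed to index nbuckets buckets, i.e. the smallest k with (1 << k) >= n. A portable standalone equivalent:

#include <stdint.h>

/* Smallest k such that (1 << k) >= n, for n >= 1. */
static inline unsigned
ceil_log2_u32(uint32_t n)
{
    unsigned    k = 0;
    uint32_t    v = n - 1;

    while (v != 0)
    {
        v >>= 1;
        k++;
    }
    return k;
}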
    3512              : 
    3513              : /*
    3514              :  * Take the next available chunk from the queue of chunks being worked on in
    3515              :  * parallel.  Return NULL if there are none left.  Otherwise return a pointer
    3516              :  * to the chunk, and set *shared to the DSA pointer to the chunk.
    3517              :  */
    3518              : static HashMemoryChunk
    3519          341 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
    3520              : {
    3521          341 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3522              :     HashMemoryChunk chunk;
    3523              : 
    3524          341 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3525          341 :     if (DsaPointerIsValid(pstate->chunk_work_queue))
    3526              :     {
    3527          271 :         *shared = pstate->chunk_work_queue;
    3528              :         chunk = (HashMemoryChunk)
    3529          271 :             dsa_get_address(hashtable->area, *shared);
    3530          271 :         pstate->chunk_work_queue = chunk->next.shared;
    3531              :     }
    3532              :     else
    3533           70 :         chunk = NULL;
    3534          341 :     LWLockRelease(&pstate->lock);
    3535              : 
    3536          341 :     return chunk;
    3537              : }
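
The chunk work queue is an intrusive singly linked list in shared memory, popped under the lock that protects the rest of the parallel-hash state. A standalone pthreads sketch of the same pop:

#include <pthread.h>
#include <stddef.h>

typedef struct Work
{
    struct Work *next;
} Work;

typedef struct
{
    pthread_mutex_t lock;
    Work       *queue;          /* head of the list of pending work */
} WorkQueue;

/* Take the next item, or return NULL if none are left. */
static Work *
pop_work(WorkQueue *wq)
{
    Work       *w;

    pthread_mutex_lock(&wq->lock);
    w = wq->queue;
    if (w != NULL)
        wq->queue = w->next;
    pthread_mutex_unlock(&wq->lock);

    return w;
}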
    3538              : 
    3539              : /*
    3540              :  * Increase the space preallocated in this backend for a given inner batch by
    3541              :  * at least a given amount.  This allows us to track whether a given batch
    3542              :  * would fit in memory when loaded back in.  Also increase the number of
    3543              :  * batches or buckets if required.
    3544              :  *
    3545              :  * This maintains a running estimate of how much space will be taken when we
    3546              :  * load the batch back into memory by simulating the way chunks will be handed
    3547              :  * out to workers.  It's not perfectly accurate because the tuples will be
    3548              :  * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
    3549              :  * it should be pretty close.  It tends to overestimate by a fraction of a
    3550              :  * chunk per worker since all workers gang up to preallocate during hashing,
    3551              :  * but workers tend to reload batches alone if there are enough to go around,
    3552              :  * leaving fewer partially filled chunks.  This effect is bounded by
    3553              :  * nparticipants.
    3554              :  *
    3555              :  * Return false if the number of batches or buckets has changed, and the
    3556              :  * caller should reconsider which batch a given tuple now belongs in and call
    3557              :  * again.
    3558              :  */
    3559              : static bool
    3560          868 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
    3561              : {
    3562          868 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3563          868 :     ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
    3564          868 :     size_t      want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
    3565              : 
    3566              :     Assert(batchno > 0);
    3567              :     Assert(batchno < hashtable->nbatch);
    3568              :     Assert(size == MAXALIGN(size));
    3569              : 
    3570          868 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3571              : 
    3572              :     /* Has another participant commanded us to help grow? */
    3573          868 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    3574          860 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3575              :     {
    3576            8 :         ParallelHashGrowth growth = pstate->growth;
    3577              : 
    3578            8 :         LWLockRelease(&pstate->lock);
    3579            8 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    3580            8 :             ExecParallelHashIncreaseNumBatches(hashtable);
    3581            0 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3582            0 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    3583              : 
    3584            8 :         return false;
    3585              :     }
    3586              : 
    3587          860 :     if (pstate->growth != PHJ_GROWTH_DISABLED &&
    3588          745 :         batch->at_least_one_chunk &&
    3589          229 :         (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
    3590          229 :          > pstate->space_allowed))
    3591              :     {
    3592              :         /*
    3593              :          * We have determined that this batch would exceed the space budget if
    3594              :          * loaded into memory.  Command all participants to help repartition.
    3595              :          */
    3596            6 :         batch->shared->space_exhausted = true;
    3597            6 :         pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3598            6 :         LWLockRelease(&pstate->lock);
    3599              : 
    3600            6 :         return false;
    3601              :     }
    3602              : 
    3603          854 :     batch->at_least_one_chunk = true;
    3604          854 :     batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
    3605          854 :     batch->preallocated = want;
    3606          854 :     LWLockRelease(&pstate->lock);
    3607              : 
    3608          854 :     return true;
    3609              : }
    3610              : 
    3611              : /*
    3612              :  * Calculate the limit on how much memory can be used by Hash and similar
    3613              :  * plan types.  This is work_mem times hash_mem_multiplier, and is
    3614              :  * expressed in bytes.
    3615              :  *
    3616              :  * Exported for use by the planner, as well as other hash-like executor
    3617              :  * nodes.  This is a rather random place for this, but there is no better
    3618              :  * place.
    3619              :  */
    3620              : size_t
    3621       958379 : get_hash_memory_limit(void)
    3622              : {
    3623              :     double      mem_limit;
    3624              : 
    3625              :     /* Do initial calculation in double arithmetic */
    3626       958379 :     mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
    3627              : 
    3628              :     /* Clamp in case it doesn't fit in size_t */
    3629       958379 :     mem_limit = Min(mem_limit, (double) SIZE_MAX);
    3630              : 
    3631       958379 :     return (size_t) mem_limit;
    3632              : }
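
Doing the multiplication in double and clamping before the narrowing cast is what keeps a large work_mem × hash_mem_multiplier product from overflowing size_t. A standalone sketch of the technique, with work_mem_kb and multiplier standing in for the GUCs:

#include <stddef.h>
#include <stdint.h>

static size_t
hash_mem_bytes(int work_mem_kb, double multiplier)
{
    /* Do the calculation in double so that it cannot wrap around. */
    double      bytes = (double) work_mem_kb * multiplier * 1024.0;

    /* Clamp before the narrowing cast, in case it doesn't fit in size_t. */
    if (bytes > (double) SIZE_MAX)
        bytes = (double) SIZE_MAX;
    return (size_t) bytes;
}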
        

Generated by: LCOV version 2.0-1