LCOV - code coverage report
Current view: top level - src/backend/executor - nodeHash.c (source / functions)
Test:         PostgreSQL 18devel
Date:         2024-12-03 09:15:01
Coverage:                  Hit   Total   Coverage
              Lines:      1039    1090     95.3 %
              Functions:    53      54     98.1 %
Legend:       Lines: hit | not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeHash.c
       4             :  *    Routines to hash relations for hashjoin
       5             :  *
       6             :  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/executor/nodeHash.c
      12             :  *
      13             :  * See note on parallelism in nodeHashjoin.c.
      14             :  *
      15             :  *-------------------------------------------------------------------------
      16             :  */
      17             : /*
      18             :  * INTERFACE ROUTINES
      19             :  *      MultiExecHash   - generate an in-memory hash table of the relation
      20             :  *      ExecInitHash    - initialize node and subnodes
      21             :  *      ExecEndHash     - shutdown node and subnodes
      22             :  */
      23             : 
      24             : #include "postgres.h"
      25             : 
      26             : #include <math.h>
      27             : #include <limits.h>
      28             : 
      29             : #include "access/htup_details.h"
      30             : #include "access/parallel.h"
      31             : #include "catalog/pg_statistic.h"
      32             : #include "commands/tablespace.h"
      33             : #include "executor/executor.h"
      34             : #include "executor/hashjoin.h"
      35             : #include "executor/nodeHash.h"
      36             : #include "executor/nodeHashjoin.h"
      37             : #include "miscadmin.h"
      38             : #include "port/pg_bitutils.h"
      39             : #include "utils/dynahash.h"
      40             : #include "utils/lsyscache.h"
      41             : #include "utils/memutils.h"
      42             : #include "utils/syscache.h"
      43             : #include "utils/wait_event.h"
      44             : 
      45             : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
      46             : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
      47             : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
      48             : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
      49             : static void ExecHashBuildSkewHash(HashState *hashstate,
      50             :                                   HashJoinTable hashtable, Hash *node,
      51             :                                   int mcvsToUse);
      52             : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
      53             :                                     TupleTableSlot *slot,
      54             :                                     uint32 hashvalue,
      55             :                                     int bucketNumber);
      56             : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
      57             : 
      58             : static void *dense_alloc(HashJoinTable hashtable, Size size);
      59             : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
      60             :                                                 size_t size,
      61             :                                                 dsa_pointer *shared);
      62             : static void MultiExecPrivateHash(HashState *node);
      63             : static void MultiExecParallelHash(HashState *node);
      64             : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
      65             :                                                        int bucketno);
      66             : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
      67             :                                                       HashJoinTuple tuple);
      68             : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
      69             :                                              HashJoinTuple tuple,
      70             :                                              dsa_pointer tuple_shared);
      71             : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
      72             : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
      73             : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
      74             : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
      75             : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
      76             :                                                      dsa_pointer *shared);
      77             : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
      78             :                                           int batchno,
      79             :                                           size_t size);
      80             : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
      81             : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
      82             : 
      83             : 
      84             : /* ----------------------------------------------------------------
      85             :  *      ExecHash
      86             :  *
      87             :  *      stub for pro forma compliance
      88             :  * ----------------------------------------------------------------
      89             :  */
      90             : static TupleTableSlot *
      91           0 : ExecHash(PlanState *pstate)
      92             : {
      93           0 :     elog(ERROR, "Hash node does not support ExecProcNode call convention");
      94             :     return NULL;
      95             : }
      96             : 
      97             : /* ----------------------------------------------------------------
      98             :  *      MultiExecHash
      99             :  *
     100             :  *      build hash table for hashjoin, doing partitioning if more
     101             :  *      than one batch is required.
     102             :  * ----------------------------------------------------------------
     103             :  */
     104             : Node *
     105       18604 : MultiExecHash(HashState *node)
     106             : {
     107             :     /* must provide our own instrumentation support */
     108       18604 :     if (node->ps.instrument)
     109         296 :         InstrStartNode(node->ps.instrument);
     110             : 
     111       18604 :     if (node->parallel_state != NULL)
     112         396 :         MultiExecParallelHash(node);
     113             :     else
     114       18208 :         MultiExecPrivateHash(node);
     115             : 
     116             :     /* must provide our own instrumentation support */
     117       18604 :     if (node->ps.instrument)
     118         296 :         InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
     119             : 
     120             :     /*
     121             :      * We do not return the hash table directly because it's not a subtype of
     122             :      * Node, and so would violate the MultiExecProcNode API.  Instead, our
     123             :      * parent Hashjoin node is expected to know how to fish it out of our node
     124             :      * state.  Ugly but not really worth cleaning up, since Hashjoin knows
     125             :      * quite a bit more about Hash besides that.
     126             :      */
     127       18604 :     return NULL;
     128             : }
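
/*
 * Editorial sketch (not part of the instrumented nodeHash.c source): how the
 * parent Hash Join node is expected to "fish" the finished hash table out of
 * the Hash node's state, since MultiExecHash() itself returns NULL.  The
 * hjstate variable and surrounding control flow are illustrative only;
 * nodeHashjoin.c additionally creates the table with ExecHashTableCreate()
 * before driving the build.
 */
HashState  *hashNode = (HashState *) innerPlanState(hjstate);
HashJoinTable hashtable;

/* run the Hash node to completion; the returned Node * is always NULL */
(void) MultiExecProcNode((PlanState *) hashNode);

/* retrieve the built table from the Hash node's executor state instead */
hashtable = hashNode->hashtable;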
     129             : 
     130             : /* ----------------------------------------------------------------
     131             :  *      MultiExecPrivateHash
     132             :  *
     133             :  *      parallel-oblivious version, building a backend-private
     134             :  *      hash table and (if necessary) batch files.
     135             :  * ----------------------------------------------------------------
     136             :  */
     137             : static void
     138       18208 : MultiExecPrivateHash(HashState *node)
     139             : {
     140             :     PlanState  *outerNode;
     141             :     HashJoinTable hashtable;
     142             :     TupleTableSlot *slot;
     143             :     ExprContext *econtext;
     144             : 
     145             :     /*
     146             :      * get state info from node
     147             :      */
     148       18208 :     outerNode = outerPlanState(node);
     149       18208 :     hashtable = node->hashtable;
     150             : 
     151             :     /*
     152             :      * set expression context
     153             :      */
     154       18208 :     econtext = node->ps.ps_ExprContext;
     155             : 
     156             :     /*
     157             :      * Get all tuples from the node below the Hash node and insert into the
     158             :      * hash table (or temp files).
     159             :      */
     160             :     for (;;)
     161     7548408 :     {
     162             :         bool        isnull;
     163             :         Datum       hashdatum;
     164             : 
     165     7566616 :         slot = ExecProcNode(outerNode);
     166     7566616 :         if (TupIsNull(slot))
     167             :             break;
     168             :         /* We have to compute the hash value */
     169     7548408 :         econtext->ecxt_outertuple = slot;
     170             : 
     171     7548408 :         ResetExprContext(econtext);
     172             : 
     173     7548408 :         hashdatum = ExecEvalExprSwitchContext(node->hash_expr, econtext,
     174             :                                               &isnull);
     175             : 
     176     7548408 :         if (!isnull)
     177             :         {
     178     7548396 :             uint32      hashvalue = DatumGetUInt32(hashdatum);
     179             :             int         bucketNumber;
     180             : 
     181     7548396 :             bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
     182     7548396 :             if (bucketNumber != INVALID_SKEW_BUCKET_NO)
     183             :             {
     184             :                 /* It's a skew tuple, so put it into that hash table */
     185         588 :                 ExecHashSkewTableInsert(hashtable, slot, hashvalue,
     186             :                                         bucketNumber);
     187         588 :                 hashtable->skewTuples += 1;
     188             :             }
     189             :             else
     190             :             {
     191             :                 /* Not subject to skew optimization, so insert normally */
     192     7547808 :                 ExecHashTableInsert(hashtable, slot, hashvalue);
     193             :             }
     194     7548396 :             hashtable->totalTuples += 1;
     195             :         }
     196             :     }
     197             : 
     198             :     /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
     199       18208 :     if (hashtable->nbuckets != hashtable->nbuckets_optimal)
     200          72 :         ExecHashIncreaseNumBuckets(hashtable);
     201             : 
     202             :     /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
     203       18208 :     hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
     204       18208 :     if (hashtable->spaceUsed > hashtable->spacePeak)
     205       18182 :         hashtable->spacePeak = hashtable->spaceUsed;
     206             : 
     207       18208 :     hashtable->partialTuples = hashtable->totalTuples;
     208       18208 : }
     209             : 
     210             : /* ----------------------------------------------------------------
     211             :  *      MultiExecParallelHash
     212             :  *
     213             :  *      parallel-aware version, building a shared hash table and
     214             :  *      (if necessary) batch files using the combined effort of
     215             :  *      a set of co-operating backends.
     216             :  * ----------------------------------------------------------------
     217             :  */
     218             : static void
     219         396 : MultiExecParallelHash(HashState *node)
     220             : {
     221             :     ParallelHashJoinState *pstate;
     222             :     PlanState  *outerNode;
     223             :     HashJoinTable hashtable;
     224             :     TupleTableSlot *slot;
     225             :     ExprContext *econtext;
     226             :     uint32      hashvalue;
     227             :     Barrier    *build_barrier;
     228             :     int         i;
     229             : 
     230             :     /*
     231             :      * get state info from node
     232             :      */
     233         396 :     outerNode = outerPlanState(node);
     234         396 :     hashtable = node->hashtable;
     235             : 
     236             :     /*
     237             :      * set expression context
     238             :      */
     239         396 :     econtext = node->ps.ps_ExprContext;
     240             : 
     241             :     /*
     242             :      * Synchronize the parallel hash table build.  At this stage we know that
     243             :      * the shared hash table has been or is being set up by
     244             :      * ExecHashTableCreate(), but we don't know if our peers have returned
     245             :      * from there or are here in MultiExecParallelHash(), and if so how far
     246             :      * through they are.  To find out, we check the build_barrier phase then
     247             :      * and jump to the right step in the build algorithm.
     248             :      */
     249         396 :     pstate = hashtable->parallel_state;
     250         396 :     build_barrier = &pstate->build_barrier;
     251             :     Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
     252         396 :     switch (BarrierPhase(build_barrier))
     253             :     {
     254         168 :         case PHJ_BUILD_ALLOCATE:
     255             : 
     256             :             /*
     257             :              * Either I just allocated the initial hash table in
     258             :              * ExecHashTableCreate(), or someone else is doing that.  Either
     259             :              * way, wait for everyone to arrive here so we can proceed.
     260             :              */
     261         168 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
     262             :             /* Fall through. */
     263             : 
     264         246 :         case PHJ_BUILD_HASH_INNER:
     265             : 
     266             :             /*
     267             :              * It's time to begin hashing, or if we just arrived here then
     268             :              * hashing is already underway, so join in that effort.  While
     269             :              * hashing we have to be prepared to help increase the number of
     270             :              * batches or buckets at any time, and if we arrived here when
     271             :              * that was already underway we'll have to help complete that work
     272             :              * immediately so that it's safe to access batches and buckets
     273             :              * below.
     274             :              */
     275         246 :             if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
     276             :                 PHJ_GROW_BATCHES_ELECT)
     277           0 :                 ExecParallelHashIncreaseNumBatches(hashtable);
     278         246 :             if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
     279             :                 PHJ_GROW_BUCKETS_ELECT)
     280           2 :                 ExecParallelHashIncreaseNumBuckets(hashtable);
     281         246 :             ExecParallelHashEnsureBatchAccessors(hashtable);
     282         246 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
     283             :             for (;;)
     284     2160132 :             {
     285             :                 bool        isnull;
     286             : 
     287     2160378 :                 slot = ExecProcNode(outerNode);
     288     2160378 :                 if (TupIsNull(slot))
     289             :                     break;
     290     2160132 :                 econtext->ecxt_outertuple = slot;
     291             : 
     292     2160132 :                 ResetExprContext(econtext);
     293             : 
     294     2160132 :                 hashvalue = DatumGetUInt32(ExecEvalExprSwitchContext(node->hash_expr,
     295             :                                                                      econtext,
     296             :                                                                      &isnull));
     297             : 
     298     2160132 :                 if (!isnull)
     299     2160132 :                     ExecParallelHashTableInsert(hashtable, slot, hashvalue);
     300     2160132 :                 hashtable->partialTuples++;
     301             :             }
     302             : 
     303             :             /*
     304             :              * Make sure that any tuples we wrote to disk are visible to
     305             :              * others before anyone tries to load them.
     306             :              */
     307        1400 :             for (i = 0; i < hashtable->nbatch; ++i)
     308        1154 :                 sts_end_write(hashtable->batches[i].inner_tuples);
     309             : 
     310             :             /*
     311             :              * Update shared counters.  We need an accurate total tuple count
     312             :              * to control the empty table optimization.
     313             :              */
     314         246 :             ExecParallelHashMergeCounters(hashtable);
     315             : 
     316         246 :             BarrierDetach(&pstate->grow_buckets_barrier);
     317         246 :             BarrierDetach(&pstate->grow_batches_barrier);
     318             : 
     319             :             /*
     320             :              * Wait for everyone to finish building and flushing files and
     321             :              * counters.
     322             :              */
     323         246 :             if (BarrierArriveAndWait(build_barrier,
     324             :                                      WAIT_EVENT_HASH_BUILD_HASH_INNER))
     325             :             {
     326             :                 /*
     327             :                  * Elect one backend to disable any further growth.  Batches
     328             :                  * are now fixed.  While building them we made sure they'd fit
     329             :                  * in our memory budget when we load them back in later (or we
     330             :                  * tried to do that and gave up because we detected extreme
     331             :                  * skew).
     332             :                  */
     333         168 :                 pstate->growth = PHJ_GROWTH_DISABLED;
     334             :             }
     335             :     }
     336             : 
     337             :     /*
     338             :      * We're not yet attached to a batch.  We all agree on the dimensions and
     339             :      * number of inner tuples (for the empty table optimization).
     340             :      */
     341         396 :     hashtable->curbatch = -1;
     342         396 :     hashtable->nbuckets = pstate->nbuckets;
     343         396 :     hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
     344         396 :     hashtable->totalTuples = pstate->total_tuples;
     345             : 
     346             :     /*
     347             :      * Unless we're completely done and the batch state has been freed, make
     348             :      * sure we have accessors.
     349             :      */
     350         396 :     if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
     351         396 :         ExecParallelHashEnsureBatchAccessors(hashtable);
     352             : 
     353             :     /*
     354             :      * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
     355             :      * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
     356             :      * there already).
     357             :      */
     358             :     Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
     359             :            BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
     360             :            BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
     361         396 : }
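
/*
 * Editorial sketch (not part of the instrumented source): the barrier-driven
 * "jump to the right phase" pattern used above, in compressed form.  A
 * late-attaching worker inspects BarrierPhase() and either helps with the
 * remaining steps (falling through the switch) or skips steps its peers have
 * already completed.  BarrierArriveAndWait() returns true in exactly one
 * backend, which performs any serial work for that phase.  The MY_PHASE_*
 * constants and do_*() helpers are hypothetical placeholders.
 */
switch (BarrierPhase(build_barrier))
{
    case MY_PHASE_ELECT:
        if (BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
            do_serial_setup();      /* one elected backend only */
        /* FALLTHROUGH */
    case MY_PHASE_HASH_INNER:
        do_parallel_hashing();      /* every attached backend helps */
        BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_HASH_INNER);
        break;
    default:
        /* attached too late to help; later phases are handled elsewhere */
        break;
}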
     362             : 
     363             : /* ----------------------------------------------------------------
     364             :  *      ExecInitHash
     365             :  *
     366             :  *      Init routine for Hash node
     367             :  * ----------------------------------------------------------------
     368             :  */
     369             : HashState *
     370       29082 : ExecInitHash(Hash *node, EState *estate, int eflags)
     371             : {
     372             :     HashState  *hashstate;
     373             : 
     374             :     /* check for unsupported flags */
     375             :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
     376             : 
     377             :     /*
     378             :      * create state structure
     379             :      */
     380       29082 :     hashstate = makeNode(HashState);
     381       29082 :     hashstate->ps.plan = (Plan *) node;
     382       29082 :     hashstate->ps.state = estate;
     383       29082 :     hashstate->ps.ExecProcNode = ExecHash;
     384             :     /* delay building hashtable until ExecHashTableCreate() in executor run */
     385       29082 :     hashstate->hashtable = NULL;
     386             : 
     387             :     /*
     388             :      * Miscellaneous initialization
     389             :      *
     390             :      * create expression context for node
     391             :      */
     392       29082 :     ExecAssignExprContext(estate, &hashstate->ps);
     393             : 
     394             :     /*
     395             :      * initialize child nodes
     396             :      */
     397       29082 :     outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
     398             : 
     399             :     /*
     400             :      * initialize our result slot and type. No need to build projection
     401             :      * because this node doesn't do projections.
     402             :      */
     403       29082 :     ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
     404       29082 :     hashstate->ps.ps_ProjInfo = NULL;
     405             : 
     406             :     Assert(node->plan.qual == NIL);
     407             : 
     408             :     /*
     409             :      * Delay initialization of hash_expr until ExecInitHashJoin().  We cannot
     410             :      * build the ExprState here as we don't yet know the join type we're going
     411             :      * to be hashing values for and we need to know that before calling
     412             :      * ExecBuildHash32Expr as the keep_nulls parameter depends on the join
     413             :      * type.
     414             :      */
     415       29082 :     hashstate->hash_expr = NULL;
     416             : 
     417       29082 :     return hashstate;
     418             : }
     419             : 
     420             : /* ---------------------------------------------------------------
     421             :  *      ExecEndHash
     422             :  *
     423             :  *      clean up routine for Hash node
     424             :  * ----------------------------------------------------------------
     425             :  */
     426             : void
     427       28976 : ExecEndHash(HashState *node)
     428             : {
     429             :     PlanState  *outerPlan;
     430             : 
     431             :     /*
     432             :      * shut down the subplan
     433             :      */
     434       28976 :     outerPlan = outerPlanState(node);
     435       28976 :     ExecEndNode(outerPlan);
     436       28976 : }
     437             : 
     438             : 
     439             : /* ----------------------------------------------------------------
     440             :  *      ExecHashTableCreate
     441             :  *
     442             :  *      create an empty hashtable data structure for hashjoin.
     443             :  * ----------------------------------------------------------------
     444             :  */
     445             : HashJoinTable
     446       18604 : ExecHashTableCreate(HashState *state)
     447             : {
     448             :     Hash       *node;
     449             :     HashJoinTable hashtable;
     450             :     Plan       *outerNode;
     451             :     size_t      space_allowed;
     452             :     int         nbuckets;
     453             :     int         nbatch;
     454             :     double      rows;
     455             :     int         num_skew_mcvs;
     456             :     int         log2_nbuckets;
     457             :     MemoryContext oldcxt;
     458             : 
     459             :     /*
     460             :      * Get information about the size of the relation to be hashed (it's the
     461             :      * "outer" subtree of this node, but the inner relation of the hashjoin).
     462             :      * Compute the appropriate size of the hash table.
     463             :      */
     464       18604 :     node = (Hash *) state->ps.plan;
     465       18604 :     outerNode = outerPlan(node);
     466             : 
     467             :     /*
      468             :      * If this is a shared hash table with a partial plan, then we can't use
     469             :      * outerNode->plan_rows to estimate its size.  We need an estimate of the
     470             :      * total number of rows across all copies of the partial plan.
     471             :      */
     472       18604 :     rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
     473             : 
     474       18208 :     ExecChooseHashTableSize(rows, outerNode->plan_width,
     475       18604 :                             OidIsValid(node->skewTable),
     476       18604 :                             state->parallel_state != NULL,
     477       18604 :                             state->parallel_state != NULL ?
     478         396 :                             state->parallel_state->nparticipants - 1 : 0,
     479             :                             &space_allowed,
     480             :                             &nbuckets, &nbatch, &num_skew_mcvs);
     481             : 
     482             :     /* nbuckets must be a power of 2 */
     483       18604 :     log2_nbuckets = my_log2(nbuckets);
     484             :     Assert(nbuckets == (1 << log2_nbuckets));
     485             : 
     486             :     /*
     487             :      * Initialize the hash table control block.
     488             :      *
     489             :      * The hashtable control block is just palloc'd from the executor's
     490             :      * per-query memory context.  Everything else should be kept inside the
     491             :      * subsidiary hashCxt, batchCxt or spillCxt.
     492             :      */
     493       18604 :     hashtable = palloc_object(HashJoinTableData);
     494       18604 :     hashtable->nbuckets = nbuckets;
     495       18604 :     hashtable->nbuckets_original = nbuckets;
     496       18604 :     hashtable->nbuckets_optimal = nbuckets;
     497       18604 :     hashtable->log2_nbuckets = log2_nbuckets;
     498       18604 :     hashtable->log2_nbuckets_optimal = log2_nbuckets;
     499       18604 :     hashtable->buckets.unshared = NULL;
     500       18604 :     hashtable->skewEnabled = false;
     501       18604 :     hashtable->skewBucket = NULL;
     502       18604 :     hashtable->skewBucketLen = 0;
     503       18604 :     hashtable->nSkewBuckets = 0;
     504       18604 :     hashtable->skewBucketNums = NULL;
     505       18604 :     hashtable->nbatch = nbatch;
     506       18604 :     hashtable->curbatch = 0;
     507       18604 :     hashtable->nbatch_original = nbatch;
     508       18604 :     hashtable->nbatch_outstart = nbatch;
     509       18604 :     hashtable->growEnabled = true;
     510       18604 :     hashtable->totalTuples = 0;
     511       18604 :     hashtable->partialTuples = 0;
     512       18604 :     hashtable->skewTuples = 0;
     513       18604 :     hashtable->innerBatchFile = NULL;
     514       18604 :     hashtable->outerBatchFile = NULL;
     515       18604 :     hashtable->spaceUsed = 0;
     516       18604 :     hashtable->spacePeak = 0;
     517       18604 :     hashtable->spaceAllowed = space_allowed;
     518       18604 :     hashtable->spaceUsedSkew = 0;
     519       18604 :     hashtable->spaceAllowedSkew =
     520       18604 :         hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
     521       18604 :     hashtable->chunks = NULL;
     522       18604 :     hashtable->current_chunk = NULL;
     523       18604 :     hashtable->parallel_state = state->parallel_state;
     524       18604 :     hashtable->area = state->ps.state->es_query_dsa;
     525       18604 :     hashtable->batches = NULL;
     526             : 
     527             : #ifdef HJDEBUG
     528             :     printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
     529             :            hashtable, nbatch, nbuckets);
     530             : #endif
     531             : 
     532             :     /*
     533             :      * Create temporary memory contexts in which to keep the hashtable working
     534             :      * storage.  See notes in executor/hashjoin.h.
     535             :      */
     536       18604 :     hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
     537             :                                                "HashTableContext",
     538             :                                                ALLOCSET_DEFAULT_SIZES);
     539             : 
     540       18604 :     hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
     541             :                                                 "HashBatchContext",
     542             :                                                 ALLOCSET_DEFAULT_SIZES);
     543             : 
     544       18604 :     hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
     545             :                                                 "HashSpillContext",
     546             :                                                 ALLOCSET_DEFAULT_SIZES);
     547             : 
     548             :     /* Allocate data that will live for the life of the hashjoin */
     549             : 
     550       18604 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
     551             : 
     552       18604 :     if (nbatch > 1 && hashtable->parallel_state == NULL)
     553             :     {
     554             :         MemoryContext oldctx;
     555             : 
     556             :         /*
     557             :          * allocate and initialize the file arrays in hashCxt (not needed for
     558             :          * parallel case which uses shared tuplestores instead of raw files)
     559             :          */
     560         106 :         oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
     561             : 
     562         106 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
     563         106 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
     564             : 
     565         106 :         MemoryContextSwitchTo(oldctx);
     566             : 
     567             :         /* The files will not be opened until needed... */
     568             :         /* ... but make sure we have temp tablespaces established for them */
     569         106 :         PrepareTempTablespaces();
     570             :     }
     571             : 
     572       18604 :     MemoryContextSwitchTo(oldcxt);
     573             : 
     574       18604 :     if (hashtable->parallel_state)
     575             :     {
     576         396 :         ParallelHashJoinState *pstate = hashtable->parallel_state;
     577             :         Barrier    *build_barrier;
     578             : 
     579             :         /*
     580             :          * Attach to the build barrier.  The corresponding detach operation is
     581             :          * in ExecHashTableDetach.  Note that we won't attach to the
     582             :          * batch_barrier for batch 0 yet.  We'll attach later and start it out
     583             :          * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
     584             :          * then loaded while hashing (the standard hybrid hash join
     585             :          * algorithm), and we'll coordinate that using build_barrier.
     586             :          */
     587         396 :         build_barrier = &pstate->build_barrier;
     588         396 :         BarrierAttach(build_barrier);
     589             : 
     590             :         /*
     591             :          * So far we have no idea whether there are any other participants,
     592             :          * and if so, what phase they are working on.  The only thing we care
     593             :          * about at this point is whether someone has already created the
     594             :          * SharedHashJoinBatch objects and the hash table for batch 0.  One
     595             :          * backend will be elected to do that now if necessary.
     596             :          */
     597         564 :         if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
     598         168 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
     599             :         {
     600         168 :             pstate->nbatch = nbatch;
     601         168 :             pstate->space_allowed = space_allowed;
     602         168 :             pstate->growth = PHJ_GROWTH_OK;
     603             : 
     604             :             /* Set up the shared state for coordinating batches. */
     605         168 :             ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
     606             : 
     607             :             /*
     608             :              * Allocate batch 0's hash table up front so we can load it
     609             :              * directly while hashing.
     610             :              */
     611         168 :             pstate->nbuckets = nbuckets;
     612         168 :             ExecParallelHashTableAlloc(hashtable, 0);
     613             :         }
     614             : 
     615             :         /*
     616             :          * The next Parallel Hash synchronization point is in
     617             :          * MultiExecParallelHash(), which will progress it all the way to
     618             :          * PHJ_BUILD_RUN.  The caller must not return control from this
     619             :          * executor node between now and then.
     620             :          */
     621             :     }
     622             :     else
     623             :     {
     624             :         /*
     625             :          * Prepare context for the first-scan space allocations; allocate the
     626             :          * hashbucket array therein, and set each bucket "empty".
     627             :          */
     628       18208 :         MemoryContextSwitchTo(hashtable->batchCxt);
     629             : 
     630       18208 :         hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
     631             : 
     632             :         /*
     633             :          * Set up for skew optimization, if possible and there's a need for
     634             :          * more than one batch.  (In a one-batch join, there's no point in
     635             :          * it.)
     636             :          */
     637       18208 :         if (nbatch > 1)
     638         106 :             ExecHashBuildSkewHash(state, hashtable, node, num_skew_mcvs);
     639             : 
     640       18208 :         MemoryContextSwitchTo(oldcxt);
     641             :     }
     642             : 
     643       18604 :     return hashtable;
     644             : }
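
/*
 * Editorial sketch (not part of the instrumented source): why the working
 * storage above is split across hashCxt, batchCxt and spillCxt.  Everything
 * that only needs to live for one batch goes into batchCxt, so moving to a
 * new batch can drop it all with a single reset, roughly as below; data that
 * must survive batch changes (batch file arrays, spill bookkeeping) lives in
 * spillCxt or hashCxt instead.  See ExecHashTableReset() and
 * executor/hashjoin.h for the authoritative details.
 */
MemoryContextReset(hashtable->batchCxt);    /* frees all per-batch storage */
oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
hashtable->buckets.unshared = palloc0_array(HashJoinTuple, hashtable->nbuckets);
MemoryContextSwitchTo(oldcxt);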
     645             : 
     646             : 
     647             : /*
     648             :  * Compute appropriate size for hashtable given the estimated size of the
     649             :  * relation to be hashed (number of rows and average row width).
     650             :  *
     651             :  * This is exported so that the planner's costsize.c can use it.
     652             :  */
     653             : 
     654             : /* Target bucket loading (tuples per bucket) */
     655             : #define NTUP_PER_BUCKET         1
     656             : 
     657             : void
     658      568002 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
     659             :                         bool try_combined_hash_mem,
     660             :                         int parallel_workers,
     661             :                         size_t *space_allowed,
     662             :                         int *numbuckets,
     663             :                         int *numbatches,
     664             :                         int *num_skew_mcvs)
     665             : {
     666             :     int         tupsize;
     667             :     double      inner_rel_bytes;
     668             :     size_t      hash_table_bytes;
     669             :     size_t      bucket_bytes;
     670             :     size_t      max_pointers;
     671      568002 :     int         nbatch = 1;
     672             :     int         nbuckets;
     673             :     double      dbuckets;
     674             : 
     675             :     /* Force a plausible relation size if no info */
     676      568002 :     if (ntuples <= 0.0)
     677         150 :         ntuples = 1000.0;
     678             : 
     679             :     /*
     680             :      * Estimate tupsize based on footprint of tuple in hashtable... note this
     681             :      * does not allow for any palloc overhead.  The manipulations of spaceUsed
     682             :      * don't count palloc overhead either.
     683             :      */
     684      568002 :     tupsize = HJTUPLE_OVERHEAD +
     685      568002 :         MAXALIGN(SizeofMinimalTupleHeader) +
     686      568002 :         MAXALIGN(tupwidth);
     687      568002 :     inner_rel_bytes = ntuples * tupsize;
     688             : 
     689             :     /*
     690             :      * Compute in-memory hashtable size limit from GUCs.
     691             :      */
     692      568002 :     hash_table_bytes = get_hash_memory_limit();
     693             : 
     694             :     /*
     695             :      * Parallel Hash tries to use the combined hash_mem of all workers to
     696             :      * avoid the need to batch.  If that won't work, it falls back to hash_mem
     697             :      * per worker and tries to process batches in parallel.
     698             :      */
     699      568002 :     if (try_combined_hash_mem)
     700             :     {
     701             :         /* Careful, this could overflow size_t */
     702             :         double      newlimit;
     703             : 
     704       12660 :         newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
     705       12660 :         newlimit = Min(newlimit, (double) SIZE_MAX);
     706       12660 :         hash_table_bytes = (size_t) newlimit;
     707             :     }
     708             : 
     709      568002 :     *space_allowed = hash_table_bytes;
     710             : 
     711             :     /*
     712             :      * If skew optimization is possible, estimate the number of skew buckets
     713             :      * that will fit in the memory allowed, and decrement the assumed space
     714             :      * available for the main hash table accordingly.
     715             :      *
     716             :      * We make the optimistic assumption that each skew bucket will contain
     717             :      * one inner-relation tuple.  If that turns out to be low, we will recover
     718             :      * at runtime by reducing the number of skew buckets.
     719             :      *
     720             :      * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
     721             :      * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
     722             :      * will round up to the next power of 2 and then multiply by 4 to reduce
     723             :      * collisions.
     724             :      */
     725      568002 :     if (useskew)
     726             :     {
     727             :         size_t      bytes_per_mcv;
     728             :         size_t      skew_mcvs;
     729             : 
     730             :         /*----------
     731             :          * Compute number of MCVs we could hold in hash_table_bytes
     732             :          *
     733             :          * Divisor is:
     734             :          * size of a hash tuple +
     735             :          * worst-case size of skewBucket[] per MCV +
     736             :          * size of skewBucketNums[] entry +
     737             :          * size of skew bucket struct itself
     738             :          *----------
     739             :          */
     740      563896 :         bytes_per_mcv = tupsize +
     741             :             (8 * sizeof(HashSkewBucket *)) +
     742      563896 :             sizeof(int) +
     743             :             SKEW_BUCKET_OVERHEAD;
     744      563896 :         skew_mcvs = hash_table_bytes / bytes_per_mcv;
     745             : 
     746             :         /*
     747             :          * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
     748             :          * not to worry about size_t overflow in the multiplication)
     749             :          */
     750      563896 :         skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
     751             : 
     752             :         /* Now clamp to integer range */
     753      563896 :         skew_mcvs = Min(skew_mcvs, INT_MAX);
     754             : 
     755      563896 :         *num_skew_mcvs = (int) skew_mcvs;
     756             : 
     757             :         /* Reduce hash_table_bytes by the amount needed for the skew table */
     758      563896 :         if (skew_mcvs > 0)
     759      563896 :             hash_table_bytes -= skew_mcvs * bytes_per_mcv;
     760             :     }
     761             :     else
     762        4106 :         *num_skew_mcvs = 0;
     763             : 
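/*
 * Editorial worked example under assumed numbers (64-bit pointers,
 * tupsize = 50 bytes, a 4 MB memory limit, SKEW_HASH_MEM_PERCENT = 2):
 * bytes_per_mcv = 50 + 8 * 8 + 4 + SKEW_BUCKET_OVERHEAD, roughly 130 bytes,
 * so about 31,000 MCVs would fit in the whole budget; after the 2% scaling,
 * num_skew_mcvs lands around 620, and roughly 80 kB is carved out of
 * hash_table_bytes for the skew table before the main buckets are sized.
 */
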
     764             :     /*
     765             :      * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
     766             :      * memory is filled, assuming a single batch; but limit the value so that
     767             :      * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
     768             :      * nor MaxAllocSize.
     769             :      *
     770             :      * Note that both nbuckets and nbatch must be powers of 2 to make
     771             :      * ExecHashGetBucketAndBatch fast.
     772             :      */
     773      568002 :     max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
     774      568002 :     max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
     775             :     /* If max_pointers isn't a power of 2, must round it down to one */
     776      568002 :     max_pointers = pg_prevpower2_size_t(max_pointers);
     777             : 
     778             :     /* Also ensure we avoid integer overflow in nbatch and nbuckets */
     779             :     /* (this step is redundant given the current value of MaxAllocSize) */
     780      568002 :     max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
     781             : 
     782      568002 :     dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
     783      568002 :     dbuckets = Min(dbuckets, max_pointers);
     784      568002 :     nbuckets = (int) dbuckets;
     785             :     /* don't let nbuckets be really small, though ... */
     786      568002 :     nbuckets = Max(nbuckets, 1024);
     787             :     /* ... and force it to be a power of 2. */
     788      568002 :     nbuckets = pg_nextpower2_32(nbuckets);
     789             : 
     790             :     /*
     791             :      * If there's not enough space to store the projected number of tuples and
     792             :      * the required bucket headers, we will need multiple batches.
     793             :      */
     794      568002 :     bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
     795      568002 :     if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
     796             :     {
     797             :         /* We'll need multiple batches */
     798             :         size_t      sbuckets;
     799             :         double      dbatch;
     800             :         int         minbatch;
     801             :         size_t      bucket_size;
     802             : 
     803             :         /*
     804             :          * If Parallel Hash with combined hash_mem would still need multiple
     805             :          * batches, we'll have to fall back to regular hash_mem budget.
     806             :          */
     807        4942 :         if (try_combined_hash_mem)
     808             :         {
     809         246 :             ExecChooseHashTableSize(ntuples, tupwidth, useskew,
     810             :                                     false, parallel_workers,
     811             :                                     space_allowed,
     812             :                                     numbuckets,
     813             :                                     numbatches,
     814             :                                     num_skew_mcvs);
     815         246 :             return;
     816             :         }
     817             : 
     818             :         /*
     819             :          * Estimate the number of buckets we'll want to have when hash_mem is
     820             :          * entirely full.  Each bucket will contain a bucket pointer plus
     821             :          * NTUP_PER_BUCKET tuples, whose projected size already includes
     822             :          * overhead for the hash code, pointer to the next tuple, etc.
     823             :          */
     824        4696 :         bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
     825        4696 :         if (hash_table_bytes <= bucket_size)
     826           0 :             sbuckets = 1;       /* avoid pg_nextpower2_size_t(0) */
     827             :         else
     828        4696 :             sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
     829        4696 :         sbuckets = Min(sbuckets, max_pointers);
     830        4696 :         nbuckets = (int) sbuckets;
     831        4696 :         nbuckets = pg_nextpower2_32(nbuckets);
     832        4696 :         bucket_bytes = nbuckets * sizeof(HashJoinTuple);
     833             : 
     834             :         /*
     835             :          * Buckets are simple pointers to hashjoin tuples, while tupsize
     836             :          * includes the pointer, hash code, and MinimalTupleData.  So buckets
     837             :          * should never really exceed 25% of hash_mem (even for
     838             :          * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
     839             :          * 2^N bytes, where we might get more because of doubling. So let's
     840             :          * look for 50% here.
     841             :          */
     842             :         Assert(bucket_bytes <= hash_table_bytes / 2);
     843             : 
     844             :         /* Calculate required number of batches. */
     845        4696 :         dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
     846        4696 :         dbatch = Min(dbatch, max_pointers);
     847        4696 :         minbatch = (int) dbatch;
     848        4696 :         nbatch = pg_nextpower2_32(Max(2, minbatch));
     849             :     }
     850             : 
     851             :     Assert(nbuckets > 0);
     852             :     Assert(nbatch > 0);
     853             : 
     854      567756 :     *numbuckets = nbuckets;
     855      567756 :     *numbatches = nbatch;
     856             : }
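
/*
 * Editorial sketch (not part of the instrumented source): why nbuckets and
 * nbatch are forced to powers of 2 above.  With that guarantee, a tuple's
 * bucket and batch can be extracted from its 32-bit hash value with masks
 * and shifts instead of division, roughly as ExecHashGetBucketAndBatch()
 * does (the real function's bit manipulation may differ in detail).
 */
static inline void
sketch_get_bucket_and_batch(uint32 hashvalue,
                            int nbuckets, int log2_nbuckets, int nbatch,
                            int *bucketno, int *batchno)
{
    /* the low log2_nbuckets bits select the bucket */
    *bucketno = hashvalue & (nbuckets - 1);

    /* the bits above those select the batch, when there is more than one */
    if (nbatch > 1)
        *batchno = (hashvalue >> log2_nbuckets) & (nbatch - 1);
    else
        *batchno = 0;
}

/*
 * Worked example under assumed numbers (ignoring the skew-table carve-out):
 * with hash_table_bytes = 4 MB, tupsize = 50 bytes and 1 million tuples,
 * inner_rel_bytes is ~50 MB, far too much for one batch.  The bucket array
 * is then re-estimated at ~1 MB, leaving about 3 MB of per-batch tuple
 * space, so dbatch works out to 16, which is already a power of 2.
 */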
     857             : 
     858             : 
     859             : /* ----------------------------------------------------------------
     860             :  *      ExecHashTableDestroy
     861             :  *
     862             :  *      destroy a hash table
     863             :  * ----------------------------------------------------------------
     864             :  */
     865             : void
     866       18500 : ExecHashTableDestroy(HashJoinTable hashtable)
     867             : {
     868             :     int         i;
     869             : 
     870             :     /*
     871             :      * Make sure all the temp files are closed.  We skip batch 0, since it
     872             :      * can't have any temp files (and the arrays might not even exist if
     873             :      * nbatch is only 1).  Parallel hash joins don't use these files.
     874             :      */
     875       18500 :     if (hashtable->innerBatchFile != NULL)
     876             :     {
     877        1324 :         for (i = 1; i < hashtable->nbatch; i++)
     878             :         {
     879        1164 :             if (hashtable->innerBatchFile[i])
     880           0 :                 BufFileClose(hashtable->innerBatchFile[i]);
     881        1164 :             if (hashtable->outerBatchFile[i])
     882           0 :                 BufFileClose(hashtable->outerBatchFile[i]);
     883             :         }
     884             :     }
     885             : 
     886             :     /* Release working memory (batchCxt is a child, so it goes away too) */
     887       18500 :     MemoryContextDelete(hashtable->hashCxt);
     888             : 
     889             :     /* And drop the control block */
     890       18500 :     pfree(hashtable);
     891       18500 : }
     892             : 
     893             : /*
     894             :  * ExecHashIncreaseNumBatches
     895             :  *      increase the original number of batches in order to reduce
     896             :  *      current memory consumption
     897             :  */
     898             : static void
     899      449136 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
     900             : {
     901      449136 :     int         oldnbatch = hashtable->nbatch;
     902      449136 :     int         curbatch = hashtable->curbatch;
     903             :     int         nbatch;
     904             :     long        ninmemory;
     905             :     long        nfreed;
     906             :     HashMemoryChunk oldchunks;
     907             : 
     908             :     /* do nothing if we've decided to shut off growth */
     909      449136 :     if (!hashtable->growEnabled)
     910      449020 :         return;
     911             : 
     912             :     /* safety check to avoid overflow */
     913         116 :     if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
     914           0 :         return;
     915             : 
     916         116 :     nbatch = oldnbatch * 2;
     917             :     Assert(nbatch > 1);
     918             : 
     919             : #ifdef HJDEBUG
     920             :     printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
     921             :            hashtable, nbatch, hashtable->spaceUsed);
     922             : #endif
     923             : 
     924         116 :     if (hashtable->innerBatchFile == NULL)
     925             :     {
     926          54 :         MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
     927             : 
     928             :         /* we had no file arrays before */
     929          54 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
     930          54 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
     931             : 
     932          54 :         MemoryContextSwitchTo(oldcxt);
     933             : 
     934             :         /* time to establish the temp tablespaces, too */
     935          54 :         PrepareTempTablespaces();
     936             :     }
     937             :     else
     938             :     {
     939             :         /* enlarge arrays and zero out added entries */
     940          62 :         hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
     941          62 :         hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
     942             :     }
     943             : 
     944         116 :     hashtable->nbatch = nbatch;
     945             : 
     946             :     /*
     947             :      * Scan through the existing hash table entries and dump out any that are
     948             :      * no longer of the current batch.
     949             :      */
     950         116 :     ninmemory = nfreed = 0;
     951             : 
      952             :     /* If we know we need to resize nbuckets, we can do it while rebatching. */
     953         116 :     if (hashtable->nbuckets_optimal != hashtable->nbuckets)
     954             :     {
     955             :         /* we never decrease the number of buckets */
     956             :         Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
     957             : 
     958          54 :         hashtable->nbuckets = hashtable->nbuckets_optimal;
     959          54 :         hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
     960             : 
     961          54 :         hashtable->buckets.unshared =
     962          54 :             repalloc_array(hashtable->buckets.unshared,
     963             :                            HashJoinTuple, hashtable->nbuckets);
     964             :     }
     965             : 
     966             :     /*
     967             :      * We will scan through the chunks directly, so that we can reset the
      968             :      * buckets now and not have to keep track of which tuples in the buckets have
     969             :      * already been processed. We will free the old chunks as we go.
     970             :      */
     971         116 :     memset(hashtable->buckets.unshared, 0,
     972         116 :            sizeof(HashJoinTuple) * hashtable->nbuckets);
     973         116 :     oldchunks = hashtable->chunks;
     974         116 :     hashtable->chunks = NULL;
     975             : 
     976             :     /* so, let's scan through the old chunks, and all tuples in each chunk */
     977         580 :     while (oldchunks != NULL)
     978             :     {
     979         464 :         HashMemoryChunk nextchunk = oldchunks->next.unshared;
     980             : 
     981             :         /* position within the buffer (up to oldchunks->used) */
     982         464 :         size_t      idx = 0;
     983             : 
     984             :         /* process all tuples stored in this chunk (and then free it) */
     985      316828 :         while (idx < oldchunks->used)
     986             :         {
     987      316364 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
     988      316364 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
     989      316364 :             int         hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
     990             :             int         bucketno;
     991             :             int         batchno;
     992             : 
     993      316364 :             ninmemory++;
     994      316364 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
     995             :                                       &bucketno, &batchno);
     996             : 
     997      316364 :             if (batchno == curbatch)
     998             :             {
     999             :                 /* keep tuple in memory - copy it into the new chunk */
    1000             :                 HashJoinTuple copyTuple;
    1001             : 
    1002      121786 :                 copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1003      121786 :                 memcpy(copyTuple, hashTuple, hashTupleSize);
    1004             : 
    1005             :                 /* and add it back to the appropriate bucket */
    1006      121786 :                 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1007      121786 :                 hashtable->buckets.unshared[bucketno] = copyTuple;
    1008             :             }
    1009             :             else
    1010             :             {
    1011             :                 /* dump it out */
    1012             :                 Assert(batchno > curbatch);
    1013      194578 :                 ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
    1014             :                                       hashTuple->hashvalue,
    1015      194578 :                                       &hashtable->innerBatchFile[batchno],
    1016             :                                       hashtable);
    1017             : 
    1018      194578 :                 hashtable->spaceUsed -= hashTupleSize;
    1019      194578 :                 nfreed++;
    1020             :             }
    1021             : 
    1022             :             /* next tuple in this chunk */
    1023      316364 :             idx += MAXALIGN(hashTupleSize);
    1024             : 
    1025             :             /* allow this loop to be cancellable */
    1026      316364 :             CHECK_FOR_INTERRUPTS();
    1027             :         }
    1028             : 
    1029             :         /* we're done with this chunk - free it and proceed to the next one */
    1030         464 :         pfree(oldchunks);
    1031         464 :         oldchunks = nextchunk;
    1032             :     }
    1033             : 
    1034             : #ifdef HJDEBUG
    1035             :     printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
    1036             :            hashtable, nfreed, ninmemory, hashtable->spaceUsed);
    1037             : #endif
    1038             : 
    1039             :     /*
    1040             :      * If we dumped out either all or none of the tuples in the table, disable
    1041             :      * further expansion of nbatch.  This situation implies that we have
    1042             :      * enough tuples of identical hashvalues to overflow spaceAllowed.
    1043             :      * Increasing nbatch will not fix it since there's no way to subdivide the
    1044             :      * group any more finely. We have to just gut it out and hope the server
    1045             :      * has enough RAM.
    1046             :      */
    1047         116 :     if (nfreed == 0 || nfreed == ninmemory)
    1048             :     {
    1049          26 :         hashtable->growEnabled = false;
    1050             : #ifdef HJDEBUG
    1051             :         printf("Hashjoin %p: disabling further increase of nbatch\n",
    1052             :                hashtable);
    1053             : #endif
    1054             :     }
    1055             : }
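
/*
 * Illustrative sketch, not part of nodeHash.c: why growth gets shut off.
 * If every in-memory tuple carries the same hash value, doubling nbatch
 * moves either none of them or all of them out of the current batch, so the
 * nfreed test above sets growEnabled = false.  rotr32() is a hypothetical
 * portable stand-in for pg_rotate_right32(); the other numbers are made up
 * for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
rotr32(uint32_t x, int n)
{
    return (x >> n) | (x << (32 - n));
}

int
main(void)
{
    uint32_t    hashvalue = 0xDEADBEEF;    /* every tuple hashes to this */
    int         log2_nbuckets = 10;        /* fixed once we start batching */
    int         curbatch = 0;
    long        ninmemory = 100000;

    for (int nbatch = 2; nbatch <= 8; nbatch *= 2)
    {
        int         batchno = rotr32(hashvalue, log2_nbuckets) & (nbatch - 1);
        long        nfreed = (batchno == curbatch) ? 0 : ninmemory;

        /* With identical hash values, nfreed is always 0 or ninmemory. */
        printf("nbatch=%d: batchno=%d, freed %ld of %ld tuples\n",
               nbatch, batchno, nfreed, ninmemory);
    }
    return 0;
}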
    1056             : 
    1057             : /*
    1058             :  * ExecParallelHashIncreaseNumBatches
    1059             :  *      Every participant attached to grow_batches_barrier must run this
    1060             :  *      function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
    1061             :  */
    1062             : static void
    1063          56 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
    1064             : {
    1065          56 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1066             : 
    1067             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1068             : 
    1069             :     /*
    1070             :      * It's unlikely, but we need to be prepared for new participants to show
     1071             :      * up while we're in the middle of this operation, so we need to switch on
     1072             :      * the barrier phase here.
    1073             :      */
    1074          56 :     switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
    1075             :     {
    1076          56 :         case PHJ_GROW_BATCHES_ELECT:
    1077             : 
    1078             :             /*
    1079             :              * Elect one participant to prepare to grow the number of batches.
    1080             :              * This involves reallocating or resetting the buckets of batch 0
    1081             :              * in preparation for all participants to begin repartitioning the
    1082             :              * tuples.
    1083             :              */
    1084          56 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1085             :                                      WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
    1086             :             {
    1087             :                 dsa_pointer_atomic *buckets;
    1088             :                 ParallelHashJoinBatch *old_batch0;
    1089             :                 int         new_nbatch;
    1090             :                 int         i;
    1091             : 
    1092             :                 /* Move the old batch out of the way. */
    1093          50 :                 old_batch0 = hashtable->batches[0].shared;
    1094          50 :                 pstate->old_batches = pstate->batches;
    1095          50 :                 pstate->old_nbatch = hashtable->nbatch;
    1096          50 :                 pstate->batches = InvalidDsaPointer;
    1097             : 
    1098             :                 /* Free this backend's old accessors. */
    1099          50 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1100             : 
    1101             :                 /* Figure out how many batches to use. */
    1102          50 :                 if (hashtable->nbatch == 1)
    1103             :                 {
    1104             :                     /*
    1105             :                      * We are going from single-batch to multi-batch.  We need
    1106             :                      * to switch from one large combined memory budget to the
    1107             :                      * regular hash_mem budget.
    1108             :                      */
    1109          36 :                     pstate->space_allowed = get_hash_memory_limit();
    1110             : 
    1111             :                     /*
    1112             :                      * The combined hash_mem of all participants wasn't
    1113             :                      * enough. Therefore one batch per participant would be
    1114             :                      * approximately equivalent and would probably also be
    1115             :                      * insufficient.  So try two batches per participant,
    1116             :                      * rounded up to a power of two.
    1117             :                      */
    1118          36 :                     new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
    1119             :                 }
    1120             :                 else
    1121             :                 {
    1122             :                     /*
    1123             :                      * We were already multi-batched.  Try doubling the number
    1124             :                      * of batches.
    1125             :                      */
    1126          14 :                     new_nbatch = hashtable->nbatch * 2;
    1127             :                 }
    1128             : 
    1129             :                 /* Allocate new larger generation of batches. */
    1130             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1131          50 :                 ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
    1132             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1133             : 
    1134             :                 /* Replace or recycle batch 0's bucket array. */
    1135          50 :                 if (pstate->old_nbatch == 1)
    1136             :                 {
    1137             :                     double      dtuples;
    1138             :                     double      dbuckets;
    1139             :                     int         new_nbuckets;
    1140             :                     uint32      max_buckets;
    1141             : 
    1142             :                     /*
    1143             :                      * We probably also need a smaller bucket array.  How many
    1144             :                      * tuples do we expect per batch, assuming we have only
    1145             :                      * half of them so far?  Normally we don't need to change
    1146             :                      * the bucket array's size, because the size of each batch
    1147             :                      * stays the same as we add more batches, but in this
    1148             :                      * special case we move from a large batch to many smaller
    1149             :                      * batches and it would be wasteful to keep the large
    1150             :                      * array.
    1151             :                      */
    1152          36 :                     dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
    1153             : 
    1154             :                     /*
    1155             :                      * We need to calculate the maximum number of buckets to
    1156             :                      * stay within the MaxAllocSize boundary.  Round the
    1157             :                      * maximum number to the previous power of 2 given that
    1158             :                      * later we round the number to the next power of 2.
    1159             :                      */
    1160          36 :                     max_buckets = pg_prevpower2_32((uint32)
    1161             :                                                    (MaxAllocSize / sizeof(dsa_pointer_atomic)));
    1162          36 :                     dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
    1163          36 :                     dbuckets = Min(dbuckets, max_buckets);
    1164          36 :                     new_nbuckets = (int) dbuckets;
    1165          36 :                     new_nbuckets = Max(new_nbuckets, 1024);
    1166          36 :                     new_nbuckets = pg_nextpower2_32(new_nbuckets);
    1167          36 :                     dsa_free(hashtable->area, old_batch0->buckets);
    1168          72 :                     hashtable->batches[0].shared->buckets =
    1169          36 :                         dsa_allocate(hashtable->area,
    1170             :                                      sizeof(dsa_pointer_atomic) * new_nbuckets);
    1171             :                     buckets = (dsa_pointer_atomic *)
    1172          36 :                         dsa_get_address(hashtable->area,
    1173          36 :                                         hashtable->batches[0].shared->buckets);
    1174      110628 :                     for (i = 0; i < new_nbuckets; ++i)
    1175      110592 :                         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1176          36 :                     pstate->nbuckets = new_nbuckets;
    1177             :                 }
    1178             :                 else
    1179             :                 {
    1180             :                     /* Recycle the existing bucket array. */
    1181          14 :                     hashtable->batches[0].shared->buckets = old_batch0->buckets;
    1182             :                     buckets = (dsa_pointer_atomic *)
    1183          14 :                         dsa_get_address(hashtable->area, old_batch0->buckets);
    1184       53262 :                     for (i = 0; i < hashtable->nbuckets; ++i)
    1185       53248 :                         dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
    1186             :                 }
    1187             : 
    1188             :                 /* Move all chunks to the work queue for parallel processing. */
    1189          50 :                 pstate->chunk_work_queue = old_batch0->chunks;
    1190             : 
    1191             :                 /* Disable further growth temporarily while we're growing. */
    1192          50 :                 pstate->growth = PHJ_GROWTH_DISABLED;
    1193             :             }
    1194             :             else
    1195             :             {
    1196             :                 /* All other participants just flush their tuples to disk. */
    1197           6 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1198             :             }
    1199             :             /* Fall through. */
    1200             : 
    1201             :         case PHJ_GROW_BATCHES_REALLOCATE:
    1202             :             /* Wait for the above to be finished. */
    1203          56 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1204             :                                  WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
    1205             :             /* Fall through. */
    1206             : 
    1207          56 :         case PHJ_GROW_BATCHES_REPARTITION:
    1208             :             /* Make sure that we have the current dimensions and buckets. */
    1209          56 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1210          56 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1211             :             /* Then partition, flush counters. */
    1212          56 :             ExecParallelHashRepartitionFirst(hashtable);
    1213          56 :             ExecParallelHashRepartitionRest(hashtable);
    1214          56 :             ExecParallelHashMergeCounters(hashtable);
    1215             :             /* Wait for the above to be finished. */
    1216          56 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1217             :                                  WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
    1218             :             /* Fall through. */
    1219             : 
    1220          56 :         case PHJ_GROW_BATCHES_DECIDE:
    1221             : 
    1222             :             /*
    1223             :              * Elect one participant to clean up and decide whether further
    1224             :              * repartitioning is needed, or should be disabled because it's
    1225             :              * not helping.
    1226             :              */
    1227          56 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1228             :                                      WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
    1229             :             {
    1230             :                 ParallelHashJoinBatch *old_batches;
    1231          50 :                 bool        space_exhausted = false;
    1232          50 :                 bool        extreme_skew_detected = false;
    1233             : 
    1234             :                 /* Make sure that we have the current dimensions and buckets. */
    1235          50 :                 ExecParallelHashEnsureBatchAccessors(hashtable);
    1236          50 :                 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1237             : 
    1238          50 :                 old_batches = dsa_get_address(hashtable->area, pstate->old_batches);
    1239             : 
    1240             :                 /* Are any of the new generation of batches exhausted? */
    1241         370 :                 for (int i = 0; i < hashtable->nbatch; ++i)
    1242             :                 {
    1243             :                     ParallelHashJoinBatch *batch;
    1244             :                     ParallelHashJoinBatch *old_batch;
    1245             :                     int         parent;
    1246             : 
    1247         320 :                     batch = hashtable->batches[i].shared;
    1248         320 :                     if (batch->space_exhausted ||
    1249         320 :                         batch->estimated_size > pstate->space_allowed)
    1250          24 :                         space_exhausted = true;
    1251             : 
    1252         320 :                     parent = i % pstate->old_nbatch;
    1253         320 :                     old_batch = NthParallelHashJoinBatch(old_batches, parent);
    1254         320 :                     if (old_batch->space_exhausted ||
    1255         100 :                         batch->estimated_size > pstate->space_allowed)
    1256             :                     {
    1257             :                         /*
    1258             :                          * Did this batch receive ALL of the tuples from its
    1259             :                          * parent batch?  That would indicate that further
    1260             :                          * repartitioning isn't going to help (the hash values
    1261             :                          * are probably all the same).
    1262             :                          */
    1263         220 :                         if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
    1264          24 :                             extreme_skew_detected = true;
    1265             :                     }
    1266             :                 }
    1267             : 
    1268             :                 /* Don't keep growing if it's not helping or we'd overflow. */
    1269          50 :                 if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
    1270          24 :                     pstate->growth = PHJ_GROWTH_DISABLED;
    1271          26 :                 else if (space_exhausted)
    1272           0 :                     pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    1273             :                 else
    1274          26 :                     pstate->growth = PHJ_GROWTH_OK;
    1275             : 
    1276             :                 /* Free the old batches in shared memory. */
    1277          50 :                 dsa_free(hashtable->area, pstate->old_batches);
    1278          50 :                 pstate->old_batches = InvalidDsaPointer;
    1279             :             }
    1280             :             /* Fall through. */
    1281             : 
    1282             :         case PHJ_GROW_BATCHES_FINISH:
    1283             :             /* Wait for the above to complete. */
    1284          56 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1285             :                                  WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
    1286             :     }
    1287          56 : }
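
/*
 * Illustrative sketch, not part of nodeHash.c: a rough POSIX analogue of the
 * "elect one participant" step used above.  BarrierArriveAndWait() returns
 * true in exactly one backend, which then does the serial work (moving the
 * old batches aside, allocating the new generation) while the others simply
 * wait at the next barrier.  pthread_barrier_wait() similarly returns
 * PTHREAD_BARRIER_SERIAL_THREAD to exactly one thread.  PostgreSQL barriers
 * are phased and allow late attachers, which this analogy does not capture.
 * Compile with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_barrier_t barrier;

static void *
worker(void *arg)
{
    int         id = *(int *) arg;

    if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
        printf("worker %d elected: doing the serial reallocation\n", id);
    else
        printf("worker %d not elected: waiting for the leader\n", id);

    /* Second barrier: everyone waits for the serial work to finish. */
    pthread_barrier_wait(&barrier);
    return NULL;
}

int
main(void)
{
    pthread_t   threads[NWORKERS];
    int         ids[NWORKERS];

    pthread_barrier_init(&barrier, NULL, NWORKERS);
    for (int i = 0; i < NWORKERS; i++)
    {
        ids[i] = i;
        pthread_create(&threads[i], NULL, worker, &ids[i]);
    }
    for (int i = 0; i < NWORKERS; i++)
        pthread_join(threads[i], NULL);
    pthread_barrier_destroy(&barrier);
    return 0;
}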
    1288             : 
    1289             : /*
    1290             :  * Repartition the tuples currently loaded into memory for inner batch 0
    1291             :  * because the number of batches has been increased.  Some tuples are retained
    1292             :  * in memory and some are written out to a later batch.
    1293             :  */
    1294             : static void
    1295          56 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
    1296             : {
    1297             :     dsa_pointer chunk_shared;
    1298             :     HashMemoryChunk chunk;
    1299             : 
    1300             :     Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
    1301             : 
    1302         346 :     while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
    1303             :     {
    1304         290 :         size_t      idx = 0;
    1305             : 
    1306             :         /* Repartition all tuples in this chunk. */
    1307      222282 :         while (idx < chunk->used)
    1308             :         {
    1309      221992 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1310      221992 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1311             :             HashJoinTuple copyTuple;
    1312             :             dsa_pointer shared;
    1313             :             int         bucketno;
    1314             :             int         batchno;
    1315             : 
    1316      221992 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1317             :                                       &bucketno, &batchno);
    1318             : 
    1319             :             Assert(batchno < hashtable->nbatch);
    1320      221992 :             if (batchno == 0)
    1321             :             {
    1322             :                 /* It still belongs in batch 0.  Copy to a new chunk. */
    1323             :                 copyTuple =
    1324       51406 :                     ExecParallelHashTupleAlloc(hashtable,
    1325       51406 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1326             :                                                &shared);
    1327       51406 :                 copyTuple->hashvalue = hashTuple->hashvalue;
    1328       51406 :                 memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
    1329       51406 :                 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1330             :                                           copyTuple, shared);
    1331             :             }
    1332             :             else
    1333             :             {
    1334      170586 :                 size_t      tuple_size =
    1335      170586 :                     MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1336             : 
    1337             :                 /* It belongs in a later batch. */
    1338      170586 :                 hashtable->batches[batchno].estimated_size += tuple_size;
    1339      170586 :                 sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1340      170586 :                              &hashTuple->hashvalue, tuple);
    1341             :             }
    1342             : 
    1343             :             /* Count this tuple. */
    1344      221992 :             ++hashtable->batches[0].old_ntuples;
    1345      221992 :             ++hashtable->batches[batchno].ntuples;
    1346             : 
    1347      221992 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1348             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1349             :         }
    1350             : 
    1351             :         /* Free this chunk. */
    1352         290 :         dsa_free(hashtable->area, chunk_shared);
    1353             : 
    1354         290 :         CHECK_FOR_INTERRUPTS();
    1355             :     }
    1356          56 : }
    1357             : 
    1358             : /*
    1359             :  * Help repartition inner batches 1..n.
    1360             :  */
    1361             : static void
    1362          56 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
    1363             : {
    1364          56 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1365          56 :     int         old_nbatch = pstate->old_nbatch;
    1366             :     SharedTuplestoreAccessor **old_inner_tuples;
    1367             :     ParallelHashJoinBatch *old_batches;
    1368             :     int         i;
    1369             : 
    1370             :     /* Get our hands on the previous generation of batches. */
    1371             :     old_batches = (ParallelHashJoinBatch *)
    1372          56 :         dsa_get_address(hashtable->area, pstate->old_batches);
    1373          56 :     old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
    1374         126 :     for (i = 1; i < old_nbatch; ++i)
    1375             :     {
    1376          70 :         ParallelHashJoinBatch *shared =
    1377          70 :             NthParallelHashJoinBatch(old_batches, i);
    1378             : 
    1379          70 :         old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
    1380             :                                          ParallelWorkerNumber + 1,
    1381             :                                          &pstate->fileset);
    1382             :     }
    1383             : 
    1384             :     /* Join in the effort to repartition them. */
    1385         126 :     for (i = 1; i < old_nbatch; ++i)
    1386             :     {
    1387             :         MinimalTuple tuple;
    1388             :         uint32      hashvalue;
    1389             : 
    1390             :         /* Scan one partition from the previous generation. */
    1391          70 :         sts_begin_parallel_scan(old_inner_tuples[i]);
    1392      172598 :         while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
    1393             :         {
    1394      172528 :             size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1395             :             int         bucketno;
    1396             :             int         batchno;
    1397             : 
    1398             :             /* Decide which partition it goes to in the new generation. */
    1399      172528 :             ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
    1400             :                                       &batchno);
    1401             : 
    1402      172528 :             hashtable->batches[batchno].estimated_size += tuple_size;
    1403      172528 :             ++hashtable->batches[batchno].ntuples;
    1404      172528 :             ++hashtable->batches[i].old_ntuples;
    1405             : 
     1406             :             /* Store the tuple in its new batch. */
    1407      172528 :             sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1408             :                          &hashvalue, tuple);
    1409             : 
    1410      172528 :             CHECK_FOR_INTERRUPTS();
    1411             :         }
    1412          70 :         sts_end_parallel_scan(old_inner_tuples[i]);
    1413             :     }
    1414             : 
    1415          56 :     pfree(old_inner_tuples);
    1416          56 : }
    1417             : 
    1418             : /*
    1419             :  * Transfer the backend-local per-batch counters to the shared totals.
    1420             :  */
    1421             : static void
    1422         302 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
    1423             : {
    1424         302 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1425             :     int         i;
    1426             : 
    1427         302 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    1428         302 :     pstate->total_tuples = 0;
    1429        1832 :     for (i = 0; i < hashtable->nbatch; ++i)
    1430             :     {
    1431        1530 :         ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
    1432             : 
    1433        1530 :         batch->shared->size += batch->size;
    1434        1530 :         batch->shared->estimated_size += batch->estimated_size;
    1435        1530 :         batch->shared->ntuples += batch->ntuples;
    1436        1530 :         batch->shared->old_ntuples += batch->old_ntuples;
    1437        1530 :         batch->size = 0;
    1438        1530 :         batch->estimated_size = 0;
    1439        1530 :         batch->ntuples = 0;
    1440        1530 :         batch->old_ntuples = 0;
    1441        1530 :         pstate->total_tuples += batch->shared->ntuples;
    1442             :     }
    1443         302 :     LWLockRelease(&pstate->lock);
    1444         302 : }
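
/*
 * Illustrative fragment, not part of nodeHash.c: the merge-counters pattern
 * in miniature.  Each worker accumulates into private counters and then
 * folds them into the shared totals under a lock, resetting the private
 * copies, so the hot insertion path needs no locking.  A pthread mutex
 * stands in for the LWLock used above; the struct fields are simplified
 * assumptions.
 */
#include <pthread.h>

typedef struct SharedTotals
{
    pthread_mutex_t lock;
    long        ntuples;
    long        size;
} SharedTotals;

typedef struct LocalCounters
{
    long        ntuples;
    long        size;
} LocalCounters;

static void
merge_counters(SharedTotals *shared, LocalCounters *local)
{
    pthread_mutex_lock(&shared->lock);
    shared->ntuples += local->ntuples;
    shared->size += local->size;
    pthread_mutex_unlock(&shared->lock);

    /* Local copies restart from zero, exactly as the per-batch fields do. */
    local->ntuples = 0;
    local->size = 0;
}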
    1445             : 
    1446             : /*
    1447             :  * ExecHashIncreaseNumBuckets
    1448             :  *      increase the original number of buckets in order to reduce
     1449             :  *      the number of tuples per bucket
    1450             :  */
    1451             : static void
    1452          72 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
    1453             : {
    1454             :     HashMemoryChunk chunk;
    1455             : 
    1456             :     /* do nothing if not an increase (it's called increase for a reason) */
    1457          72 :     if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
    1458           0 :         return;
    1459             : 
    1460             : #ifdef HJDEBUG
    1461             :     printf("Hashjoin %p: increasing nbuckets %d => %d\n",
    1462             :            hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
    1463             : #endif
    1464             : 
    1465          72 :     hashtable->nbuckets = hashtable->nbuckets_optimal;
    1466          72 :     hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
    1467             : 
    1468             :     Assert(hashtable->nbuckets > 1);
    1469             :     Assert(hashtable->nbuckets <= (INT_MAX / 2));
    1470             :     Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
    1471             : 
    1472             :     /*
    1473             :      * Just reallocate the proper number of buckets - we don't need to walk
    1474             :      * through them - we can walk the dense-allocated chunks (just like in
    1475             :      * ExecHashIncreaseNumBatches, but without all the copying into new
    1476             :      * chunks)
    1477             :      */
    1478          72 :     hashtable->buckets.unshared =
    1479          72 :         repalloc_array(hashtable->buckets.unshared,
    1480             :                        HashJoinTuple, hashtable->nbuckets);
    1481             : 
    1482          72 :     memset(hashtable->buckets.unshared, 0,
    1483          72 :            hashtable->nbuckets * sizeof(HashJoinTuple));
    1484             : 
    1485             :     /* scan through all tuples in all chunks to rebuild the hash table */
    1486        1008 :     for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
    1487             :     {
    1488             :         /* process all tuples stored in this chunk */
    1489         936 :         size_t      idx = 0;
    1490             : 
    1491      720936 :         while (idx < chunk->used)
    1492             :         {
    1493      720000 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1494             :             int         bucketno;
    1495             :             int         batchno;
    1496             : 
    1497      720000 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1498             :                                       &bucketno, &batchno);
    1499             : 
    1500             :             /* add the tuple to the proper bucket */
    1501      720000 :             hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1502      720000 :             hashtable->buckets.unshared[bucketno] = hashTuple;
    1503             : 
    1504             :             /* advance index past the tuple */
    1505      720000 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1506             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1507             :         }
    1508             : 
    1509             :         /* allow this loop to be cancellable */
    1510         936 :         CHECK_FOR_INTERRUPTS();
    1511             :     }
    1512             : }
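
/*
 * Illustrative sketch, not part of nodeHash.c: walking a dense-allocated
 * chunk the way the loops above do.  Tuples are packed back to back, each
 * entry padded to a MAXALIGN boundary, so a rebuild only needs the chunk
 * list plus each tuple's length; the bucket array can simply be zeroed and
 * repopulated.  SketchTuple and the 8-byte SKETCH_MAXALIGN are simplified
 * assumptions, not the real HashJoinTuple layout or the real MAXALIGN macro.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAXALIGN(len) (((len) + 7) & ~((size_t) 7))

typedef struct SketchTuple
{
    uint32_t    hashvalue;
    uint32_t    t_len;          /* bytes of payload following the header */
} SketchTuple;

static void
walk_chunk(const char *chunk_data, size_t used)
{
    size_t      idx = 0;

    while (idx < used)
    {
        const SketchTuple *tup = (const SketchTuple *) (chunk_data + idx);

        printf("tuple at offset %zu: hash=%08x len=%u\n",
               idx, tup->hashvalue, tup->t_len);

        /* advance past header + payload, rounded up to the alignment */
        idx += SKETCH_MAXALIGN(sizeof(SketchTuple) + tup->t_len);
    }
}

int
main(void)
{
    union
    {
        char        bytes[256];
        uint64_t    align;      /* force 8-byte alignment of the buffer */
    }           chunk;
    size_t      used = 0;

    /* append two variable-length entries, MAXALIGN'ing each one */
    for (uint32_t len = 5; len <= 13; len += 8)
    {
        SketchTuple *tup = (SketchTuple *) (chunk.bytes + used);

        tup->hashvalue = 0xABC0 + len;
        tup->t_len = len;
        used += SKETCH_MAXALIGN(sizeof(SketchTuple) + len);
    }
    walk_chunk(chunk.bytes, used);
    return 0;
}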
    1513             : 
    1514             : static void
    1515         142 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
    1516             : {
    1517         142 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1518             :     int         i;
    1519             :     HashMemoryChunk chunk;
    1520             :     dsa_pointer chunk_s;
    1521             : 
    1522             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1523             : 
    1524             :     /*
    1525             :      * It's unlikely, but we need to be prepared for new participants to show
     1526             :      * up while we're in the middle of this operation, so we need to switch on
     1527             :      * the barrier phase here.
    1528             :      */
    1529         142 :     switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
    1530             :     {
    1531         140 :         case PHJ_GROW_BUCKETS_ELECT:
    1532             :             /* Elect one participant to prepare to increase nbuckets. */
    1533         140 :             if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1534             :                                      WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
    1535             :             {
    1536             :                 size_t      size;
    1537             :                 dsa_pointer_atomic *buckets;
    1538             : 
    1539             :                 /* Double the size of the bucket array. */
    1540         108 :                 pstate->nbuckets *= 2;
    1541         108 :                 size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
    1542         108 :                 hashtable->batches[0].shared->size += size / 2;
    1543         108 :                 dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
    1544         216 :                 hashtable->batches[0].shared->buckets =
    1545         108 :                     dsa_allocate(hashtable->area, size);
    1546             :                 buckets = (dsa_pointer_atomic *)
    1547         108 :                     dsa_get_address(hashtable->area,
    1548         108 :                                     hashtable->batches[0].shared->buckets);
    1549      933996 :                 for (i = 0; i < pstate->nbuckets; ++i)
    1550      933888 :                     dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1551             : 
    1552             :                 /* Put the chunk list onto the work queue. */
    1553         108 :                 pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
    1554             : 
    1555             :                 /* Clear the flag. */
    1556         108 :                 pstate->growth = PHJ_GROWTH_OK;
    1557             :             }
    1558             :             /* Fall through. */
    1559             : 
    1560             :         case PHJ_GROW_BUCKETS_REALLOCATE:
    1561             :             /* Wait for the above to complete. */
    1562         142 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1563             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
    1564             :             /* Fall through. */
    1565             : 
    1566         142 :         case PHJ_GROW_BUCKETS_REINSERT:
    1567             :             /* Reinsert all tuples into the hash table. */
    1568         142 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1569         142 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1570         808 :             while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
    1571             :             {
    1572         666 :                 size_t      idx = 0;
    1573             : 
    1574      544906 :                 while (idx < chunk->used)
    1575             :                 {
    1576      544240 :                     HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1577      544240 :                     dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
    1578             :                     int         bucketno;
    1579             :                     int         batchno;
    1580             : 
    1581      544240 :                     ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1582             :                                               &bucketno, &batchno);
    1583             :                     Assert(batchno == 0);
    1584             : 
    1585             :                     /* add the tuple to the proper bucket */
    1586      544240 :                     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1587             :                                               hashTuple, shared);
    1588             : 
    1589             :                     /* advance index past the tuple */
    1590      544240 :                     idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1591             :                                     HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1592             :                 }
    1593             : 
    1594             :                 /* allow this loop to be cancellable */
    1595         666 :                 CHECK_FOR_INTERRUPTS();
    1596             :             }
    1597         142 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1598             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
    1599             :     }
    1600         142 : }
    1601             : 
    1602             : /*
    1603             :  * ExecHashTableInsert
     1604             :  *      insert a tuple into the hash table; depending on the hash value,
     1605             :  *      it may instead go to a temp file for a later batch
    1606             :  *
    1607             :  * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
    1608             :  * tuple; the minimal case in particular is certain to happen while reloading
    1609             :  * tuples from batch files.  We could save some cycles in the regular-tuple
    1610             :  * case by not forcing the slot contents into minimal form; not clear if it's
    1611             :  * worth the messiness required.
    1612             :  */
    1613             : void
    1614     9905198 : ExecHashTableInsert(HashJoinTable hashtable,
    1615             :                     TupleTableSlot *slot,
    1616             :                     uint32 hashvalue)
    1617             : {
    1618             :     bool        shouldFree;
    1619     9905198 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1620             :     int         bucketno;
    1621             :     int         batchno;
    1622             : 
    1623     9905198 :     ExecHashGetBucketAndBatch(hashtable, hashvalue,
    1624             :                               &bucketno, &batchno);
    1625             : 
    1626             :     /*
    1627             :      * decide whether to put the tuple in the hash table or a temp file
    1628             :      */
    1629     9905198 :     if (batchno == hashtable->curbatch)
    1630             :     {
    1631             :         /*
    1632             :          * put the tuple in hash table
    1633             :          */
    1634             :         HashJoinTuple hashTuple;
    1635             :         int         hashTupleSize;
    1636     7742596 :         double      ntuples = (hashtable->totalTuples - hashtable->skewTuples);
    1637             : 
    1638             :         /* Create the HashJoinTuple */
    1639     7742596 :         hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    1640     7742596 :         hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1641             : 
    1642     7742596 :         hashTuple->hashvalue = hashvalue;
    1643     7742596 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1644             : 
    1645             :         /*
    1646             :          * We always reset the tuple-matched flag on insertion.  This is okay
    1647             :          * even when reloading a tuple from a batch file, since the tuple
    1648             :          * could not possibly have been matched to an outer tuple before it
    1649             :          * went into the batch file.
    1650             :          */
    1651     7742596 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1652             : 
    1653             :         /* Push it onto the front of the bucket's list */
    1654     7742596 :         hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1655     7742596 :         hashtable->buckets.unshared[bucketno] = hashTuple;
    1656             : 
    1657             :         /*
    1658             :          * Increase the (optimal) number of buckets if we just exceeded the
    1659             :          * NTUP_PER_BUCKET threshold, but only when there's still a single
    1660             :          * batch.
    1661             :          */
    1662     7742596 :         if (hashtable->nbatch == 1 &&
    1663     5275870 :             ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
    1664             :         {
    1665             :             /* Guard against integer overflow and alloc size overflow */
    1666         180 :             if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
    1667         180 :                 hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
    1668             :             {
    1669         180 :                 hashtable->nbuckets_optimal *= 2;
    1670         180 :                 hashtable->log2_nbuckets_optimal += 1;
    1671             :             }
    1672             :         }
    1673             : 
    1674             :         /* Account for space used, and back off if we've used too much */
    1675     7742596 :         hashtable->spaceUsed += hashTupleSize;
    1676     7742596 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    1677     5861454 :             hashtable->spacePeak = hashtable->spaceUsed;
    1678     7742596 :         if (hashtable->spaceUsed +
    1679     7742596 :             hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
    1680     7742596 :             > hashtable->spaceAllowed)
    1681      449136 :             ExecHashIncreaseNumBatches(hashtable);
    1682             :     }
    1683             :     else
    1684             :     {
    1685             :         /*
    1686             :          * put the tuple into a temp file for later batches
    1687             :          */
    1688             :         Assert(batchno > hashtable->curbatch);
    1689     2162602 :         ExecHashJoinSaveTuple(tuple,
    1690             :                               hashvalue,
    1691     2162602 :                               &hashtable->innerBatchFile[batchno],
    1692             :                               hashtable);
    1693             :     }
    1694             : 
    1695     9905198 :     if (shouldFree)
    1696     7507456 :         heap_free_minimal_tuple(tuple);
    1697     9905198 : }
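
/*
 * Illustrative sketch, not part of nodeHash.c: the two growth triggers above
 * in isolation.  NTUP_PER_BUCKET is assumed to be 1, matching recent
 * PostgreSQL releases; the byte counts and tuple counts are made up.
 */
#include <stddef.h>
#include <stdio.h>

#define SKETCH_NTUP_PER_BUCKET 1

int
main(void)
{
    double      ntuples = 2100.0;       /* tuples loaded into batch 0 so far */
    int         nbuckets_optimal = 2048;
    size_t      spaceUsed = 4190000;    /* bytes of tuple data so far */
    size_t      spaceAllowed = 4194304; /* e.g. a 4MB hash memory budget */
    size_t      bucket_bytes = nbuckets_optimal * sizeof(void *);

    /* Trigger 1: more than NTUP_PER_BUCKET tuples per bucket, so double the
     * (optimal) bucket count while we're still in a single batch. */
    if (ntuples > (double) nbuckets_optimal * SKETCH_NTUP_PER_BUCKET)
        printf("double nbuckets_optimal: %d -> %d\n",
               nbuckets_optimal, nbuckets_optimal * 2);

    /* Trigger 2: tuple data plus the bucket array would exceed the budget,
     * so double nbatch and spill half of the hash range to temp files. */
    if (spaceUsed + bucket_bytes > spaceAllowed)
        printf("increase nbatch (%zu + %zu > %zu)\n",
               spaceUsed, bucket_bytes, spaceAllowed);
    return 0;
}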
    1698             : 
    1699             : /*
    1700             :  * ExecParallelHashTableInsert
    1701             :  *      insert a tuple into a shared hash table or shared batch tuplestore
    1702             :  */
    1703             : void
    1704     2160132 : ExecParallelHashTableInsert(HashJoinTable hashtable,
    1705             :                             TupleTableSlot *slot,
    1706             :                             uint32 hashvalue)
    1707             : {
    1708             :     bool        shouldFree;
    1709     2160132 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1710             :     dsa_pointer shared;
    1711             :     int         bucketno;
    1712             :     int         batchno;
    1713             : 
    1714     2160486 : retry:
    1715     2160486 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1716             : 
    1717     2160486 :     if (batchno == 0)
    1718             :     {
    1719             :         HashJoinTuple hashTuple;
    1720             : 
    1721             :         /* Try to load it into memory. */
    1722             :         Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
    1723             :                PHJ_BUILD_HASH_INNER);
    1724     1246870 :         hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1725     1246870 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1726             :                                                &shared);
    1727     1246870 :         if (hashTuple == NULL)
    1728         326 :             goto retry;
    1729             : 
    1730             :         /* Store the hash value in the HashJoinTuple header. */
    1731     1246544 :         hashTuple->hashvalue = hashvalue;
    1732     1246544 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1733     1246544 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1734             : 
    1735             :         /* Push it onto the front of the bucket's list */
    1736     1246544 :         ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1737             :                                   hashTuple, shared);
    1738             :     }
    1739             :     else
    1740             :     {
    1741      913616 :         size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1742             : 
    1743             :         Assert(batchno > 0);
    1744             : 
    1745             :         /* Try to preallocate space in the batch if necessary. */
    1746      913616 :         if (hashtable->batches[batchno].preallocated < tuple_size)
    1747             :         {
    1748        1758 :             if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
    1749          28 :                 goto retry;
    1750             :         }
    1751             : 
    1752             :         Assert(hashtable->batches[batchno].preallocated >= tuple_size);
    1753      913588 :         hashtable->batches[batchno].preallocated -= tuple_size;
    1754      913588 :         sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
    1755             :                      tuple);
    1756             :     }
    1757     2160132 :     ++hashtable->batches[batchno].ntuples;
    1758             : 
    1759     2160132 :     if (shouldFree)
    1760     2160132 :         heap_free_minimal_tuple(tuple);
    1761     2160132 : }
    1762             : 
    1763             : /*
    1764             :  * Insert a tuple into the current hash table.  Unlike
    1765             :  * ExecParallelHashTableInsert, this version is not prepared to send the tuple
    1766             :  * to other batches or to run out of memory, and should only be called with
    1767             :  * tuples that belong in the current batch once growth has been disabled.
    1768             :  */
    1769             : void
    1770     1084174 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
    1771             :                                         TupleTableSlot *slot,
    1772             :                                         uint32 hashvalue)
    1773             : {
    1774             :     bool        shouldFree;
    1775     1084174 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1776             :     HashJoinTuple hashTuple;
    1777             :     dsa_pointer shared;
    1778             :     int         batchno;
    1779             :     int         bucketno;
    1780             : 
    1781     1084174 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1782             :     Assert(batchno == hashtable->curbatch);
    1783     1084174 :     hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1784     1084174 :                                            HJTUPLE_OVERHEAD + tuple->t_len,
    1785             :                                            &shared);
    1786     1084174 :     hashTuple->hashvalue = hashvalue;
    1787     1084174 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1788     1084174 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1789     1084174 :     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1790             :                               hashTuple, shared);
    1791             : 
    1792     1084174 :     if (shouldFree)
    1793           0 :         heap_free_minimal_tuple(tuple);
    1794     1084174 : }
    1795             : 
    1796             : 
    1797             : /*
    1798             :  * ExecHashGetBucketAndBatch
    1799             :  *      Determine the bucket number and batch number for a hash value
    1800             :  *
    1801             :  * Note: on-the-fly increases of nbatch must not change the bucket number
    1802             :  * for a given hash code (since we don't move tuples to different hash
    1803             :  * chains), and must only cause the batch number to remain the same or
    1804             :  * increase.  Our algorithm is
    1805             :  *      bucketno = hashvalue MOD nbuckets
    1806             :  *      batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
    1807             :  * where nbuckets and nbatch are both expected to be powers of 2, so we can
    1808             :  * do the computations by shifting and masking.  (This assumes that all hash
    1809             :  * functions are good about randomizing all their output bits, else we are
    1810             :  * likely to have very skewed bucket or batch occupancy.)
    1811             :  *
    1812             :  * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
    1813             :  * bucket count growth.  Once we start batching, the value is fixed and does
    1814             :  * not change over the course of the join (making it possible to compute batch
    1815             :  * number the way we do here).
    1816             :  *
    1817             :  * nbatch is always a power of 2; we increase it only by doubling it.  This
    1818             :  * effectively adds one more bit to the top of the batchno.  In very large
    1819             :  * joins, we might run out of bits to add, so we do this by rotating the hash
    1820             :  * value.  This causes batchno to steal bits from bucketno when the number of
    1821             :  * virtual buckets exceeds 2^32.  It's better to have longer bucket chains
    1822             :  * than to lose the ability to divide batches.
    1823             :  */
    1824             : void
    1825    33296138 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
    1826             :                           uint32 hashvalue,
    1827             :                           int *bucketno,
    1828             :                           int *batchno)
    1829             : {
    1830    33296138 :     uint32      nbuckets = (uint32) hashtable->nbuckets;
    1831    33296138 :     uint32      nbatch = (uint32) hashtable->nbatch;
    1832             : 
    1833    33296138 :     if (nbatch > 1)
    1834             :     {
    1835    13187900 :         *bucketno = hashvalue & (nbuckets - 1);
    1836    13187900 :         *batchno = pg_rotate_right32(hashvalue,
    1837    13187900 :                                      hashtable->log2_nbuckets) & (nbatch - 1);
    1838             :     }
    1839             :     else
    1840             :     {
    1841    20108238 :         *bucketno = hashvalue & (nbuckets - 1);
    1842    20108238 :         *batchno = 0;
    1843             :     }
    1844    33296138 : }
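
/*
 * Worked example, not part of nodeHash.c: the mapping above for
 * nbuckets = 1024 (log2_nbuckets = 10) and nbatch = 4.  The low 10 bits pick
 * the bucket; after rotating the hash right by log2_nbuckets, the low bits of
 * the result pick the batch.  rotr32() is a hypothetical portable stand-in
 * for pg_rotate_right32(); the hash value is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
rotr32(uint32_t x, int n)
{
    return (x >> n) | (x << (32 - n));
}

int
main(void)
{
    uint32_t    hashvalue = 0x9E3779B9;
    uint32_t    nbuckets = 1024;
    uint32_t    nbatch = 4;
    int         log2_nbuckets = 10;

    int         bucketno = hashvalue & (nbuckets - 1);
    int         batchno = rotr32(hashvalue, log2_nbuckets) & (nbatch - 1);

    printf("hash=%08x -> bucketno=%d batchno=%d\n",
           hashvalue, bucketno, batchno);

    /* Doubling nbatch only adds one more bit to batchno; bucketno is unchanged. */
    printf("with nbatch=8: batchno=%d\n",
           (int) (rotr32(hashvalue, log2_nbuckets) & 7));
    return 0;
}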
    1845             : 
    1846             : /*
    1847             :  * ExecScanHashBucket
    1848             :  *      scan a hash bucket for matches to the current outer tuple
    1849             :  *
    1850             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    1851             :  *
    1852             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    1853             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    1854             :  * for the latter.
    1855             :  */
    1856             : bool
    1857    17820662 : ExecScanHashBucket(HashJoinState *hjstate,
    1858             :                    ExprContext *econtext)
    1859             : {
    1860    17820662 :     ExprState  *hjclauses = hjstate->hashclauses;
    1861    17820662 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    1862    17820662 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    1863    17820662 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    1864             : 
    1865             :     /*
    1866             :      * hj_CurTuple is the address of the tuple last returned from the current
    1867             :      * bucket, or NULL if it's time to start scanning a new bucket.
    1868             :      *
     1869             :      * If the tuple hashed to a skew bucket then scan the skew bucket;
     1870             :      * otherwise scan the standard hashtable bucket.
    1871             :      */
    1872    17820662 :     if (hashTuple != NULL)
    1873     4479854 :         hashTuple = hashTuple->next.unshared;
    1874    13340808 :     else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
    1875        2400 :         hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
    1876             :     else
    1877    13338408 :         hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    1878             : 
    1879    21977512 :     while (hashTuple != NULL)
    1880             :     {
    1881    12059258 :         if (hashTuple->hashvalue == hashvalue)
    1882             :         {
    1883             :             TupleTableSlot *inntuple;
    1884             : 
    1885             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    1886     7902420 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    1887             :                                              hjstate->hj_HashTupleSlot,
    1888             :                                              false);    /* do not pfree */
    1889     7902420 :             econtext->ecxt_innertuple = inntuple;
    1890             : 
    1891     7902420 :             if (ExecQualAndReset(hjclauses, econtext))
    1892             :             {
    1893     7902408 :                 hjstate->hj_CurTuple = hashTuple;
    1894     7902408 :                 return true;
    1895             :             }
    1896             :         }
    1897             : 
    1898     4156850 :         hashTuple = hashTuple->next.unshared;
    1899             :     }
    1900             : 
    1901             :     /*
    1902             :      * no match
    1903             :      */
    1904     9918254 :     return false;
    1905             : }
    1906             : 
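/*
 * [Editor's note -- illustrative sketch, not part of nodeHash.c]
 *
 * ExecScanHashBucket is a resumable scan: hj_CurTuple remembers where the
 * previous call stopped, so each call returns at most one matching inner
 * tuple and false once the bucket is exhausted.  The real driver lives in
 * nodeHashjoin.c; a hypothetical caller would be shaped roughly like this:
 */
static void
example_drain_bucket(HashJoinState *hjstate, ExprContext *econtext)
{
    /* assumes hj_CurHashValue, hj_CurBucketNo and hj_CurTuple were primed */
    while (ExecScanHashBucket(hjstate, econtext))
    {
        /*
         * econtext->ecxt_innertuple now holds the matched inner tuple; a
         * real caller would emit the join result and/or mark the match.
         */
    }
}
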
    1907             : /*
    1908             :  * ExecParallelScanHashBucket
    1909             :  *      scan a hash bucket for matches to the current outer tuple
    1910             :  *
    1911             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    1912             :  *
    1913             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    1914             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    1915             :  * for the latter.
    1916             :  */
    1917             : bool
    1918     4200054 : ExecParallelScanHashBucket(HashJoinState *hjstate,
    1919             :                            ExprContext *econtext)
    1920             : {
    1921     4200054 :     ExprState  *hjclauses = hjstate->hashclauses;
    1922     4200054 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    1923     4200054 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    1924     4200054 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    1925             : 
    1926             :     /*
    1927             :      * hj_CurTuple is the address of the tuple last returned from the current
    1928             :      * bucket, or NULL if it's time to start scanning a new bucket.
    1929             :      */
    1930     4200054 :     if (hashTuple != NULL)
    1931     2040024 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    1932             :     else
    1933     2160030 :         hashTuple = ExecParallelHashFirstTuple(hashtable,
    1934             :                                                hjstate->hj_CurBucketNo);
    1935             : 
    1936     5600842 :     while (hashTuple != NULL)
    1937             :     {
    1938     3440812 :         if (hashTuple->hashvalue == hashvalue)
    1939             :         {
    1940             :             TupleTableSlot *inntuple;
    1941             : 
    1942             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    1943     2040024 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    1944             :                                              hjstate->hj_HashTupleSlot,
    1945             :                                              false);    /* do not pfree */
    1946     2040024 :             econtext->ecxt_innertuple = inntuple;
    1947             : 
    1948     2040024 :             if (ExecQualAndReset(hjclauses, econtext))
    1949             :             {
    1950     2040024 :                 hjstate->hj_CurTuple = hashTuple;
    1951     2040024 :                 return true;
    1952             :             }
    1953             :         }
    1954             : 
    1955     1400788 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    1956             :     }
    1957             : 
    1958             :     /*
    1959             :      * no match
    1960             :      */
    1961     2160030 :     return false;
    1962             : }
    1963             : 
    1964             : /*
    1965             :  * ExecPrepHashTableForUnmatched
    1966             :  *      set up for a series of ExecScanHashTableForUnmatched calls
    1967             :  */
    1968             : void
    1969        3574 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
    1970             : {
    1971             :     /*----------
    1972             :      * During this scan we use the HashJoinState fields as follows:
    1973             :      *
    1974             :      * hj_CurBucketNo: next regular bucket to scan
    1975             :      * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
    1976             :      * hj_CurTuple: last tuple returned, or NULL to start next bucket
    1977             :      *----------
    1978             :      */
    1979        3574 :     hjstate->hj_CurBucketNo = 0;
    1980        3574 :     hjstate->hj_CurSkewBucketNo = 0;
    1981        3574 :     hjstate->hj_CurTuple = NULL;
    1982        3574 : }
    1983             : 
    1984             : /*
    1985             :  * Decide if this process is allowed to run the unmatched scan.  If so, the
    1986             :  * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
    1987             :  * Otherwise the batch is detached and false is returned.
    1988             :  */
    1989             : bool
    1990          72 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
    1991             : {
    1992          72 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    1993          72 :     int         curbatch = hashtable->curbatch;
    1994          72 :     ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    1995             : 
    1996             :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
    1997             : 
    1998             :     /*
    1999             :      * It would not be deadlock-free to wait on the batch barrier, because it
    2000             :      * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
    2001             :      * already emitted tuples.  Therefore, we'll hold a wait-free election:
    2002             :      * only one process can continue to the next phase, and all others detach
    2003             :      * from this batch.  They can still do work on other batches, if there
    2004             :      * are any.
    2005             :      */
    2006          72 :     if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
    2007             :     {
    2008             :         /* This process considers the batch to be done. */
    2009           6 :         hashtable->batches[hashtable->curbatch].done = true;
    2010             : 
    2011             :         /* Make sure any temporary files are closed. */
    2012           6 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    2013           6 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    2014             : 
    2015             :         /*
    2016             :          * Track largest batch we've seen, which would normally happen in
    2017             :          * ExecHashTableDetachBatch().
    2018             :          */
    2019           6 :         hashtable->spacePeak =
    2020           6 :             Max(hashtable->spacePeak,
    2021             :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    2022           6 :         hashtable->curbatch = -1;
    2023           6 :         return false;
    2024             :     }
    2025             : 
    2026             :     /* Now we are alone with this batch. */
    2027             :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    2028             : 
    2029             :     /*
    2030             :      * Has another process decided to give up early and command all processes
    2031             :      * to skip the unmatched scan?
    2032             :      */
    2033          66 :     if (batch->skip_unmatched)
    2034             :     {
    2035           0 :         hashtable->batches[hashtable->curbatch].done = true;
    2036           0 :         ExecHashTableDetachBatch(hashtable);
    2037           0 :         return false;
    2038             :     }
    2039             : 
    2040             :     /* Now prepare the process local state, just as for non-parallel join. */
    2041          66 :     ExecPrepHashTableForUnmatched(hjstate);
    2042             : 
    2043          66 :     return true;
    2044             : }
    2045             : 
    2046             : /*
    2047             :  * ExecScanHashTableForUnmatched
    2048             :  *      scan the hash table for unmatched inner tuples
    2049             :  *
    2050             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2051             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2052             :  * for the latter.
    2053             :  */
    2054             : bool
    2055      374298 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
    2056             : {
    2057      374298 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2058      374298 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2059             : 
    2060             :     for (;;)
    2061             :     {
    2062             :         /*
    2063             :          * hj_CurTuple is the address of the tuple last returned from the
    2064             :          * current bucket, or NULL if it's time to start scanning a new
    2065             :          * bucket.
    2066             :          */
    2067     5072894 :         if (hashTuple != NULL)
    2068      370790 :             hashTuple = hashTuple->next.unshared;
    2069     4702104 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2070             :         {
    2071     4698608 :             hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    2072     4698608 :             hjstate->hj_CurBucketNo++;
    2073             :         }
    2074        3496 :         else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
    2075             :         {
    2076           0 :             int         j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
    2077             : 
    2078           0 :             hashTuple = hashtable->skewBucket[j]->tuples;
    2079           0 :             hjstate->hj_CurSkewBucketNo++;
    2080             :         }
    2081             :         else
    2082        3496 :             break;              /* finished all buckets */
    2083             : 
    2084     5471162 :         while (hashTuple != NULL)
    2085             :         {
    2086      772566 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2087             :             {
    2088             :                 TupleTableSlot *inntuple;
    2089             : 
    2090             :                 /* insert hashtable's tuple into exec slot */
    2091      370802 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2092             :                                                  hjstate->hj_HashTupleSlot,
    2093             :                                                  false);    /* do not pfree */
    2094      370802 :                 econtext->ecxt_innertuple = inntuple;
    2095             : 
    2096             :                 /*
    2097             :                  * Reset temp memory each time; although this function doesn't
    2098             :                  * do any qual eval, the caller will, so let's keep it
    2099             :                  * parallel to ExecScanHashBucket.
    2100             :                  */
    2101      370802 :                 ResetExprContext(econtext);
    2102             : 
    2103      370802 :                 hjstate->hj_CurTuple = hashTuple;
    2104      370802 :                 return true;
    2105             :             }
    2106             : 
    2107      401764 :             hashTuple = hashTuple->next.unshared;
    2108             :         }
    2109             : 
    2110             :         /* allow this loop to be cancellable */
    2111     4698596 :         CHECK_FOR_INTERRUPTS();
    2112             :     }
    2113             : 
    2114             :     /*
    2115             :      * no more unmatched tuples
    2116             :      */
    2117        3496 :     return false;
    2118             : }
    2119             : 
    2120             : /*
    2121             :  * ExecParallelScanHashTableForUnmatched
    2122             :  *      scan the hash table for unmatched inner tuples, in parallel join
    2123             :  *
    2124             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2125             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2126             :  * for the latter.
    2127             :  */
    2128             : bool
    2129      120072 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
    2130             :                                       ExprContext *econtext)
    2131             : {
    2132      120072 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2133      120072 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2134             : 
    2135             :     for (;;)
    2136             :     {
    2137             :         /*
    2138             :          * hj_CurTuple is the address of the tuple last returned from the
    2139             :          * current bucket, or NULL if it's time to start scanning a new
    2140             :          * bucket.
    2141             :          */
    2142      734472 :         if (hashTuple != NULL)
    2143      120006 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2144      614466 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2145      614400 :             hashTuple = ExecParallelHashFirstTuple(hashtable,
    2146      614400 :                                                    hjstate->hj_CurBucketNo++);
    2147             :         else
    2148          66 :             break;              /* finished all buckets */
    2149             : 
    2150      974406 :         while (hashTuple != NULL)
    2151             :         {
    2152      360006 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2153             :             {
    2154             :                 TupleTableSlot *inntuple;
    2155             : 
    2156             :                 /* insert hashtable's tuple into exec slot */
    2157      120006 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2158             :                                                  hjstate->hj_HashTupleSlot,
    2159             :                                                  false);    /* do not pfree */
    2160      120006 :                 econtext->ecxt_innertuple = inntuple;
    2161             : 
    2162             :                 /*
    2163             :                  * Reset temp memory each time; although this function doesn't
    2164             :                  * do any qual eval, the caller will, so let's keep it
    2165             :                  * parallel to ExecScanHashBucket.
    2166             :                  */
    2167      120006 :                 ResetExprContext(econtext);
    2168             : 
    2169      120006 :                 hjstate->hj_CurTuple = hashTuple;
    2170      120006 :                 return true;
    2171             :             }
    2172             : 
    2173      240000 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2174             :         }
    2175             : 
    2176             :         /* allow this loop to be cancellable */
    2177      614400 :         CHECK_FOR_INTERRUPTS();
    2178             :     }
    2179             : 
    2180             :     /*
    2181             :      * no more unmatched tuples
    2182             :      */
    2183          66 :     return false;
    2184             : }
    2185             : 
    2186             : /*
    2187             :  * ExecHashTableReset
    2188             :  *
    2189             :  *      reset hash table header for new batch
    2190             :  */
    2191             : void
    2192        1164 : ExecHashTableReset(HashJoinTable hashtable)
    2193             : {
    2194             :     MemoryContext oldcxt;
    2195        1164 :     int         nbuckets = hashtable->nbuckets;
    2196             : 
    2197             :     /*
    2198             :      * Release all the hash buckets and tuples acquired in the prior pass, and
    2199             :      * reinitialize the context for a new pass.
    2200             :      */
    2201        1164 :     MemoryContextReset(hashtable->batchCxt);
    2202        1164 :     oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
    2203             : 
    2204             :     /* Reallocate and reinitialize the hash bucket headers. */
    2205        1164 :     hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
    2206             : 
    2207        1164 :     hashtable->spaceUsed = 0;
    2208             : 
    2209        1164 :     MemoryContextSwitchTo(oldcxt);
    2210             : 
    2211             :     /* Forget the chunks (the memory was freed by the context reset above). */
    2212        1164 :     hashtable->chunks = NULL;
    2213        1164 : }
    2214             : 
    2215             : /*
    2216             :  * ExecHashTableResetMatchFlags
    2217             :  *      Clear all the HeapTupleHeaderHasMatch flags in the table
    2218             :  */
    2219             : void
    2220          20 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
    2221             : {
    2222             :     HashJoinTuple tuple;
    2223             :     int         i;
    2224             : 
    2225             :     /* Reset all flags in the main table ... */
    2226       20500 :     for (i = 0; i < hashtable->nbuckets; i++)
    2227             :     {
    2228       20650 :         for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
    2229         170 :              tuple = tuple->next.unshared)
    2230         170 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2231             :     }
    2232             : 
    2233             :     /* ... and the same for the skew buckets, if any */
    2234          20 :     for (i = 0; i < hashtable->nSkewBuckets; i++)
    2235             :     {
    2236           0 :         int         j = hashtable->skewBucketNums[i];
    2237           0 :         HashSkewBucket *skewBucket = hashtable->skewBucket[j];
    2238             : 
    2239           0 :         for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
    2240           0 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2241             :     }
    2242          20 : }
    2243             : 
    2244             : 
    2245             : void
    2246        1314 : ExecReScanHash(HashState *node)
    2247             : {
    2248        1314 :     PlanState  *outerPlan = outerPlanState(node);
    2249             : 
    2250             :     /*
    2251             :      * if chgParam of subnode is not null then plan will be re-scanned by
    2252             :      * first ExecProcNode.
    2253             :      */
    2254        1314 :     if (outerPlan->chgParam == NULL)
    2255          30 :         ExecReScan(outerPlan);
    2256        1314 : }
    2257             : 
    2258             : 
    2259             : /*
    2260             :  * ExecHashBuildSkewHash
    2261             :  *
    2262             :  *      Set up for skew optimization if we can identify the most common values
    2263             :  *      (MCVs) of the outer relation's join key.  We make a skew hash bucket
    2264             :  *      for the hash value of each MCV, up to the number of slots allowed
    2265             :  *      based on available memory.
    2266             :  */
    2267             : static void
    2268         106 : ExecHashBuildSkewHash(HashState *hashstate, HashJoinTable hashtable,
    2269             :                       Hash *node, int mcvsToUse)
    2270             : {
    2271             :     HeapTupleData *statsTuple;
    2272             :     AttStatsSlot sslot;
    2273             : 
    2274             :     /* Do nothing if planner didn't identify the outer relation's join key */
    2275         106 :     if (!OidIsValid(node->skewTable))
    2276          72 :         return;
    2277             :     /* Also, do nothing if we don't have room for at least one skew bucket */
    2278         106 :     if (mcvsToUse <= 0)
    2279           0 :         return;
    2280             : 
    2281             :     /*
    2282             :      * Try to find the MCV statistics for the outer relation's join key.
    2283             :      */
    2284         106 :     statsTuple = SearchSysCache3(STATRELATTINH,
    2285             :                                  ObjectIdGetDatum(node->skewTable),
    2286         106 :                                  Int16GetDatum(node->skewColumn),
    2287         106 :                                  BoolGetDatum(node->skewInherit));
    2288         106 :     if (!HeapTupleIsValid(statsTuple))
    2289          72 :         return;
    2290             : 
    2291          34 :     if (get_attstatsslot(&sslot, statsTuple,
    2292             :                          STATISTIC_KIND_MCV, InvalidOid,
    2293             :                          ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
    2294             :     {
    2295             :         double      frac;
    2296             :         int         nbuckets;
    2297             :         int         i;
    2298             : 
    2299           6 :         if (mcvsToUse > sslot.nvalues)
    2300           0 :             mcvsToUse = sslot.nvalues;
    2301             : 
    2302             :         /*
    2303             :          * Calculate the expected fraction of outer relation that will
    2304             :          * participate in the skew optimization.  If this isn't at least
    2305             :          * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
    2306             :          */
    2307           6 :         frac = 0;
    2308         132 :         for (i = 0; i < mcvsToUse; i++)
    2309         126 :             frac += sslot.numbers[i];
    2310           6 :         if (frac < SKEW_MIN_OUTER_FRACTION)
    2311             :         {
    2312           0 :             free_attstatsslot(&sslot);
    2313           0 :             ReleaseSysCache(statsTuple);
    2314           0 :             return;
    2315             :         }
    2316             : 
    2317             :         /*
    2318             :          * Okay, set up the skew hashtable.
    2319             :          *
    2320             :          * skewBucket[] is an open addressing hashtable with a power of 2 size
    2321             :          * that is greater than the number of MCV values.  (This ensures there
    2322             :          * will be at least one null entry, so searches will always
    2323             :          * terminate.)
    2324             :          *
    2325             :          * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
    2326             :          * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
    2327             :          * since we limit pg_statistic entries to much less than that.
    2328             :          */
    2329           6 :         nbuckets = pg_nextpower2_32(mcvsToUse + 1);
    2330             :         /* use two more bits just to help avoid collisions */
    2331           6 :         nbuckets <<= 2;
    2332             : 
    2333           6 :         hashtable->skewEnabled = true;
    2334           6 :         hashtable->skewBucketLen = nbuckets;
    2335             : 
    2336             :         /*
    2337             :          * We allocate the bucket memory in the hashtable's batch context. It
    2338             :          * is only needed during the first batch, and this ensures it will be
    2339             :          * automatically removed once the first batch is done.
    2340             :          */
    2341           6 :         hashtable->skewBucket = (HashSkewBucket **)
    2342           6 :             MemoryContextAllocZero(hashtable->batchCxt,
    2343             :                                    nbuckets * sizeof(HashSkewBucket *));
    2344           6 :         hashtable->skewBucketNums = (int *)
    2345           6 :             MemoryContextAllocZero(hashtable->batchCxt,
    2346             :                                    mcvsToUse * sizeof(int));
    2347             : 
    2348           6 :         hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
    2349           6 :             + mcvsToUse * sizeof(int);
    2350           6 :         hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
    2351           6 :             + mcvsToUse * sizeof(int);
    2352           6 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    2353           6 :             hashtable->spacePeak = hashtable->spaceUsed;
    2354             : 
    2355             :         /*
    2356             :          * Create a skew bucket for each MCV hash value.
    2357             :          *
    2358             :          * Note: it is very important that we create the buckets in order of
    2359             :          * decreasing MCV frequency.  If we have to remove some buckets, they
    2360             :          * must be removed in reverse order of creation (see notes in
    2361             :          * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
    2362             :          * be removed first.
    2363             :          */
    2364             : 
    2365         132 :         for (i = 0; i < mcvsToUse; i++)
    2366             :         {
    2367             :             uint32      hashvalue;
    2368             :             int         bucket;
    2369             : 
    2370         126 :             hashvalue = DatumGetUInt32(FunctionCall1Coll(hashstate->skew_hashfunction,
    2371             :                                                          hashstate->skew_collation,
    2372         126 :                                                          sslot.values[i]));
    2373             : 
    2374             :             /*
    2375             :              * While we have not hit a hole in the hashtable and have not hit
    2376             :              * the desired bucket, we have collided with some previous hash
    2377             :              * value, so try the next bucket location.  NB: this code must
    2378             :              * match ExecHashGetSkewBucket.
    2379             :              */
    2380         126 :             bucket = hashvalue & (nbuckets - 1);
    2381         126 :             while (hashtable->skewBucket[bucket] != NULL &&
    2382           0 :                    hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2383           0 :                 bucket = (bucket + 1) & (nbuckets - 1);
    2384             : 
    2385             :             /*
    2386             :              * If we found an existing bucket with the same hashvalue, leave
    2387             :              * it alone.  It's okay for two MCVs to share a hashvalue.
    2388             :              */
    2389         126 :             if (hashtable->skewBucket[bucket] != NULL)
    2390           0 :                 continue;
    2391             : 
    2392             :             /* Okay, create a new skew bucket for this hashvalue. */
    2393         252 :             hashtable->skewBucket[bucket] = (HashSkewBucket *)
    2394         126 :                 MemoryContextAlloc(hashtable->batchCxt,
    2395             :                                    sizeof(HashSkewBucket));
    2396         126 :             hashtable->skewBucket[bucket]->hashvalue = hashvalue;
    2397         126 :             hashtable->skewBucket[bucket]->tuples = NULL;
    2398         126 :             hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
    2399         126 :             hashtable->nSkewBuckets++;
    2400         126 :             hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
    2401         126 :             hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
    2402         126 :             if (hashtable->spaceUsed > hashtable->spacePeak)
    2403         126 :                 hashtable->spacePeak = hashtable->spaceUsed;
    2404             :         }
    2405             : 
    2406           6 :         free_attstatsslot(&sslot);
    2407             :     }
    2408             : 
    2409          34 :     ReleaseSysCache(statsTuple);
    2410             : }
    2411             : 
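/*
 * [Editor's note -- illustrative sketch, not part of nodeHash.c]
 *
 * A worked example of the skew hashtable sizing above: assuming
 * mcvsToUse = 100, pg_nextpower2_32(101) returns 128, and the two extra
 * bits of headroom ("nbuckets <<= 2") give a 512-entry open addressing
 * table for at most 100 occupied slots, which keeps probe sequences short
 * and guarantees empty entries so searches always terminate.
 */
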
    2412             : /*
    2413             :  * ExecHashGetSkewBucket
    2414             :  *
    2415             :  *      Returns the index of the skew bucket for this hashvalue,
    2416             :  *      or INVALID_SKEW_BUCKET_NO if the hashvalue is not
    2417             :  *      associated with any active skew bucket.
    2418             :  */
    2419             : int
    2420    24519426 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
    2421             : {
    2422             :     int         bucket;
    2423             : 
    2424             :     /*
    2425             :      * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
    2426             :      * particular, this happens after the initial batch is done).
    2427             :      */
    2428    24519426 :     if (!hashtable->skewEnabled)
    2429    24399426 :         return INVALID_SKEW_BUCKET_NO;
    2430             : 
    2431             :     /*
    2432             :      * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
    2433             :      */
    2434      120000 :     bucket = hashvalue & (hashtable->skewBucketLen - 1);
    2435             : 
    2436             :     /*
    2437             :      * While we have not hit a hole in the hashtable and have not hit the
    2438             :      * desired bucket, we have collided with some other hash value, so try the
    2439             :      * next bucket location.
    2440             :      */
    2441      127830 :     while (hashtable->skewBucket[bucket] != NULL &&
    2442       10818 :            hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2443        7830 :         bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
    2444             : 
    2445             :     /*
    2446             :      * Found the desired bucket?
    2447             :      */
    2448      120000 :     if (hashtable->skewBucket[bucket] != NULL)
    2449        2988 :         return bucket;
    2450             : 
    2451             :     /*
    2452             :      * There must not be any hashtable entry for this hash value.
    2453             :      */
    2454      117012 :     return INVALID_SKEW_BUCKET_NO;
    2455             : }
    2456             : 
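/*
 * [Editor's note -- illustrative sketch, not part of nodeHash.c]
 *
 * Because skewBucketLen is a power of 2, the probe sequence above wraps
 * around cheaply: assuming skewBucketLen = 512, (511 + 1) & 511 = 0, so a
 * collision in the last slot continues the search at slot 0 instead of
 * running off the end of the array.
 */
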
    2457             : /*
    2458             :  * ExecHashSkewTableInsert
    2459             :  *
    2460             :  *      Insert a tuple into the skew hashtable.
    2461             :  *
    2462             :  * This should generally match up with the current-batch case in
    2463             :  * ExecHashTableInsert.
    2464             :  */
    2465             : static void
    2466         588 : ExecHashSkewTableInsert(HashJoinTable hashtable,
    2467             :                         TupleTableSlot *slot,
    2468             :                         uint32 hashvalue,
    2469             :                         int bucketNumber)
    2470             : {
    2471             :     bool        shouldFree;
    2472         588 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    2473             :     HashJoinTuple hashTuple;
    2474             :     int         hashTupleSize;
    2475             : 
    2476             :     /* Create the HashJoinTuple */
    2477         588 :     hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2478         588 :     hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
    2479             :                                                    hashTupleSize);
    2480         588 :     hashTuple->hashvalue = hashvalue;
    2481         588 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    2482         588 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    2483             : 
    2484             :     /* Push it onto the front of the skew bucket's list */
    2485         588 :     hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
    2486         588 :     hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
    2487             :     Assert(hashTuple != hashTuple->next.unshared);
    2488             : 
    2489             :     /* Account for space used, and back off if we've used too much */
    2490         588 :     hashtable->spaceUsed += hashTupleSize;
    2491         588 :     hashtable->spaceUsedSkew += hashTupleSize;
    2492         588 :     if (hashtable->spaceUsed > hashtable->spacePeak)
    2493         432 :         hashtable->spacePeak = hashtable->spaceUsed;
    2494         690 :     while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
    2495         102 :         ExecHashRemoveNextSkewBucket(hashtable);
    2496             : 
    2497             :     /* Check we are not over the total spaceAllowed, either */
    2498         588 :     if (hashtable->spaceUsed > hashtable->spaceAllowed)
    2499           0 :         ExecHashIncreaseNumBatches(hashtable);
    2500             : 
    2501         588 :     if (shouldFree)
    2502         588 :         heap_free_minimal_tuple(tuple);
    2503         588 : }
    2504             : 
    2505             : /*
    2506             :  *      ExecHashRemoveNextSkewBucket
    2507             :  *
    2508             :  *      Remove the least valuable skew bucket by pushing its tuples into
    2509             :  *      the main hash table.
    2510             :  */
    2511             : static void
    2512         102 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
    2513             : {
    2514             :     int         bucketToRemove;
    2515             :     HashSkewBucket *bucket;
    2516             :     uint32      hashvalue;
    2517             :     int         bucketno;
    2518             :     int         batchno;
    2519             :     HashJoinTuple hashTuple;
    2520             : 
    2521             :     /* Locate the bucket to remove */
    2522         102 :     bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
    2523         102 :     bucket = hashtable->skewBucket[bucketToRemove];
    2524             : 
    2525             :     /*
    2526             :      * Calculate which bucket and batch the tuples belong to in the main
    2527             :      * hashtable.  They all have the same hash value, so it's the same for all
    2528             :      * of them.  Also note that it's not possible for nbatch to increase while
    2529             :      * we are processing the tuples.
    2530             :      */
    2531         102 :     hashvalue = bucket->hashvalue;
    2532         102 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    2533             : 
    2534             :     /* Process all tuples in the bucket */
    2535         102 :     hashTuple = bucket->tuples;
    2536         450 :     while (hashTuple != NULL)
    2537             :     {
    2538         348 :         HashJoinTuple nextHashTuple = hashTuple->next.unshared;
    2539             :         MinimalTuple tuple;
    2540             :         Size        tupleSize;
    2541             : 
    2542             :         /*
    2543             :          * This code must agree with ExecHashTableInsert.  We do not use
    2544             :          * ExecHashTableInsert directly as ExecHashTableInsert expects a
    2545             :          * TupleTableSlot while we already have HashJoinTuples.
    2546             :          */
    2547         348 :         tuple = HJTUPLE_MINTUPLE(hashTuple);
    2548         348 :         tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2549             : 
    2550             :         /* Decide whether to put the tuple in the hash table or a temp file */
    2551         348 :         if (batchno == hashtable->curbatch)
    2552             :         {
    2553             :             /* Move the tuple to the main hash table */
    2554             :             HashJoinTuple copyTuple;
    2555             : 
    2556             :             /*
    2557             :              * We must copy the tuple into the dense storage, else it will not
    2558             :              * be found by, eg, ExecHashIncreaseNumBatches.
    2559             :              */
    2560         138 :             copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
    2561         138 :             memcpy(copyTuple, hashTuple, tupleSize);
    2562         138 :             pfree(hashTuple);
    2563             : 
    2564         138 :             copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    2565         138 :             hashtable->buckets.unshared[bucketno] = copyTuple;
    2566             : 
    2567             :             /* We have reduced skew space, but overall space doesn't change */
    2568         138 :             hashtable->spaceUsedSkew -= tupleSize;
    2569             :         }
    2570             :         else
    2571             :         {
    2572             :             /* Put the tuple into a temp file for later batches */
    2573             :             Assert(batchno > hashtable->curbatch);
    2574         210 :             ExecHashJoinSaveTuple(tuple, hashvalue,
    2575         210 :                                   &hashtable->innerBatchFile[batchno],
    2576             :                                   hashtable);
    2577         210 :             pfree(hashTuple);
    2578         210 :             hashtable->spaceUsed -= tupleSize;
    2579         210 :             hashtable->spaceUsedSkew -= tupleSize;
    2580             :         }
    2581             : 
    2582         348 :         hashTuple = nextHashTuple;
    2583             : 
    2584             :         /* allow this loop to be cancellable */
    2585         348 :         CHECK_FOR_INTERRUPTS();
    2586             :     }
    2587             : 
    2588             :     /*
    2589             :      * Free the bucket struct itself and reset the hashtable entry to NULL.
    2590             :      *
    2591             :      * NOTE: this is not nearly as simple as it looks on the surface, because
    2592             :      * of the possibility of collisions in the hashtable.  Suppose that hash
    2593             :      * values A and B collide at a particular hashtable entry, and that A was
    2594             :      * entered first so B gets shifted to a different table entry.  If we were
    2595             :      * to remove A first then ExecHashGetSkewBucket would mistakenly start
    2596             :      * reporting that B is not in the hashtable, because it would hit the NULL
    2597             :      * before finding B.  However, we always remove entries in the reverse
    2598             :      * order of creation, so this failure cannot happen.
    2599             :      */
    2600         102 :     hashtable->skewBucket[bucketToRemove] = NULL;
    2601         102 :     hashtable->nSkewBuckets--;
    2602         102 :     pfree(bucket);
    2603         102 :     hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
    2604         102 :     hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
    2605             : 
    2606             :     /*
    2607             :      * If we have removed all skew buckets then give up on skew optimization.
    2608             :      * Release the arrays since they aren't useful any more.
    2609             :      */
    2610         102 :     if (hashtable->nSkewBuckets == 0)
    2611             :     {
    2612           0 :         hashtable->skewEnabled = false;
    2613           0 :         pfree(hashtable->skewBucket);
    2614           0 :         pfree(hashtable->skewBucketNums);
    2615           0 :         hashtable->skewBucket = NULL;
    2616           0 :         hashtable->skewBucketNums = NULL;
    2617           0 :         hashtable->spaceUsed -= hashtable->spaceUsedSkew;
    2618           0 :         hashtable->spaceUsedSkew = 0;
    2619             :     }
    2620         102 : }
    2621             : 
    2622             : /*
    2623             :  * Reserve space in the DSM segment for instrumentation data.
    2624             :  */
    2625             : void
    2626         192 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
    2627             : {
    2628             :     size_t      size;
    2629             : 
    2630             :     /* don't need this if not instrumenting or no workers */
    2631         192 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2632         108 :         return;
    2633             : 
    2634          84 :     size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
    2635          84 :     size = add_size(size, offsetof(SharedHashInfo, hinstrument));
    2636          84 :     shm_toc_estimate_chunk(&pcxt->estimator, size);
    2637          84 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
    2638             : }
    2639             : 
    2640             : /*
    2641             :  * Set up a space in the DSM for all workers to record instrumentation data
    2642             :  * about their hash table.
    2643             :  */
    2644             : void
    2645         192 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
    2646             : {
    2647             :     size_t      size;
    2648             : 
    2649             :     /* don't need this if not instrumenting or no workers */
    2650         192 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2651         108 :         return;
    2652             : 
    2653          84 :     size = offsetof(SharedHashInfo, hinstrument) +
    2654          84 :         pcxt->nworkers * sizeof(HashInstrumentation);
    2655          84 :     node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
    2656             : 
    2657             :     /* Each per-worker area must start out as zeroes. */
    2658          84 :     memset(node->shared_info, 0, size);
    2659             : 
    2660          84 :     node->shared_info->num_workers = pcxt->nworkers;
    2661          84 :     shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
    2662          84 :                    node->shared_info);
    2663             : }
    2664             : 
    2665             : /*
    2666             :  * Locate the DSM space for hash table instrumentation data that we'll write
    2667             :  * to at shutdown time.
    2668             :  */
    2669             : void
    2670         546 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
    2671             : {
    2672             :     SharedHashInfo *shared_info;
    2673             : 
    2674             :     /* don't need this if not instrumenting */
    2675         546 :     if (!node->ps.instrument)
    2676         294 :         return;
    2677             : 
    2678             :     /*
    2679             :      * Find our entry in the shared area, and set up a pointer to it so that
    2680             :      * we'll accumulate stats there when shutting down or rebuilding the hash
    2681             :      * table.
    2682             :      */
    2683             :     shared_info = (SharedHashInfo *)
    2684         252 :         shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
    2685         252 :     node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
    2686             : }
    2687             : 
    2688             : /*
    2689             :  * Collect EXPLAIN stats if needed, saving them into DSM memory if
    2690             :  * ExecHashInitializeWorker was called, or local storage if not.  In the
    2691             :  * parallel case, this must be done in ExecShutdownHash() rather than
    2692             :  * ExecEndHash() because the latter runs after we've detached from the DSM
    2693             :  * segment.
    2694             :  */
    2695             : void
    2696       25330 : ExecShutdownHash(HashState *node)
    2697             : {
    2698             :     /* Allocate save space if EXPLAIN'ing and we didn't do so already */
    2699       25330 :     if (node->ps.instrument && !node->hinstrument)
    2700         108 :         node->hinstrument = palloc0_object(HashInstrumentation);
    2701             :     /* Now accumulate data for the current (final) hash table */
    2702       25330 :     if (node->hinstrument && node->hashtable)
    2703         296 :         ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
    2704       25330 : }
    2705             : 
    2706             : /*
    2707             :  * Retrieve instrumentation data from workers before the DSM segment is
    2708             :  * detached, so that EXPLAIN can access it.
    2709             :  */
    2710             : void
    2711          84 : ExecHashRetrieveInstrumentation(HashState *node)
    2712             : {
    2713          84 :     SharedHashInfo *shared_info = node->shared_info;
    2714             :     size_t      size;
    2715             : 
    2716          84 :     if (shared_info == NULL)
    2717           0 :         return;
    2718             : 
    2719             :     /* Replace node->shared_info with a copy in backend-local memory. */
    2720          84 :     size = offsetof(SharedHashInfo, hinstrument) +
    2721          84 :         shared_info->num_workers * sizeof(HashInstrumentation);
    2722          84 :     node->shared_info = palloc(size);
    2723          84 :     memcpy(node->shared_info, shared_info, size);
    2724             : }
    2725             : 
    2726             : /*
    2727             :  * Accumulate instrumentation data from 'hashtable' into an
    2728             :  * initially-zeroed HashInstrumentation struct.
    2729             :  *
    2730             :  * This is used to merge information across successive hash table instances
    2731             :  * within a single plan node.  We take the maximum values of each interesting
    2732             :  * number.  The largest nbuckets and largest nbatch values might have occurred
    2733             :  * in different instances, so there's some risk of confusion from reporting
    2734             :  * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
    2735             :  * issue if we don't report the largest values.  Similarly, we want to report
    2736             :  * the largest spacePeak regardless of whether it happened in the same
    2737             :  * instance as the largest nbuckets or nbatch.  All the instances should have
    2738             :  * the same nbuckets_original and nbatch_original; but there's little value
    2739             :  * in depending on that here, so handle them the same way.
    2740             :  */
    2741             : void
    2742         296 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
    2743             :                              HashJoinTable hashtable)
    2744             : {
    2745         296 :     instrument->nbuckets = Max(instrument->nbuckets,
    2746             :                                hashtable->nbuckets);
    2747         296 :     instrument->nbuckets_original = Max(instrument->nbuckets_original,
    2748             :                                         hashtable->nbuckets_original);
    2749         296 :     instrument->nbatch = Max(instrument->nbatch,
    2750             :                              hashtable->nbatch);
    2751         296 :     instrument->nbatch_original = Max(instrument->nbatch_original,
    2752             :                                       hashtable->nbatch_original);
    2753         296 :     instrument->space_peak = Max(instrument->space_peak,
    2754             :                                  hashtable->spacePeak);
    2755         296 : }
    2756             : 
    2757             : /*
    2758             :  * Allocate 'size' bytes from the currently active HashMemoryChunk
    2759             :  */
    2760             : static void *
    2761     7864520 : dense_alloc(HashJoinTable hashtable, Size size)
    2762             : {
    2763             :     HashMemoryChunk newChunk;
    2764             :     char       *ptr;
    2765             : 
    2766             :     /* just in case the size is not already aligned properly */
    2767     7864520 :     size = MAXALIGN(size);
    2768             : 
    2769             :     /*
    2770             :      * If tuple size is larger than threshold, allocate a separate chunk.
    2771             :      */
    2772     7864520 :     if (size > HASH_CHUNK_THRESHOLD)
    2773             :     {
    2774             :         /* allocate new chunk and put it at the beginning of the list */
    2775           0 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2776             :                                                         HASH_CHUNK_HEADER_SIZE + size);
    2777           0 :         newChunk->maxlen = size;
    2778           0 :         newChunk->used = size;
    2779           0 :         newChunk->ntuples = 1;
    2780             : 
    2781             :         /*
    2782             :          * Add this chunk to the list after the first existing chunk, so that
    2783             :          * we don't lose the remaining space in the "current" chunk.
    2784             :          */
    2785           0 :         if (hashtable->chunks != NULL)
    2786             :         {
    2787           0 :             newChunk->next = hashtable->chunks->next;
    2788           0 :             hashtable->chunks->next.unshared = newChunk;
    2789             :         }
    2790             :         else
    2791             :         {
    2792           0 :             newChunk->next.unshared = hashtable->chunks;
    2793           0 :             hashtable->chunks = newChunk;
    2794             :         }
    2795             : 
    2796           0 :         return HASH_CHUNK_DATA(newChunk);
    2797             :     }
    2798             : 
    2799             :     /*
    2800             :      * See if we have enough space for it in the current chunk (if any). If
    2801             :      * not, allocate a fresh chunk.
    2802             :      */
    2803     7864520 :     if ((hashtable->chunks == NULL) ||
    2804     7846466 :         (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
    2805             :     {
    2806             :         /* allocate new chunk and put it at the beginning of the list */
    2807       28038 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2808             :                                                         HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
    2809             : 
    2810       28038 :         newChunk->maxlen = HASH_CHUNK_SIZE;
    2811       28038 :         newChunk->used = size;
    2812       28038 :         newChunk->ntuples = 1;
    2813             : 
    2814       28038 :         newChunk->next.unshared = hashtable->chunks;
    2815       28038 :         hashtable->chunks = newChunk;
    2816             : 
    2817       28038 :         return HASH_CHUNK_DATA(newChunk);
    2818             :     }
    2819             : 
    2820             :     /* There is enough space in the current chunk, let's add the tuple */
    2821     7836482 :     ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
    2822     7836482 :     hashtable->chunks->used += size;
    2823     7836482 :     hashtable->chunks->ntuples += 1;
    2824             : 
    2825             :     /* return pointer to the start of the tuple memory */
    2826     7836482 :     return ptr;
    2827             : }
    2828             : 
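/*
 * [Editor's note -- illustrative sketch, not part of nodeHash.c]
 *
 * dense_alloc above is a bump allocator: each chunk hands out MAXALIGN'd
 * slices until it runs out of room, then a fresh chunk is pushed onto the
 * list.  A minimal standalone analogue using plain malloc is sketched
 * below; the chunk size is an arbitrary choice for the sketch, alignment
 * is omitted, and the oversized-tuple special case is simplified away.
 */
#include <stdlib.h>
#include <stddef.h>

typedef struct ExampleChunk
{
    struct ExampleChunk *next;
    size_t      used;
    size_t      maxlen;
    char        data[];         /* tuples are packed densely in here */
} ExampleChunk;

static void *
example_bump_alloc(ExampleChunk **chunks, size_t size)
{
    ExampleChunk *chunk = *chunks;

    /* start a new chunk if there is none, or the current one is full */
    if (chunk == NULL || chunk->maxlen - chunk->used < size)
    {
        size_t      maxlen = 32 * 1024; /* arbitrary chunk size for sketch */

        /* assumes size <= maxlen; real code gives big tuples own chunks */
        chunk = malloc(offsetof(ExampleChunk, data) + maxlen);
        if (chunk == NULL)
            return NULL;
        chunk->next = *chunks;
        chunk->used = 0;
        chunk->maxlen = maxlen;
        *chunks = chunk;
    }

    /* bump the fill pointer and hand back the slice just reserved */
    chunk->used += size;
    return chunk->data + chunk->used - size;
}
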
    2829             : /*
    2830             :  * Allocate space for a tuple in shared dense storage.  This is equivalent to
    2831             :  * dense_alloc but for Parallel Hash using shared memory.
    2832             :  *
    2833             :  * While loading a tuple into shared memory, we might run out of memory and
    2834             :  * decide to repartition, or determine that the load factor is too high and
    2835             :  * decide to expand the bucket array, or discover that another participant has
    2836             :  * commanded us to help do that.  Return NULL if number of buckets or batches
    2837             :  * has changed, indicating that the caller must retry (considering the
    2838             :  * possibility that the tuple no longer belongs in the same batch).
    2839             :  */
    2840             : static HashJoinTuple
    2841     2382450 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
    2842             :                            dsa_pointer *shared)
    2843             : {
    2844     2382450 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    2845             :     dsa_pointer chunk_shared;
    2846             :     HashMemoryChunk chunk;
    2847             :     Size        chunk_size;
    2848             :     HashJoinTuple result;
    2849     2382450 :     int         curbatch = hashtable->curbatch;
    2850             : 
    2851     2382450 :     size = MAXALIGN(size);
    2852             : 
    2853             :     /*
    2854             :      * Fast path: if there is enough space in this backend's current chunk,
    2855             :      * then we can allocate without any locking.
    2856             :      */
    2857     2382450 :     chunk = hashtable->current_chunk;
    2858     2382450 :     if (chunk != NULL &&
    2859     2381458 :         size <= HASH_CHUNK_THRESHOLD &&
    2860     2381458 :         chunk->maxlen - chunk->used >= size)
    2861             :     {
    2862             : 
    2863     2378696 :         chunk_shared = hashtable->current_chunk_shared;
    2864             :         Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
    2865     2378696 :         *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
    2866     2378696 :         result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
    2867     2378696 :         chunk->used += size;
    2868             : 
    2869             :         Assert(chunk->used <= chunk->maxlen);
    2870             :         Assert(result == dsa_get_address(hashtable->area, *shared));
    2871             : 
    2872     2378696 :         return result;
    2873             :     }
    2874             : 
    2875             :     /* Slow path: try to allocate a new chunk. */
    2876        3754 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    2877             : 
    2878             :     /*
    2879             :      * Check if we need to help increase the number of buckets or batches.
    2880             :      */
    2881        3754 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    2882        3714 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    2883             :     {
    2884         180 :         ParallelHashGrowth growth = pstate->growth;
    2885             : 
    2886         180 :         hashtable->current_chunk = NULL;
    2887         180 :         LWLockRelease(&pstate->lock);
    2888             : 
    2889             :         /* Another participant has commanded us to help grow. */
    2890         180 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    2891          40 :             ExecParallelHashIncreaseNumBatches(hashtable);
    2892         140 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    2893         140 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    2894             : 
    2895             :         /* The caller must retry. */
    2896         180 :         return NULL;
    2897             :     }
    2898             : 
    2899             :     /* Oversized tuples get their own chunk. */
    2900        3574 :     if (size > HASH_CHUNK_THRESHOLD)
    2901          48 :         chunk_size = size + HASH_CHUNK_HEADER_SIZE;
    2902             :     else
    2903        3526 :         chunk_size = HASH_CHUNK_SIZE;
    2904             : 
    2905             :     /* Check if it's time to grow batches or buckets. */
    2906        3574 :     if (pstate->growth != PHJ_GROWTH_DISABLED)
    2907             :     {
    2908             :         Assert(curbatch == 0);
    2909             :         Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    2910             : 
    2911             :         /*
    2912             :          * Check if our space limit would be exceeded.  To avoid choking on
    2913             :      * very large tuples or a very low hash_mem setting, we'll always allow
    2914             :          * each backend to allocate at least one chunk.
    2915             :          */
    2916        1820 :         if (hashtable->batches[0].at_least_one_chunk &&
    2917        1440 :             hashtable->batches[0].shared->size +
    2918        1440 :             chunk_size > pstate->space_allowed)
    2919             :         {
    2920          38 :             pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    2921          38 :             hashtable->batches[0].shared->space_exhausted = true;
    2922          38 :             LWLockRelease(&pstate->lock);
    2923             : 
    2924          38 :             return NULL;
    2925             :         }
    2926             : 
    2927             :         /* Check if our load factor limit would be exceeded. */
    2928        1782 :         if (hashtable->nbatch == 1)
    2929             :         {
    2930        1556 :             hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
    2931        1556 :             hashtable->batches[0].ntuples = 0;
    2932             :             /* Guard against integer overflow and alloc size overflow */
    2933        1556 :             if (hashtable->batches[0].shared->ntuples + 1 >
    2934        1556 :                 hashtable->nbuckets * NTUP_PER_BUCKET &&
    2935         108 :                 hashtable->nbuckets < (INT_MAX / 2) &&
    2936         108 :                 hashtable->nbuckets * 2 <=
    2937             :                 MaxAllocSize / sizeof(dsa_pointer_atomic))
    2938             :             {
    2939         108 :                 pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
    2940         108 :                 LWLockRelease(&pstate->lock);
    2941             : 
    2942         108 :                 return NULL;
    2943             :             }
    2944             :         }
    2945             :     }
    2946             : 
    2947             :     /* We are cleared to allocate a new chunk. */
    2948        3428 :     chunk_shared = dsa_allocate(hashtable->area, chunk_size);
    2949        3428 :     hashtable->batches[curbatch].shared->size += chunk_size;
    2950        3428 :     hashtable->batches[curbatch].at_least_one_chunk = true;
    2951             : 
    2952             :     /* Set up the chunk. */
    2953        3428 :     chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
    2954        3428 :     *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
    2955        3428 :     chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
    2956        3428 :     chunk->used = size;
    2957             : 
    2958             :     /*
    2959             :      * Push it onto the list of chunks, so that it can be found if we need to
    2960             :      * increase the number of buckets or batches (batch 0 only) and later for
    2961             :      * freeing the memory (all batches).
    2962             :      */
    2963        3428 :     chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
    2964        3428 :     hashtable->batches[curbatch].shared->chunks = chunk_shared;
    2965             : 
    2966        3428 :     if (size <= HASH_CHUNK_THRESHOLD)
    2967             :     {
    2968             :         /*
    2969             :          * Make this the current chunk so that we can use the fast path to
    2970             :          * fill the rest of it up in future calls.
    2971             :          */
    2972        3392 :         hashtable->current_chunk = chunk;
    2973        3392 :         hashtable->current_chunk_shared = chunk_shared;
    2974             :     }
    2975        3428 :     LWLockRelease(&pstate->lock);
    2976             : 
    2977             :     Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
    2978        3428 :     result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
    2979             : 
    2980        3428 :     return result;
    2981             : }
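
Because a NULL return means the number of batches or buckets may have changed
(this backend may even have helped grow them), callers recompute which batch
the tuple now belongs in and retry.  A standalone simulation of that retry loop
(hypothetical names; try_alloc stands in for the real allocator, and picking
the batch by modulo is a simplification):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Hypothetical allocator: fails once to simulate another participant
     * doubling the number of batches, then succeeds.
     */
    static void *
    try_alloc(size_t size, int *nbatch)
    {
        static bool grew = false;

        if (!grew)
        {
            grew = true;
            *nbatch *= 2;           /* pretend the table was repartitioned */
            return NULL;            /* caller must recompute batchno and retry */
        }
        return malloc(size);
    }

    int
    main(void)
    {
        unsigned int hashvalue = 0xdeadbeefU;
        int         nbatch = 4;
        void       *tuple = NULL;

        while (tuple == NULL)
        {
            int         batchno = (int) (hashvalue % (unsigned int) nbatch);

            printf("trying batch %d of %d\n", batchno, nbatch);
            tuple = try_alloc(48, &nbatch);
        }
        free(tuple);
        return 0;
    }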
    2982             : 
    2983             : /*
    2984             :  * One backend needs to set up the shared batch state including tuplestores.
    2985             :  * Other backends will ensure they have correctly configured accessors by
    2986             :  * calling ExecParallelHashEnsureBatchAccessors().
    2987             :  */
    2988             : static void
    2989         218 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
    2990             : {
    2991         218 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    2992             :     ParallelHashJoinBatch *batches;
    2993             :     MemoryContext oldcxt;
    2994             :     int         i;
    2995             : 
    2996             :     Assert(hashtable->batches == NULL);
    2997             : 
    2998             :     /* Allocate space. */
    2999         218 :     pstate->batches =
    3000         218 :         dsa_allocate0(hashtable->area,
    3001             :                       EstimateParallelHashJoinBatch(hashtable) * nbatch);
    3002         218 :     pstate->nbatch = nbatch;
    3003         218 :     batches = dsa_get_address(hashtable->area, pstate->batches);
    3004             : 
    3005             :     /*
    3006             :      * Use hash join spill memory context to allocate accessors, including
    3007             :      * buffers for the temporary files.
    3008             :      */
    3009         218 :     oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    3010             : 
    3011             :     /* Allocate this backend's accessor array. */
    3012         218 :     hashtable->nbatch = nbatch;
    3013         218 :     hashtable->batches =
    3014         218 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3015             : 
    3016             :     /* Set up the shared state, tuplestores and backend-local accessors. */
    3017        1084 :     for (i = 0; i < hashtable->nbatch; ++i)
    3018             :     {
    3019         866 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3020         866 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3021             :         char        name[MAXPGPATH];
    3022             : 
    3023             :         /*
    3024             :          * All members of shared were zero-initialized.  We just need to set
    3025             :          * up the Barrier.
    3026             :          */
    3027         866 :         BarrierInit(&shared->batch_barrier, 0);
    3028         866 :         if (i == 0)
    3029             :         {
    3030             :             /* Batch 0 doesn't need to be loaded. */
    3031         218 :             BarrierAttach(&shared->batch_barrier);
    3032         872 :             while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
    3033         654 :                 BarrierArriveAndWait(&shared->batch_barrier, 0);
    3034         218 :             BarrierDetach(&shared->batch_barrier);
    3035             :         }
    3036             : 
    3037             :         /* Initialize accessor state.  All members were zero-initialized. */
    3038         866 :         accessor->shared = shared;
    3039             : 
    3040             :         /* Initialize the shared tuplestores. */
    3041         866 :         snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
    3042         866 :         accessor->inner_tuples =
    3043         866 :             sts_initialize(ParallelHashJoinBatchInner(shared),
    3044             :                            pstate->nparticipants,
    3045             :                            ParallelWorkerNumber + 1,
    3046             :                            sizeof(uint32),
    3047             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3048             :                            &pstate->fileset,
    3049             :                            name);
    3050         866 :         snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
    3051         866 :         accessor->outer_tuples =
    3052         866 :             sts_initialize(ParallelHashJoinBatchOuter(shared,
    3053             :                                                       pstate->nparticipants),
    3054             :                            pstate->nparticipants,
    3055             :                            ParallelWorkerNumber + 1,
    3056             :                            sizeof(uint32),
    3057             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3058             :                            &pstate->fileset,
    3059             :                            name);
    3060             :     }
    3061             : 
    3062         218 :     MemoryContextSwitchTo(oldcxt);
    3063         218 : }
    3064             : 
    3065             : /*
    3066             :  * Free the current set of ParallelHashJoinBatchAccessor objects.
    3067             :  */
    3068             : static void
    3069          56 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
    3070             : {
    3071             :     int         i;
    3072             : 
    3073         182 :     for (i = 0; i < hashtable->nbatch; ++i)
    3074             :     {
    3075             :         /* Make sure no files are left open. */
    3076         126 :         sts_end_write(hashtable->batches[i].inner_tuples);
    3077         126 :         sts_end_write(hashtable->batches[i].outer_tuples);
    3078         126 :         sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3079         126 :         sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3080             :     }
    3081          56 :     pfree(hashtable->batches);
    3082          56 :     hashtable->batches = NULL;
    3083          56 : }
    3084             : 
    3085             : /*
    3086             :  * Make sure this backend has up-to-date accessors for the current set of
    3087             :  * batches.
    3088             :  */
    3089             : static void
    3090         890 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
    3091             : {
    3092         890 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3093             :     ParallelHashJoinBatch *batches;
    3094             :     MemoryContext oldcxt;
    3095             :     int         i;
    3096             : 
    3097         890 :     if (hashtable->batches != NULL)
    3098             :     {
    3099         656 :         if (hashtable->nbatch == pstate->nbatch)
    3100         656 :             return;
    3101           0 :         ExecParallelHashCloseBatchAccessors(hashtable);
    3102             :     }
    3103             : 
    3104             :     /*
    3105             :      * We should never see a state where the batch-tracking array is freed,
    3106             :      * because we should have given up sooner if we join when the build
    3107             :      * barrier has reached the PHJ_BUILD_FREE phase.
    3108             :      */
    3109             :     Assert(DsaPointerIsValid(pstate->batches));
    3110             : 
    3111             :     /*
    3112             :      * Use hash join spill memory context to allocate accessors, including
    3113             :      * buffers for the temporary files.
    3114             :      */
    3115         234 :     oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    3116             : 
    3117             :     /* Allocate this backend's accessor array. */
    3118         234 :     hashtable->nbatch = pstate->nbatch;
    3119         234 :     hashtable->batches =
    3120         234 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3121             : 
    3122             :     /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
    3123             :     batches = (ParallelHashJoinBatch *)
    3124         234 :         dsa_get_address(hashtable->area, pstate->batches);
    3125             : 
    3126             :     /* Set up the accessor array and attach to the tuplestores. */
    3127        1296 :     for (i = 0; i < hashtable->nbatch; ++i)
    3128             :     {
    3129        1062 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3130        1062 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3131             : 
    3132        1062 :         accessor->shared = shared;
    3133        1062 :         accessor->preallocated = 0;
    3134        1062 :         accessor->done = false;
    3135        1062 :         accessor->outer_eof = false;
    3136        1062 :         accessor->inner_tuples =
    3137        1062 :             sts_attach(ParallelHashJoinBatchInner(shared),
    3138             :                        ParallelWorkerNumber + 1,
    3139             :                        &pstate->fileset);
    3140        1062 :         accessor->outer_tuples =
    3141        1062 :             sts_attach(ParallelHashJoinBatchOuter(shared,
    3142             :                                                   pstate->nparticipants),
    3143             :                        ParallelWorkerNumber + 1,
    3144             :                        &pstate->fileset);
    3145             :     }
    3146             : 
    3147         234 :     MemoryContextSwitchTo(oldcxt);
    3148             : }
    3149             : 
    3150             : /*
    3151             :  * Allocate an empty shared memory hash table for a given batch.
    3152             :  */
    3153             : void
    3154         766 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
    3155             : {
    3156         766 :     ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
    3157             :     dsa_pointer_atomic *buckets;
    3158         766 :     int         nbuckets = hashtable->parallel_state->nbuckets;
    3159             :     int         i;
    3160             : 
    3161         766 :     batch->buckets =
    3162         766 :         dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
    3163             :     buckets = (dsa_pointer_atomic *)
    3164         766 :         dsa_get_address(hashtable->area, batch->buckets);
    3165     3148542 :     for (i = 0; i < nbuckets; ++i)
    3166     3147776 :         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    3167         766 : }
    3168             : 
    3169             : /*
    3170             :  * If we are currently attached to a shared hash join batch, detach.  If we
    3171             :  * are last to detach, clean up.
    3172             :  */
    3173             : void
    3174       18278 : ExecHashTableDetachBatch(HashJoinTable hashtable)
    3175             : {
    3176       18278 :     if (hashtable->parallel_state != NULL &&
    3177        1288 :         hashtable->curbatch >= 0)
    3178             :     {
    3179         892 :         int         curbatch = hashtable->curbatch;
    3180         892 :         ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    3181         892 :         bool        attached = true;
    3182             : 
    3183             :         /* Make sure any temporary files are closed. */
    3184         892 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    3185         892 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    3186             : 
    3187             :         /* After attaching we always get at least to PHJ_BATCH_PROBE. */
    3188             :         Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
    3189             :                BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    3190             : 
    3191             :         /*
    3192             :          * If we're abandoning the PHJ_BATCH_PROBE phase early without having
    3193             :          * reached the end of it, it means the plan doesn't want any more
    3194             :          * tuples, and it is happy to abandon any tuples buffered in this
    3195             :          * process's subplans.  For correctness, we can't allow any process to
    3196             :          * execute the PHJ_BATCH_SCAN phase, because we will never have the
    3197             :          * complete set of match bits.  Therefore we skip emitting unmatched
    3198             :          * tuples in all backends (if this is a full/right join), as if those
    3199             :          * tuples were all due to be emitted by this process and it has
    3200             :          * abandoned them too.
    3201             :          */
    3202         892 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
    3203         826 :             !hashtable->batches[curbatch].outer_eof)
    3204             :         {
    3205             :             /*
    3206             :              * This flag may be written to by multiple backends during
    3207             :              * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
    3208             :              * phase so requires no extra locking.
    3209             :              */
    3210           0 :             batch->skip_unmatched = true;
    3211             :         }
    3212             : 
    3213             :         /*
    3214             :          * Even if we aren't doing a full/right outer join, we'll step through
    3215             :          * the PHJ_BATCH_SCAN phase just to maintain the invariant that
    3216             :          * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
    3217             :          */
    3218         892 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
    3219         826 :             attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
    3220         892 :         if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
    3221             :         {
    3222             :             /*
    3223             :              * We are not longer attached to the batch barrier, but we're the
    3224             :              * process that was chosen to free resources and it's safe to
    3225             :              * assert the current phase.  The ParallelHashJoinBatch can't go
    3226             :              * away underneath us while we are attached to the build barrier,
    3227             :              * making this access safe.
    3228             :              */
    3229             :             Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
    3230             : 
    3231             :             /* Free shared chunks and buckets. */
    3232        3904 :             while (DsaPointerIsValid(batch->chunks))
    3233             :             {
    3234             :                 HashMemoryChunk chunk =
    3235        3138 :                     dsa_get_address(hashtable->area, batch->chunks);
    3236        3138 :                 dsa_pointer next = chunk->next.shared;
    3237             : 
    3238        3138 :                 dsa_free(hashtable->area, batch->chunks);
    3239        3138 :                 batch->chunks = next;
    3240             :             }
    3241         766 :             if (DsaPointerIsValid(batch->buckets))
    3242             :             {
    3243         766 :                 dsa_free(hashtable->area, batch->buckets);
    3244         766 :                 batch->buckets = InvalidDsaPointer;
    3245             :             }
    3246             :         }
    3247             : 
    3248             :         /*
    3249             :          * Track the largest batch we've been attached to.  Though each
    3250             :          * backend might see a different subset of batches, explain.c will
    3251             :          * scan the results from all backends to find the largest value.
    3252             :          */
    3253         892 :         hashtable->spacePeak =
    3254         892 :             Max(hashtable->spacePeak,
    3255             :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    3256             : 
    3257             :         /* Remember that we are not attached to a batch. */
    3258         892 :         hashtable->curbatch = -1;
    3259             :     }
    3260       18278 : }
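
The "last one to detach cleans up" rule that the batch barrier enforces can be
illustrated with a plain attach counter (a standalone analogue using C11
atomics; shared_batch_t and detach() are made-up names, and a counter is a
simplification of the Barrier machinery used above):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct shared_batch_t
    {
        atomic_int  attached;       /* how many participants are attached */
        char       *buckets;        /* stand-in for the shared bucket array */
    } shared_batch_t;

    static void
    detach(shared_batch_t *batch)
    {
        /* atomic_fetch_sub returns the value before the decrement */
        if (atomic_fetch_sub(&batch->attached, 1) == 1)
        {
            /* We were the last participant attached: free shared resources. */
            free(batch->buckets);
            free(batch);
            printf("last detacher cleaned up\n");
        }
    }

    int
    main(void)
    {
        shared_batch_t *batch = malloc(sizeof(shared_batch_t));

        atomic_init(&batch->attached, 2);   /* pretend two participants attached */
        batch->buckets = malloc(1024);

        detach(batch);              /* someone is still attached: no cleanup */
        detach(batch);              /* count reaches zero: clean up */
        return 0;
    }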
    3261             : 
    3262             : /*
    3263             :  * Detach from all shared resources.  If we are last to detach, clean up.
    3264             :  */
    3265             : void
    3266       17386 : ExecHashTableDetach(HashJoinTable hashtable)
    3267             : {
    3268       17386 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3269             : 
    3270             :     /*
    3271             :      * If we're involved in a parallel query, we must either have gotten all
    3272             :      * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
    3273             :      */
    3274             :     Assert(!pstate ||
    3275             :            BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
    3276             : 
    3277       17386 :     if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
    3278             :     {
    3279             :         int         i;
    3280             : 
    3281             :         /* Make sure any temporary files are closed. */
    3282         396 :         if (hashtable->batches)
    3283             :         {
    3284        2198 :             for (i = 0; i < hashtable->nbatch; ++i)
    3285             :             {
    3286        1802 :                 sts_end_write(hashtable->batches[i].inner_tuples);
    3287        1802 :                 sts_end_write(hashtable->batches[i].outer_tuples);
    3288        1802 :                 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3289        1802 :                 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3290             :             }
    3291             :         }
    3292             : 
    3293             :         /* If we're last to detach, clean up shared memory. */
    3294         396 :         if (BarrierArriveAndDetach(&pstate->build_barrier))
    3295             :         {
    3296             :             /*
    3297             :              * Late joining processes will see this state and give up
    3298             :              * immediately.
    3299             :              */
    3300             :             Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
    3301             : 
    3302         168 :             if (DsaPointerIsValid(pstate->batches))
    3303             :             {
    3304         168 :                 dsa_free(hashtable->area, pstate->batches);
    3305         168 :                 pstate->batches = InvalidDsaPointer;
    3306             :             }
    3307             :         }
    3308             :     }
    3309       17386 :     hashtable->parallel_state = NULL;
    3310       17386 : }
    3311             : 
    3312             : /*
    3313             :  * Get the first tuple in a given bucket identified by number.
    3314             :  */
    3315             : static inline HashJoinTuple
    3316     2774430 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
    3317             : {
    3318             :     HashJoinTuple tuple;
    3319             :     dsa_pointer p;
    3320             : 
    3321             :     Assert(hashtable->parallel_state);
    3322     2774430 :     p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
    3323     2774430 :     tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
    3324             : 
    3325     2774430 :     return tuple;
    3326             : }
    3327             : 
    3328             : /*
    3329             :  * Get the next tuple in the same bucket as 'tuple'.
    3330             :  */
    3331             : static inline HashJoinTuple
    3332     3800818 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
    3333             : {
    3334             :     HashJoinTuple next;
    3335             : 
    3336             :     Assert(hashtable->parallel_state);
    3337     3800818 :     next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
    3338             : 
    3339     3800818 :     return next;
    3340             : }
    3341             : 
    3342             : /*
    3343             :  * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
    3344             :  */
    3345             : static inline void
    3346     2938366 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
    3347             :                           HashJoinTuple tuple,
    3348             :                           dsa_pointer tuple_shared)
    3349             : {
    3350             :     for (;;)
    3351             :     {
    3352     2938366 :         tuple->next.shared = dsa_pointer_atomic_read(head);
    3353     2938366 :         if (dsa_pointer_atomic_compare_exchange(head,
    3354     2938366 :                                                 &tuple->next.shared,
    3355             :                                                 tuple_shared))
    3356     2926364 :             break;
    3357             :     }
    3358     2926364 : }
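
This is the standard lock-free "push onto an atomic list head" idiom: read the
head, point the new element at it, and retry the compare-and-swap until no
other backend slipped in between.  A self-contained version using C11 atomics
(node_t and push() are made-up names; dsa_pointer_atomic_compare_exchange plays
the role that atomic_compare_exchange_weak plays here):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct node_t
    {
        struct node_t *next;
        int         value;
    } node_t;

    static _Atomic(node_t *) head;      /* list head shared by all threads */

    /* Push 'n' onto the front of the list without taking a lock. */
    static void
    push(node_t *n)
    {
        node_t     *old = atomic_load(&head);

        do
        {
            n->next = old;          /* chain onto whatever the head was */
        } while (!atomic_compare_exchange_weak(&head, &old, n));
        /* On failure the CAS reloads 'old', so the loop simply retries. */
    }

    int
    main(void)
    {
        node_t     *n = malloc(sizeof(node_t));

        n->value = 42;
        push(n);
        printf("head now holds %d\n", atomic_load(&head)->value);
        return 0;
    }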
    3359             : 
    3360             : /*
    3361             :  * Prepare to work on a given batch.
    3362             :  */
    3363             : void
    3364        2008 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
    3365             : {
    3366             :     Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
    3367             : 
    3368        2008 :     hashtable->curbatch = batchno;
    3369        2008 :     hashtable->buckets.shared = (dsa_pointer_atomic *)
    3370        2008 :         dsa_get_address(hashtable->area,
    3371        2008 :                         hashtable->batches[batchno].shared->buckets);
    3372        2008 :     hashtable->nbuckets = hashtable->parallel_state->nbuckets;
    3373        2008 :     hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
    3374        2008 :     hashtable->current_chunk = NULL;
    3375        2008 :     hashtable->current_chunk_shared = InvalidDsaPointer;
    3376        2008 :     hashtable->batches[batchno].at_least_one_chunk = false;
    3377        2008 : }
    3378             : 
    3379             : /*
    3380             :  * Take the next available chunk from the queue of chunks being worked on in
    3381             :  * parallel.  Return NULL if there are none left.  Otherwise return a pointer
    3382             :  * to the chunk, and set *shared to the DSA pointer to the chunk.
    3383             :  */
    3384             : static HashMemoryChunk
    3385        1154 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
    3386             : {
    3387        1154 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3388             :     HashMemoryChunk chunk;
    3389             : 
    3390        1154 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3391        1154 :     if (DsaPointerIsValid(pstate->chunk_work_queue))
    3392             :     {
    3393         956 :         *shared = pstate->chunk_work_queue;
    3394             :         chunk = (HashMemoryChunk)
    3395         956 :             dsa_get_address(hashtable->area, *shared);
    3396         956 :         pstate->chunk_work_queue = chunk->next.shared;
    3397             :     }
    3398             :     else
    3399         198 :         chunk = NULL;
    3400        1154 :     LWLockRelease(&pstate->lock);
    3401             : 
    3402        1154 :     return chunk;
    3403             : }
    3404             : 
    3405             : /*
    3406             :  * Increase the space preallocated in this backend for a given inner batch by
    3407             :  * at least a given amount.  This allows us to track whether a given batch
    3408             :  * would fit in memory when loaded back in.  Also increase the number of
    3409             :  * batches or buckets if required.
    3410             :  *
    3411             :  * This maintains a running estimate of how much space will be taken when we
    3412             :  * load the batch back into memory by simulating the way chunks will be handed
    3413             :  * out to workers.  It's not perfectly accurate because the tuples will be
    3414             :  * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
    3415             :  * it should be pretty close.  It tends to overestimate by a fraction of a
    3416             :  * chunk per worker since all workers gang up to preallocate during hashing,
    3417             :  * but workers tend to reload batches alone if there are enough to go around,
    3418             :  * leaving fewer partially filled chunks.  This effect is bounded by
    3419             :  * nparticipants.
    3420             :  *
    3421             :  * Return false if the number of batches or buckets has changed, and the
    3422             :  * caller should reconsider which batch a given tuple now belongs in and call
    3423             :  * again.
    3424             :  */
    3425             : static bool
    3426        1758 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
    3427             : {
    3428        1758 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3429        1758 :     ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
    3430        1758 :     size_t      want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
    3431             : 
    3432             :     Assert(batchno > 0);
    3433             :     Assert(batchno < hashtable->nbatch);
    3434             :     Assert(size == MAXALIGN(size));
    3435             : 
    3436        1758 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3437             : 
    3438             :     /* Has another participant commanded us to help grow? */
    3439        1758 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    3440        1742 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3441             :     {
    3442          16 :         ParallelHashGrowth growth = pstate->growth;
    3443             : 
    3444          16 :         LWLockRelease(&pstate->lock);
    3445          16 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    3446          16 :             ExecParallelHashIncreaseNumBatches(hashtable);
    3447           0 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3448           0 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    3449             : 
    3450          16 :         return false;
    3451             :     }
    3452             : 
    3453        1742 :     if (pstate->growth != PHJ_GROWTH_DISABLED &&
    3454        1514 :         batch->at_least_one_chunk &&
    3455         680 :         (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
    3456         680 :          > pstate->space_allowed))
    3457             :     {
    3458             :         /*
    3459             :          * We have determined that this batch would exceed the space budget if
    3460             :          * loaded into memory.  Command all participants to help repartition.
    3461             :          */
    3462          12 :         batch->shared->space_exhausted = true;
    3463          12 :         pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3464          12 :         LWLockRelease(&pstate->lock);
    3465             : 
    3466          12 :         return false;
    3467             :     }
    3468             : 
    3469        1730 :     batch->at_least_one_chunk = true;
    3470        1730 :     batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
    3471        1730 :     batch->preallocated = want;
    3472        1730 :     LWLockRelease(&pstate->lock);
    3473             : 
    3474        1730 :     return true;
    3475             : }
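
As a rough worked example of the bound mentioned in the comment above, assuming
HASH_CHUNK_SIZE is 32 kB and four participants are hashing: each worker can
leave at most about one partially filled chunk's worth of slack in the
estimate, so the overestimate per batch is bounded by roughly 4 × 32 kB =
128 kB, which is small next to typical hash_mem budgets.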
    3476             : 
    3477             : /*
    3478             :  * Calculate the limit on how much memory can be used by Hash and similar
    3479             :  * plan types.  This is work_mem times hash_mem_multiplier, and is
    3480             :  * expressed in bytes.
    3481             :  *
    3482             :  * Exported for use by the planner, as well as other hash-like executor
    3483             :  * nodes.  This is a rather random place for this, but there is no better
    3484             :  * place.
    3485             :  */
    3486             : size_t
    3487     1119070 : get_hash_memory_limit(void)
    3488             : {
    3489             :     double      mem_limit;
    3490             : 
    3491             :     /* Do initial calculation in double arithmetic */
    3492     1119070 :     mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
    3493             : 
    3494             :     /* Clamp in case it doesn't fit in size_t */
    3495     1119070 :     mem_limit = Min(mem_limit, (double) SIZE_MAX);
    3496             : 
    3497     1119070 :     return (size_t) mem_limit;
    3498             : }
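
For example, with work_mem set to 4096 (kB) and hash_mem_multiplier set to 2.0,
the limit comes out to 4096 * 2.0 * 1024 = 8,388,608 bytes.  A standalone
sketch of the same arithmetic (the two settings are hard-coded assumptions, not
values read from a server):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        int         work_mem = 4096;            /* kB, as in the GUC */
        double      hash_mem_multiplier = 2.0;
        double      mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;

        /* Clamp in case the product does not fit in size_t. */
        if (mem_limit > (double) SIZE_MAX)
            mem_limit = (double) SIZE_MAX;

        printf("hash memory limit: %zu bytes\n", (size_t) mem_limit);
        return 0;
    }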

Generated by: LCOV version 1.14