LCOV - code coverage report
Current view: top level - src/backend/executor - nodeHash.c (source / functions)
Test: PostgreSQL 13devel
Date: 2019-09-22 08:06:49

                   Hit     Total   Coverage
Lines:            1035      1084     95.5 %
Functions:          51        52     98.1 %

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeHash.c
       4             :  *    Routines to hash relations for hashjoin
       5             :  *
       6             :  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/executor/nodeHash.c
      12             :  *
      13             :  * See note on parallelism in nodeHashjoin.c.
      14             :  *
      15             :  *-------------------------------------------------------------------------
      16             :  */
      17             : /*
      18             :  * INTERFACE ROUTINES
      19             :  *      MultiExecHash   - generate an in-memory hash table of the relation
      20             :  *      ExecInitHash    - initialize node and subnodes
      21             :  *      ExecEndHash     - shutdown node and subnodes
      22             :  */
      23             : 
      24             : #include "postgres.h"
      25             : 
      26             : #include <math.h>
      27             : #include <limits.h>
      28             : 
      29             : #include "access/htup_details.h"
      30             : #include "access/parallel.h"
      31             : #include "catalog/pg_statistic.h"
      32             : #include "commands/tablespace.h"
      33             : #include "executor/execdebug.h"
      34             : #include "executor/hashjoin.h"
      35             : #include "executor/nodeHash.h"
      36             : #include "executor/nodeHashjoin.h"
      37             : #include "miscadmin.h"
      38             : #include "pgstat.h"
      39             : #include "port/atomics.h"
      40             : #include "utils/dynahash.h"
      41             : #include "utils/memutils.h"
      42             : #include "utils/lsyscache.h"
      43             : #include "utils/syscache.h"
      44             : 
      45             : 
      46             : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
      47             : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
      48             : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
      49             : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
      50             : static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
      51             :                                   int mcvsToUse);
      52             : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
      53             :                                     TupleTableSlot *slot,
      54             :                                     uint32 hashvalue,
      55             :                                     int bucketNumber);
      56             : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
      57             : 
      58             : static void *dense_alloc(HashJoinTable hashtable, Size size);
      59             : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
      60             :                                                 size_t size,
      61             :                                                 dsa_pointer *shared);
      62             : static void MultiExecPrivateHash(HashState *node);
      63             : static void MultiExecParallelHash(HashState *node);
      64             : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable table,
      65             :                                                        int bucketno);
      66             : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable table,
      67             :                                                       HashJoinTuple tuple);
      68             : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
      69             :                                              HashJoinTuple tuple,
      70             :                                              dsa_pointer tuple_shared);
      71             : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
      72             : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
      73             : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
      74             : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
      75             : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable table,
      76             :                                                      dsa_pointer *shared);
      77             : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
      78             :                                           int batchno,
      79             :                                           size_t size);
      80             : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
      81             : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
      82             : 
      83             : 
      84             : /* ----------------------------------------------------------------
      85             :  *      ExecHash
      86             :  *
      87             :  *      stub for pro forma compliance
      88             :  * ----------------------------------------------------------------
      89             :  */
      90             : static TupleTableSlot *
      91           0 : ExecHash(PlanState *pstate)
      92             : {
      93           0 :     elog(ERROR, "Hash node does not support ExecProcNode call convention");
      94             :     return NULL;
      95             : }
      96             : 
      97             : /* ----------------------------------------------------------------
      98             :  *      MultiExecHash
      99             :  *
     100             :  *      build hash table for hashjoin, doing partitioning if more
     101             :  *      than one batch is required.
     102             :  * ----------------------------------------------------------------
     103             :  */
     104             : Node *
     105      258560 : MultiExecHash(HashState *node)
     106             : {
     107             :     /* must provide our own instrumentation support */
     108      258560 :     if (node->ps.instrument)
     109         214 :         InstrStartNode(node->ps.instrument);
     110             : 
     111      258560 :     if (node->parallel_state != NULL)
     112         224 :         MultiExecParallelHash(node);
     113             :     else
     114      258336 :         MultiExecPrivateHash(node);
     115             : 
     116             :     /* must provide our own instrumentation support */
     117      258560 :     if (node->ps.instrument)
     118         214 :         InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
     119             : 
     120             :     /*
     121             :      * We do not return the hash table directly because it's not a subtype of
     122             :      * Node, and so would violate the MultiExecProcNode API.  Instead, our
     123             :      * parent Hashjoin node is expected to know how to fish it out of our node
     124             :      * state.  Ugly but not really worth cleaning up, since Hashjoin knows
     125             :      * quite a bit more about Hash besides that.
     126             :      */
     127      258560 :     return NULL;
     128             : }
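/*
 * A minimal sketch (not copied verbatim from nodeHashjoin.c) of the consumer
 * side described above: the parent HashJoin runs this node through
 * MultiExecProcNode() purely for its side effect, then reads the finished
 * table out of the Hash node's state.  Variable names are illustrative.
 */
        /* run the Hash node; MultiExecHash always returns NULL */
        (void) MultiExecProcNode((PlanState *) hashNode);

        /* ...and fish the hash table out of the node state instead */
        hashtable = hashNode->hashtable;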
     129             : 
     130             : /* ----------------------------------------------------------------
     131             :  *      MultiExecPrivateHash
     132             :  *
     133             :  *      parallel-oblivious version, building a backend-private
     134             :  *      hash table and (if necessary) batch files.
     135             :  * ----------------------------------------------------------------
     136             :  */
     137             : static void
     138      258336 : MultiExecPrivateHash(HashState *node)
     139             : {
     140             :     PlanState  *outerNode;
     141             :     List       *hashkeys;
     142             :     HashJoinTable hashtable;
     143             :     TupleTableSlot *slot;
     144             :     ExprContext *econtext;
     145             :     uint32      hashvalue;
     146             : 
     147             :     /*
     148             :      * get state info from node
     149             :      */
     150      258336 :     outerNode = outerPlanState(node);
     151      258336 :     hashtable = node->hashtable;
     152             : 
     153             :     /*
     154             :      * set expression context
     155             :      */
     156      258336 :     hashkeys = node->hashkeys;
     157      258336 :     econtext = node->ps.ps_ExprContext;
     158             : 
     159             :     /*
     160             :      * Get all tuples from the node below the Hash node and insert into the
     161             :      * hash table (or temp files).
     162             :      */
     163             :     for (;;)
     164             :     {
     165    14344244 :         slot = ExecProcNode(outerNode);
     166     7301290 :         if (TupIsNull(slot))
     167             :             break;
     168             :         /* We have to compute the hash value */
     169     7042954 :         econtext->ecxt_outertuple = slot;
     170     7042954 :         if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
     171     7042954 :                                  false, hashtable->keepNulls,
     172             :                                  &hashvalue))
     173             :         {
     174             :             int         bucketNumber;
     175             : 
     176     7042950 :             bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
     177     7042950 :             if (bucketNumber != INVALID_SKEW_BUCKET_NO)
     178             :             {
     179             :                 /* It's a skew tuple, so put it into that hash table */
     180         172 :                 ExecHashSkewTableInsert(hashtable, slot, hashvalue,
     181             :                                         bucketNumber);
     182         172 :                 hashtable->skewTuples += 1;
     183             :             }
     184             :             else
     185             :             {
     186             :                 /* Not subject to skew optimization, so insert normally */
     187     7042778 :                 ExecHashTableInsert(hashtable, slot, hashvalue);
     188             :             }
     189     7042950 :             hashtable->totalTuples += 1;
     190             :         }
     191             :     }
     192             : 
     193             :     /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
     194      258336 :     if (hashtable->nbuckets != hashtable->nbuckets_optimal)
     195         386 :         ExecHashIncreaseNumBuckets(hashtable);
     196             : 
     197             :     /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
     198      258336 :     hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
     199      258336 :     if (hashtable->spaceUsed > hashtable->spacePeak)
     200      258316 :         hashtable->spacePeak = hashtable->spaceUsed;
     201             : 
     202      258336 :     hashtable->partialTuples = hashtable->totalTuples;
     203      258336 : }
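/*
 * A condensed sketch of the load-factor rule behind nbuckets_optimal, as
 * used just above.  The real test lives in ExecHashTableInsert() (later in
 * this file) and additionally guards against integer and allocation-size
 * overflow; "ntuples" here stands for the number of tuples currently held
 * in memory.
 */
        if (hashtable->nbatch == 1 &&
            ntuples > hashtable->nbuckets_optimal * NTUP_PER_BUCKET)
        {
            /* double the target bucket count, keeping it a power of 2 */
            hashtable->nbuckets_optimal *= 2;
            hashtable->log2_nbuckets_optimal += 1;
        }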
     204             : 
     205             : /* ----------------------------------------------------------------
     206             :  *      MultiExecParallelHash
     207             :  *
     208             :  *      parallel-aware version, building a shared hash table and
     209             :  *      (if necessary) batch files using the combined effort of
     210             :  *      a set of co-operating backends.
     211             :  * ----------------------------------------------------------------
     212             :  */
     213             : static void
     214         224 : MultiExecParallelHash(HashState *node)
     215             : {
     216             :     ParallelHashJoinState *pstate;
     217             :     PlanState  *outerNode;
     218             :     List       *hashkeys;
     219             :     HashJoinTable hashtable;
     220             :     TupleTableSlot *slot;
     221             :     ExprContext *econtext;
     222             :     uint32      hashvalue;
     223             :     Barrier    *build_barrier;
     224             :     int         i;
     225             : 
     226             :     /*
     227             :      * get state info from node
     228             :      */
     229         224 :     outerNode = outerPlanState(node);
     230         224 :     hashtable = node->hashtable;
     231             : 
     232             :     /*
     233             :      * set expression context
     234             :      */
     235         224 :     hashkeys = node->hashkeys;
     236         224 :     econtext = node->ps.ps_ExprContext;
     237             : 
     238             :     /*
     239             :      * Synchronize the parallel hash table build.  At this stage we know that
     240             :      * the shared hash table has been or is being set up by
     241             :      * ExecHashTableCreate(), but we don't know if our peers have returned
     242             :      * from there or are here in MultiExecParallelHash(), and if so how far
     243             :      * through they are.  To find out, we check the build_barrier phase then
     244             :      * and jump to the right step in the build algorithm.
     245             :      */
     246         224 :     pstate = hashtable->parallel_state;
     247         224 :     build_barrier = &pstate->build_barrier;
     248             :     Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATING);
     249         224 :     switch (BarrierPhase(build_barrier))
     250             :     {
     251             :         case PHJ_BUILD_ALLOCATING:
     252             : 
     253             :             /*
     254             :              * Either I just allocated the initial hash table in
     255             :              * ExecHashTableCreate(), or someone else is doing that.  Either
     256             :              * way, wait for everyone to arrive here so we can proceed.
     257             :              */
     258          96 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATING);
     259             :             /* Fall through. */
     260             : 
     261             :         case PHJ_BUILD_HASHING_INNER:
     262             : 
     263             :             /*
     264             :              * It's time to begin hashing, or if we just arrived here then
     265             :              * hashing is already underway, so join in that effort.  While
     266             :              * hashing we have to be prepared to help increase the number of
     267             :              * batches or buckets at any time, and if we arrived here when
     268             :              * that was already underway we'll have to help complete that work
     269             :              * immediately so that it's safe to access batches and buckets
     270             :              * below.
     271             :              */
     272         154 :             if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
     273             :                 PHJ_GROW_BATCHES_ELECTING)
     274           2 :                 ExecParallelHashIncreaseNumBatches(hashtable);
     275         154 :             if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
     276             :                 PHJ_GROW_BUCKETS_ELECTING)
     277           0 :                 ExecParallelHashIncreaseNumBuckets(hashtable);
     278         154 :             ExecParallelHashEnsureBatchAccessors(hashtable);
     279         154 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
     280             :             for (;;)
     281             :             {
     282     2400258 :                 slot = ExecProcNode(outerNode);
     283     1200206 :                 if (TupIsNull(slot))
     284             :                     break;
     285     1200052 :                 econtext->ecxt_outertuple = slot;
     286     1200052 :                 if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
     287     1200052 :                                          false, hashtable->keepNulls,
     288             :                                          &hashvalue))
     289     1200052 :                     ExecParallelHashTableInsert(hashtable, slot, hashvalue);
     290     1200052 :                 hashtable->partialTuples++;
     291             :             }
     292             : 
     293             :             /*
     294             :              * Make sure that any tuples we wrote to disk are visible to
     295             :              * others before anyone tries to load them.
     296             :              */
     297         902 :             for (i = 0; i < hashtable->nbatch; ++i)
     298         748 :                 sts_end_write(hashtable->batches[i].inner_tuples);
     299             : 
     300             :             /*
     301             :              * Update shared counters.  We need an accurate total tuple count
     302             :              * to control the empty table optimization.
     303             :              */
     304         154 :             ExecParallelHashMergeCounters(hashtable);
     305             : 
     306         154 :             BarrierDetach(&pstate->grow_buckets_barrier);
     307         154 :             BarrierDetach(&pstate->grow_batches_barrier);
     308             : 
     309             :             /*
     310             :              * Wait for everyone to finish building and flushing files and
     311             :              * counters.
     312             :              */
     313         154 :             if (BarrierArriveAndWait(build_barrier,
     314             :                                      WAIT_EVENT_HASH_BUILD_HASHING_INNER))
     315             :             {
     316             :                 /*
     317             :                  * Elect one backend to disable any further growth.  Batches
     318             :                  * are now fixed.  While building them we made sure they'd fit
     319             :                  * in our memory budget when we load them back in later (or we
     320             :                  * tried to do that and gave up because we detected extreme
     321             :                  * skew).
     322             :                  */
     323          96 :                 pstate->growth = PHJ_GROWTH_DISABLED;
     324             :             }
     325             :     }
     326             : 
     327             :     /*
     328             :      * We're not yet attached to a batch.  We all agree on the dimensions and
     329             :      * number of inner tuples (for the empty table optimization).
     330             :      */
     331         224 :     hashtable->curbatch = -1;
     332         224 :     hashtable->nbuckets = pstate->nbuckets;
     333         224 :     hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
     334         224 :     hashtable->totalTuples = pstate->total_tuples;
     335         224 :     ExecParallelHashEnsureBatchAccessors(hashtable);
     336             : 
     337             :     /*
     338             :      * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
     339             :      * case, which will bring the build phase to PHJ_BUILD_DONE (if it isn't
     340             :      * there already).
     341             :      */
     342             :     Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASHING_OUTER ||
     343             :            BarrierPhase(build_barrier) == PHJ_BUILD_DONE);
     344         224 : }
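/*
 * As a reading aid for the switch above: the build barrier advances through
 * the following phases, in this order (constants as referenced in this file
 * and in nodeHashjoin.c; descriptions paraphrased):
 *
 *   PHJ_BUILD_ELECTING        one backend creates the shared batch state and
 *                             batch 0's hash table
 *   PHJ_BUILD_ALLOCATING      everyone waits for that allocation to finish
 *   PHJ_BUILD_HASHING_INNER   all participants hash the inner relation
 *   PHJ_BUILD_HASHING_OUTER   the outer relation is partitioned into batches
 *                             (multi-batch joins only)
 *   PHJ_BUILD_DONE            the hash table is ready to be probed
 */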
     345             : 
     346             : /* ----------------------------------------------------------------
     347             :  *      ExecInitHash
     348             :  *
     349             :  *      Init routine for Hash node
     350             :  * ----------------------------------------------------------------
     351             :  */
     352             : HashState *
     353       27356 : ExecInitHash(Hash *node, EState *estate, int eflags)
     354             : {
     355             :     HashState  *hashstate;
     356             : 
     357             :     /* check for unsupported flags */
     358             :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
     359             : 
     360             :     /*
     361             :      * create state structure
     362             :      */
     363       27356 :     hashstate = makeNode(HashState);
     364       27356 :     hashstate->ps.plan = (Plan *) node;
     365       27356 :     hashstate->ps.state = estate;
     366       27356 :     hashstate->ps.ExecProcNode = ExecHash;
     367       27356 :     hashstate->hashtable = NULL;
     368       27356 :     hashstate->hashkeys = NIL;   /* will be set by parent HashJoin */
     369             : 
     370             :     /*
     371             :      * Miscellaneous initialization
     372             :      *
     373             :      * create expression context for node
     374             :      */
     375       27356 :     ExecAssignExprContext(estate, &hashstate->ps);
     376             : 
     377             :     /*
     378             :      * initialize child nodes
     379             :      */
     380       27356 :     outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
     381             : 
     382             :     /*
     383             :      * initialize our result slot and type. No need to build projection
     384             :      * because this node doesn't do projections.
     385             :      */
     386       27356 :     ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
     387       27356 :     hashstate->ps.ps_ProjInfo = NULL;
     388             : 
     389             :     /*
     390             :      * initialize child expressions
     391             :      */
     392             :     Assert(node->plan.qual == NIL);
     393       27356 :     hashstate->hashkeys =
     394       27356 :         ExecInitExprList(node->hashkeys, (PlanState *) hashstate);
     395             : 
     396       27356 :     return hashstate;
     397             : }
     398             : 
     399             : /* ---------------------------------------------------------------
     400             :  *      ExecEndHash
     401             :  *
     402             :  *      clean up routine for Hash node
     403             :  * ----------------------------------------------------------------
     404             :  */
     405             : void
     406       27320 : ExecEndHash(HashState *node)
     407             : {
     408             :     PlanState  *outerPlan;
     409             : 
     410             :     /*
     411             :      * free exprcontext
     412             :      */
     413       27320 :     ExecFreeExprContext(&node->ps);
     414             : 
     415             :     /*
     416             :      * shut down the subplan
     417             :      */
     418       27320 :     outerPlan = outerPlanState(node);
     419       27320 :     ExecEndNode(outerPlan);
     420       27320 : }
     421             : 
     422             : 
     423             : /* ----------------------------------------------------------------
     424             :  *      ExecHashTableCreate
     425             :  *
     426             :  *      create an empty hashtable data structure for hashjoin.
     427             :  * ----------------------------------------------------------------
     428             :  */
     429             : HashJoinTable
     430      258560 : ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, bool keepNulls)
     431             : {
     432             :     Hash       *node;
     433             :     HashJoinTable hashtable;
     434             :     Plan       *outerNode;
     435             :     size_t      space_allowed;
     436             :     int         nbuckets;
     437             :     int         nbatch;
     438             :     double      rows;
     439             :     int         num_skew_mcvs;
     440             :     int         log2_nbuckets;
     441             :     int         nkeys;
     442             :     int         i;
     443             :     ListCell   *ho;
     444             :     ListCell   *hc;
     445             :     MemoryContext oldcxt;
     446             : 
     447             :     /*
     448             :      * Get information about the size of the relation to be hashed (it's the
     449             :      * "outer" subtree of this node, but the inner relation of the hashjoin).
     450             :      * Compute the appropriate size of the hash table.
     451             :      */
     452      258560 :     node = (Hash *) state->ps.plan;
     453      258560 :     outerNode = outerPlan(node);
     454             : 
     455             :     /*
      456             :      * If this is a shared hash table with a partial plan, then we can't use
     457             :      * outerNode->plan_rows to estimate its size.  We need an estimate of the
     458             :      * total number of rows across all copies of the partial plan.
     459             :      */
     460      258560 :     rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
     461             : 
     462      775904 :     ExecChooseHashTableSize(rows, outerNode->plan_width,
     463      258560 :                             OidIsValid(node->skewTable),
     464      258560 :                             state->parallel_state != NULL,
     465      258560 :                             state->parallel_state != NULL ?
     466         224 :                             state->parallel_state->nparticipants - 1 : 0,
     467             :                             &space_allowed,
     468             :                             &nbuckets, &nbatch, &num_skew_mcvs);
     469             : 
     470             :     /* nbuckets must be a power of 2 */
     471      258560 :     log2_nbuckets = my_log2(nbuckets);
     472             :     Assert(nbuckets == (1 << log2_nbuckets));
     473             : 
     474             :     /*
     475             :      * Initialize the hash table control block.
     476             :      *
     477             :      * The hashtable control block is just palloc'd from the executor's
     478             :      * per-query memory context.  Everything else should be kept inside the
     479             :      * subsidiary hashCxt or batchCxt.
     480             :      */
     481      258560 :     hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
     482      258560 :     hashtable->nbuckets = nbuckets;
     483      258560 :     hashtable->nbuckets_original = nbuckets;
     484      258560 :     hashtable->nbuckets_optimal = nbuckets;
     485      258560 :     hashtable->log2_nbuckets = log2_nbuckets;
     486      258560 :     hashtable->log2_nbuckets_optimal = log2_nbuckets;
     487      258560 :     hashtable->buckets.unshared = NULL;
     488      258560 :     hashtable->keepNulls = keepNulls;
     489      258560 :     hashtable->skewEnabled = false;
     490      258560 :     hashtable->skewBucket = NULL;
     491      258560 :     hashtable->skewBucketLen = 0;
     492      258560 :     hashtable->nSkewBuckets = 0;
     493      258560 :     hashtable->skewBucketNums = NULL;
     494      258560 :     hashtable->nbatch = nbatch;
     495      258560 :     hashtable->curbatch = 0;
     496      258560 :     hashtable->nbatch_original = nbatch;
     497      258560 :     hashtable->nbatch_outstart = nbatch;
     498      258560 :     hashtable->growEnabled = true;
     499      258560 :     hashtable->totalTuples = 0;
     500      258560 :     hashtable->partialTuples = 0;
     501      258560 :     hashtable->skewTuples = 0;
     502      258560 :     hashtable->innerBatchFile = NULL;
     503      258560 :     hashtable->outerBatchFile = NULL;
     504      258560 :     hashtable->spaceUsed = 0;
     505      258560 :     hashtable->spacePeak = 0;
     506      258560 :     hashtable->spaceAllowed = space_allowed;
     507      258560 :     hashtable->spaceUsedSkew = 0;
     508      258560 :     hashtable->spaceAllowedSkew =
     509      258560 :         hashtable->spaceAllowed * SKEW_WORK_MEM_PERCENT / 100;
     510      258560 :     hashtable->chunks = NULL;
     511      258560 :     hashtable->current_chunk = NULL;
     512      258560 :     hashtable->parallel_state = state->parallel_state;
     513      258560 :     hashtable->area = state->ps.state->es_query_dsa;
     514      258560 :     hashtable->batches = NULL;
     515             : 
     516             : #ifdef HJDEBUG
     517             :     printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
     518             :            hashtable, nbatch, nbuckets);
     519             : #endif
     520             : 
     521             :     /*
     522             :      * Create temporary memory contexts in which to keep the hashtable working
     523             :      * storage.  See notes in executor/hashjoin.h.
     524             :      */
     525      258560 :     hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
     526             :                                                "HashTableContext",
     527             :                                                ALLOCSET_DEFAULT_SIZES);
     528             : 
     529      258560 :     hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
     530             :                                                 "HashBatchContext",
     531             :                                                 ALLOCSET_DEFAULT_SIZES);
     532             : 
     533             :     /* Allocate data that will live for the life of the hashjoin */
     534             : 
     535      258560 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
     536             : 
     537             :     /*
     538             :      * Get info about the hash functions to be used for each hash key. Also
     539             :      * remember whether the join operators are strict.
     540             :      */
     541      258560 :     nkeys = list_length(hashOperators);
     542      258560 :     hashtable->outer_hashfunctions =
     543      258560 :         (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
     544      258560 :     hashtable->inner_hashfunctions =
     545      258560 :         (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
     546      258560 :     hashtable->hashStrict = (bool *) palloc(nkeys * sizeof(bool));
     547      258560 :     hashtable->collations = (Oid *) palloc(nkeys * sizeof(Oid));
     548      258560 :     i = 0;
     549      518276 :     forboth(ho, hashOperators, hc, hashCollations)
     550             :     {
     551      259716 :         Oid         hashop = lfirst_oid(ho);
     552             :         Oid         left_hashfn;
     553             :         Oid         right_hashfn;
     554             : 
     555      259716 :         if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
     556           0 :             elog(ERROR, "could not find hash function for hash operator %u",
     557             :                  hashop);
     558      259716 :         fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
     559      259716 :         fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
     560      259716 :         hashtable->hashStrict[i] = op_strict(hashop);
     561      259716 :         hashtable->collations[i] = lfirst_oid(hc);
     562      259716 :         i++;
     563             :     }
     564             : 
     565      258560 :     if (nbatch > 1 && hashtable->parallel_state == NULL)
     566             :     {
     567             :         /*
     568             :          * allocate and initialize the file arrays in hashCxt (not needed for
     569             :          * parallel case which uses shared tuplestores instead of raw files)
     570             :          */
     571          76 :         hashtable->innerBatchFile = (BufFile **)
     572          76 :             palloc0(nbatch * sizeof(BufFile *));
     573          76 :         hashtable->outerBatchFile = (BufFile **)
     574          76 :             palloc0(nbatch * sizeof(BufFile *));
     575             :         /* The files will not be opened until needed... */
     576             :         /* ... but make sure we have temp tablespaces established for them */
     577          76 :         PrepareTempTablespaces();
     578             :     }
     579             : 
     580      258560 :     MemoryContextSwitchTo(oldcxt);
     581             : 
     582      258560 :     if (hashtable->parallel_state)
     583             :     {
     584         224 :         ParallelHashJoinState *pstate = hashtable->parallel_state;
     585             :         Barrier    *build_barrier;
     586             : 
     587             :         /*
     588             :          * Attach to the build barrier.  The corresponding detach operation is
     589             :          * in ExecHashTableDetach.  Note that we won't attach to the
     590             :          * batch_barrier for batch 0 yet.  We'll attach later and start it out
     591             :          * in PHJ_BATCH_PROBING phase, because batch 0 is allocated up front
     592             :          * and then loaded while hashing (the standard hybrid hash join
     593             :          * algorithm), and we'll coordinate that using build_barrier.
     594             :          */
     595         224 :         build_barrier = &pstate->build_barrier;
     596         224 :         BarrierAttach(build_barrier);
     597             : 
     598             :         /*
     599             :          * So far we have no idea whether there are any other participants,
     600             :          * and if so, what phase they are working on.  The only thing we care
     601             :          * about at this point is whether someone has already created the
     602             :          * SharedHashJoinBatch objects and the hash table for batch 0.  One
     603             :          * backend will be elected to do that now if necessary.
     604             :          */
     605         320 :         if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECTING &&
     606          96 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECTING))
     607             :         {
     608          96 :             pstate->nbatch = nbatch;
     609          96 :             pstate->space_allowed = space_allowed;
     610          96 :             pstate->growth = PHJ_GROWTH_OK;
     611             : 
     612             :             /* Set up the shared state for coordinating batches. */
     613          96 :             ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
     614             : 
     615             :             /*
     616             :              * Allocate batch 0's hash table up front so we can load it
     617             :              * directly while hashing.
     618             :              */
     619          96 :             pstate->nbuckets = nbuckets;
     620          96 :             ExecParallelHashTableAlloc(hashtable, 0);
     621             :         }
     622             : 
     623             :         /*
     624             :          * The next Parallel Hash synchronization point is in
     625             :          * MultiExecParallelHash(), which will progress it all the way to
     626             :          * PHJ_BUILD_DONE.  The caller must not return control from this
     627             :          * executor node between now and then.
     628             :          */
     629             :     }
     630             :     else
     631             :     {
     632             :         /*
     633             :          * Prepare context for the first-scan space allocations; allocate the
     634             :          * hashbucket array therein, and set each bucket "empty".
     635             :          */
     636      258336 :         MemoryContextSwitchTo(hashtable->batchCxt);
     637             : 
     638      258336 :         hashtable->buckets.unshared = (HashJoinTuple *)
     639      258336 :             palloc0(nbuckets * sizeof(HashJoinTuple));
     640             : 
     641             :         /*
     642             :          * Set up for skew optimization, if possible and there's a need for
     643             :          * more than one batch.  (In a one-batch join, there's no point in
     644             :          * it.)
     645             :          */
     646      258336 :         if (nbatch > 1)
     647          76 :             ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
     648             : 
     649      258336 :         MemoryContextSwitchTo(oldcxt);
     650             :     }
     651             : 
     652      258560 :     return hashtable;
     653             : }
     654             : 
     655             : 
     656             : /*
     657             :  * Compute appropriate size for hashtable given the estimated size of the
     658             :  * relation to be hashed (number of rows and average row width).
     659             :  *
     660             :  * This is exported so that the planner's costsize.c can use it.
     661             :  */
     662             : 
     663             : /* Target bucket loading (tuples per bucket) */
     664             : #define NTUP_PER_BUCKET         1
     665             : 
     666             : void
     667      522842 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
     668             :                         bool try_combined_work_mem,
     669             :                         int parallel_workers,
     670             :                         size_t *space_allowed,
     671             :                         int *numbuckets,
     672             :                         int *numbatches,
     673             :                         int *num_skew_mcvs)
     674             : {
     675             :     int         tupsize;
     676             :     double      inner_rel_bytes;
     677             :     long        bucket_bytes;
     678             :     long        hash_table_bytes;
     679             :     long        skew_table_bytes;
     680             :     long        max_pointers;
     681             :     long        mppow2;
     682      522842 :     int         nbatch = 1;
     683             :     int         nbuckets;
     684             :     double      dbuckets;
     685             : 
     686             :     /* Force a plausible relation size if no info */
     687      522842 :     if (ntuples <= 0.0)
     688          40 :         ntuples = 1000.0;
     689             : 
     690             :     /*
     691             :      * Estimate tupsize based on footprint of tuple in hashtable... note this
     692             :      * does not allow for any palloc overhead.  The manipulations of spaceUsed
     693             :      * don't count palloc overhead either.
     694             :      */
     695      522842 :     tupsize = HJTUPLE_OVERHEAD +
     696      522842 :         MAXALIGN(SizeofMinimalTupleHeader) +
     697      522842 :         MAXALIGN(tupwidth);
     698      522842 :     inner_rel_bytes = ntuples * tupsize;
     699             : 
     700             :     /*
     701             :      * Target in-memory hashtable size is work_mem kilobytes.
     702             :      */
     703      522842 :     hash_table_bytes = work_mem * 1024L;
     704             : 
     705             :     /*
     706             :      * Parallel Hash tries to use the combined work_mem of all workers to
     707             :      * avoid the need to batch.  If that won't work, it falls back to work_mem
     708             :      * per worker and tries to process batches in parallel.
     709             :      */
     710      522842 :     if (try_combined_work_mem)
     711        2740 :         hash_table_bytes += hash_table_bytes * parallel_workers;
     712             : 
     713      522842 :     *space_allowed = hash_table_bytes;
     714             : 
     715             :     /*
     716             :      * If skew optimization is possible, estimate the number of skew buckets
     717             :      * that will fit in the memory allowed, and decrement the assumed space
     718             :      * available for the main hash table accordingly.
     719             :      *
     720             :      * We make the optimistic assumption that each skew bucket will contain
     721             :      * one inner-relation tuple.  If that turns out to be low, we will recover
     722             :      * at runtime by reducing the number of skew buckets.
     723             :      *
     724             :      * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
     725             :      * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
     726             :      * will round up to the next power of 2 and then multiply by 4 to reduce
     727             :      * collisions.
     728             :      */
     729      522842 :     if (useskew)
     730             :     {
     731      277778 :         skew_table_bytes = hash_table_bytes * SKEW_WORK_MEM_PERCENT / 100;
     732             : 
     733             :         /*----------
     734             :          * Divisor is:
     735             :          * size of a hash tuple +
     736             :          * worst-case size of skewBucket[] per MCV +
     737             :          * size of skewBucketNums[] entry +
     738             :          * size of skew bucket struct itself
     739             :          *----------
     740             :          */
     741      555556 :         *num_skew_mcvs = skew_table_bytes / (tupsize +
     742             :                                              (8 * sizeof(HashSkewBucket *)) +
     743      277778 :                                              sizeof(int) +
     744             :                                              SKEW_BUCKET_OVERHEAD);
     745      277778 :         if (*num_skew_mcvs > 0)
     746      277778 :             hash_table_bytes -= skew_table_bytes;
     747             :     }
     748             :     else
     749      245064 :         *num_skew_mcvs = 0;
     750             : 
     751             :     /*
     752             :      * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
     753             :      * memory is filled, assuming a single batch; but limit the value so that
     754             :      * the pointer arrays we'll try to allocate do not exceed work_mem nor
     755             :      * MaxAllocSize.
     756             :      *
     757             :      * Note that both nbuckets and nbatch must be powers of 2 to make
     758             :      * ExecHashGetBucketAndBatch fast.
     759             :      */
     760      522842 :     max_pointers = *space_allowed / sizeof(HashJoinTuple);
     761      522842 :     max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
     762             :     /* If max_pointers isn't a power of 2, must round it down to one */
     763      522842 :     mppow2 = 1L << my_log2(max_pointers);
     764      522842 :     if (max_pointers != mppow2)
     765        1240 :         max_pointers = mppow2 / 2;
     766             : 
     767             :     /* Also ensure we avoid integer overflow in nbatch and nbuckets */
     768             :     /* (this step is redundant given the current value of MaxAllocSize) */
     769      522842 :     max_pointers = Min(max_pointers, INT_MAX / 2);
     770             : 
     771      522842 :     dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
     772      522842 :     dbuckets = Min(dbuckets, max_pointers);
     773      522842 :     nbuckets = (int) dbuckets;
     774             :     /* don't let nbuckets be really small, though ... */
     775      522842 :     nbuckets = Max(nbuckets, 1024);
     776             :     /* ... and force it to be a power of 2. */
     777      522842 :     nbuckets = 1 << my_log2(nbuckets);
     778             : 
     779             :     /*
     780             :      * If there's not enough space to store the projected number of tuples and
     781             :      * the required bucket headers, we will need multiple batches.
     782             :      */
     783      522842 :     bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
     784      522842 :     if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
     785             :     {
     786             :         /* We'll need multiple batches */
     787             :         long        lbuckets;
     788             :         double      dbatch;
     789             :         int         minbatch;
     790             :         long        bucket_size;
     791             : 
     792             :         /*
     793             :          * If Parallel Hash with combined work_mem would still need multiple
     794             :          * batches, we'll have to fall back to regular work_mem budget.
     795             :          */
     796        2428 :         if (try_combined_work_mem)
     797             :         {
     798         144 :             ExecChooseHashTableSize(ntuples, tupwidth, useskew,
     799             :                                     false, parallel_workers,
     800             :                                     space_allowed,
     801             :                                     numbuckets,
     802             :                                     numbatches,
     803             :                                     num_skew_mcvs);
     804         144 :             return;
     805             :         }
     806             : 
     807             :         /*
     808             :          * Estimate the number of buckets we'll want to have when work_mem is
     809             :          * entirely full.  Each bucket will contain a bucket pointer plus
     810             :          * NTUP_PER_BUCKET tuples, whose projected size already includes
     811             :          * overhead for the hash code, pointer to the next tuple, etc.
     812             :          */
     813        2284 :         bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
     814        2284 :         lbuckets = 1L << my_log2(hash_table_bytes / bucket_size);
     815        2284 :         lbuckets = Min(lbuckets, max_pointers);
     816        2284 :         nbuckets = (int) lbuckets;
     817        2284 :         nbuckets = 1 << my_log2(nbuckets);
     818        2284 :         bucket_bytes = nbuckets * sizeof(HashJoinTuple);
     819             : 
     820             :         /*
     821             :          * Buckets are simple pointers to hashjoin tuples, while tupsize
     822             :          * includes the pointer, hash code, and MinimalTupleData.  So buckets
     823             :          * should never really exceed 25% of work_mem (even for
     824             :          * NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
     825             :          * 2^N bytes, where we might get more because of doubling. So let's
     826             :          * look for 50% here.
     827             :          */
     828             :         Assert(bucket_bytes <= hash_table_bytes / 2);
     829             : 
     830             :         /* Calculate required number of batches. */
     831        2284 :         dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
     832        2284 :         dbatch = Min(dbatch, max_pointers);
     833        2284 :         minbatch = (int) dbatch;
     834        2284 :         nbatch = 2;
     835       12524 :         while (nbatch < minbatch)
     836        7956 :             nbatch <<= 1;
     837             :     }
     838             : 
     839             :     Assert(nbuckets > 0);
     840             :     Assert(nbatch > 0);
     841             : 
     842      522698 :     *numbuckets = nbuckets;
     843      522698 :     *numbatches = nbatch;
     844             : }
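/*
 * A minimal sketch of why nbuckets and nbatch must be powers of 2:
 * ExecHashGetBucketAndBatch() (defined further down in this file) can then
 * derive both numbers from the hash value with masks and a shift,
 * essentially as follows (illustrative, comments trimmed):
 */
        *bucketno = hashvalue & (nbuckets - 1);
        if (nbatch > 1)
            *batchno = (hashvalue >> hashtable->log2_nbuckets) & (nbatch - 1);
        else
            *batchno = 0;
/*
 * For a rough feel for the sizing itself: with the default 4MB work_mem, a
 * per-tuple in-memory footprint of about 56 bytes, and an estimate of one
 * million rows, the inner relation is ~56MB, and the code above settles on
 * approximately nbuckets = 65536 and nbatch = 16 (both powers of two).
 */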
     845             : 
     846             : 
     847             : /* ----------------------------------------------------------------
     848             :  *      ExecHashTableDestroy
     849             :  *
     850             :  *      destroy a hash table
     851             :  * ----------------------------------------------------------------
     852             :  */
     853             : void
     854      258528 : ExecHashTableDestroy(HashJoinTable hashtable)
     855             : {
     856             :     int         i;
     857             : 
     858             :     /*
     859             :      * Make sure all the temp files are closed.  We skip batch 0, since it
     860             :      * can't have any temp files (and the arrays might not even exist if
     861             :      * nbatch is only 1).  Parallel hash joins don't use these files.
     862             :      */
     863      258528 :     if (hashtable->innerBatchFile != NULL)
     864             :     {
     865        1032 :         for (i = 1; i < hashtable->nbatch; i++)
     866             :         {
     867         912 :             if (hashtable->innerBatchFile[i])
     868           0 :                 BufFileClose(hashtable->innerBatchFile[i]);
     869         912 :             if (hashtable->outerBatchFile[i])
     870           0 :                 BufFileClose(hashtable->outerBatchFile[i]);
     871             :         }
     872             :     }
     873             : 
     874             :     /* Release working memory (batchCxt is a child, so it goes away too) */
     875      258528 :     MemoryContextDelete(hashtable->hashCxt);
     876             : 
     877             :     /* And drop the control block */
     878      258528 :     pfree(hashtable);
     879      258528 : }
     880             : 
     881             : /*
     882             :  * ExecHashIncreaseNumBatches
     883             :  *      increase the original number of batches in order to reduce
     884             :  *      current memory consumption
     885             :  */
     886             : static void
     887      345496 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
     888             : {
     889      345496 :     int         oldnbatch = hashtable->nbatch;
     890      345496 :     int         curbatch = hashtable->curbatch;
     891             :     int         nbatch;
     892             :     MemoryContext oldcxt;
     893             :     long        ninmemory;
     894             :     long        nfreed;
     895             :     HashMemoryChunk oldchunks;
     896             : 
     897             :     /* do nothing if we've decided to shut off growth */
     898      345496 :     if (!hashtable->growEnabled)
     899      345400 :         return;
     900             : 
     901             :     /* safety check to avoid overflow */
     902          96 :     if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
     903           0 :         return;
     904             : 
     905          96 :     nbatch = oldnbatch * 2;
     906             :     Assert(nbatch > 1);
     907             : 
     908             : #ifdef HJDEBUG
     909             :     printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
     910             :            hashtable, nbatch, hashtable->spaceUsed);
     911             : #endif
     912             : 
     913          96 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
     914             : 
     915          96 :     if (hashtable->innerBatchFile == NULL)
     916             :     {
     917             :         /* we had no file arrays before */
     918          44 :         hashtable->innerBatchFile = (BufFile **)
     919          44 :             palloc0(nbatch * sizeof(BufFile *));
     920          44 :         hashtable->outerBatchFile = (BufFile **)
     921          44 :             palloc0(nbatch * sizeof(BufFile *));
     922             :         /* time to establish the temp tablespaces, too */
     923          44 :         PrepareTempTablespaces();
     924             :     }
     925             :     else
     926             :     {
     927             :         /* enlarge arrays and zero out added entries */
     928          52 :         hashtable->innerBatchFile = (BufFile **)
     929          52 :             repalloc(hashtable->innerBatchFile, nbatch * sizeof(BufFile *));
     930          52 :         hashtable->outerBatchFile = (BufFile **)
     931          52 :             repalloc(hashtable->outerBatchFile, nbatch * sizeof(BufFile *));
     932          52 :         MemSet(hashtable->innerBatchFile + oldnbatch, 0,
     933             :                (nbatch - oldnbatch) * sizeof(BufFile *));
     934          52 :         MemSet(hashtable->outerBatchFile + oldnbatch, 0,
     935             :                (nbatch - oldnbatch) * sizeof(BufFile *));
     936             :     }
     937             : 
     938          96 :     MemoryContextSwitchTo(oldcxt);
     939             : 
     940          96 :     hashtable->nbatch = nbatch;
     941             : 
     942             :     /*
      943             :      * Scan through the existing hash table entries and dump out any that no
      944             :      * longer belong to the current batch.
     945             :      */
     946          96 :     ninmemory = nfreed = 0;
     947             : 
      948          96 :     /* If we know we need to resize nbuckets, we can do it while rebatching. */
     949          96 :     if (hashtable->nbuckets_optimal != hashtable->nbuckets)
     950             :     {
     951             :         /* we never decrease the number of buckets */
     952             :         Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
     953             : 
     954          44 :         hashtable->nbuckets = hashtable->nbuckets_optimal;
     955          44 :         hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
     956             : 
     957          44 :         hashtable->buckets.unshared =
     958          44 :             repalloc(hashtable->buckets.unshared,
     959          44 :                      sizeof(HashJoinTuple) * hashtable->nbuckets);
     960             :     }
     961             : 
     962             :     /*
     963             :      * We will scan through the chunks directly, so that we can reset the
      964             :      * buckets now and not have to keep track of which tuples in the buckets have
     965             :      * already been processed. We will free the old chunks as we go.
     966             :      */
     967          96 :     memset(hashtable->buckets.unshared, 0,
     968          96 :            sizeof(HashJoinTuple) * hashtable->nbuckets);
     969          96 :     oldchunks = hashtable->chunks;
     970          96 :     hashtable->chunks = NULL;
     971             : 
     972             :     /* so, let's scan through the old chunks, and all tuples in each chunk */
     973         568 :     while (oldchunks != NULL)
     974             :     {
     975         376 :         HashMemoryChunk nextchunk = oldchunks->next.unshared;
     976             : 
     977             :         /* position within the buffer (up to oldchunks->used) */
     978         376 :         size_t      idx = 0;
     979             : 
     980             :         /* process all tuples stored in this chunk (and then free it) */
     981      257332 :         while (idx < oldchunks->used)
     982             :         {
     983      256580 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
     984      256580 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
     985      256580 :             int         hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
     986             :             int         bucketno;
     987             :             int         batchno;
     988             : 
     989      256580 :             ninmemory++;
     990      256580 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
     991             :                                       &bucketno, &batchno);
     992             : 
     993      256580 :             if (batchno == curbatch)
     994             :             {
     995             :                 /* keep tuple in memory - copy it into the new chunk */
     996             :                 HashJoinTuple copyTuple;
     997             : 
     998      100096 :                 copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
     999      100096 :                 memcpy(copyTuple, hashTuple, hashTupleSize);
    1000             : 
    1001             :                 /* and add it back to the appropriate bucket */
    1002      100096 :                 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1003      100096 :                 hashtable->buckets.unshared[bucketno] = copyTuple;
    1004             :             }
    1005             :             else
    1006             :             {
    1007             :                 /* dump it out */
    1008             :                 Assert(batchno > curbatch);
    1009      156484 :                 ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
    1010             :                                       hashTuple->hashvalue,
    1011      156484 :                                       &hashtable->innerBatchFile[batchno]);
    1012             : 
    1013      156484 :                 hashtable->spaceUsed -= hashTupleSize;
    1014      156484 :                 nfreed++;
    1015             :             }
    1016             : 
    1017             :             /* next tuple in this chunk */
    1018      256580 :             idx += MAXALIGN(hashTupleSize);
    1019             : 
    1020             :             /* allow this loop to be cancellable */
    1021      256580 :             CHECK_FOR_INTERRUPTS();
    1022             :         }
    1023             : 
    1024             :         /* we're done with this chunk - free it and proceed to the next one */
    1025         376 :         pfree(oldchunks);
    1026         376 :         oldchunks = nextchunk;
    1027             :     }
    1028             : 
    1029             : #ifdef HJDEBUG
    1030             :     printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
    1031             :            hashtable, nfreed, ninmemory, hashtable->spaceUsed);
    1032             : #endif
    1033             : 
    1034             :     /*
    1035             :      * If we dumped out either all or none of the tuples in the table, disable
    1036             :      * further expansion of nbatch.  This situation implies that we have
    1037             :      * enough tuples of identical hashvalues to overflow spaceAllowed.
    1038             :      * Increasing nbatch will not fix it since there's no way to subdivide the
    1039             :      * group any more finely. We have to just gut it out and hope the server
    1040             :      * has enough RAM.
    1041             :      */
    1042          96 :     if (nfreed == 0 || nfreed == ninmemory)
    1043             :     {
    1044          20 :         hashtable->growEnabled = false;
    1045             : #ifdef HJDEBUG
    1046             :         printf("Hashjoin %p: disabling further increase of nbatch\n",
    1047             :                hashtable);
    1048             : #endif
    1049             :     }
    1050             : }
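
The reason the repartitioning loop above can Assert(batchno > curbatch) follows from the batch-number formula used by ExecHashGetBucketAndBatch later in this file: doubling nbatch only adds one more high bit to the batch mask, so a tuple that belonged to the current batch either stays there or moves to a strictly later batch. A minimal standalone sketch (illustrative only; the hash value, bucket count, and batch counts are made up) shows this:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint32_t    hashvalue = 0x0002b800; /* arbitrary example hash code */
        int         log2_nbuckets = 10;     /* nbuckets = 1024 */
        int         old_nbatch = 4;
        int         new_nbatch = 8;         /* doubled, as in the function above */
        int         old_batchno = (hashvalue >> log2_nbuckets) & (old_nbatch - 1);
        int         new_batchno = (hashvalue >> log2_nbuckets) & (new_nbatch - 1);

        /* new_batchno is either old_batchno or old_batchno + old_nbatch */
        printf("old batchno = %d, new batchno = %d\n", old_batchno, new_batchno);
        return 0;
    }

With these example values the tuple moves from batch 2 to batch 6 (= 2 + old_nbatch), never to an earlier batch.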
    1051             : 
    1052             : /*
    1053             :  * ExecParallelHashIncreaseNumBatches
    1054             :  *      Every participant attached to grow_batches_barrier must run this
    1055             :  *      function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
    1056             :  */
    1057             : static void
    1058          38 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
    1059             : {
    1060          38 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1061             :     int         i;
    1062             : 
    1063             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
    1064             : 
    1065             :     /*
    1066             :      * It's unlikely, but we need to be prepared for new participants to show
    1067             :      * up while we're in the middle of this operation, so we need to switch on
    1068             :      * the barrier phase here.
    1069             :      */
    1070          38 :     switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
    1071             :     {
    1072             :         case PHJ_GROW_BATCHES_ELECTING:
    1073             : 
    1074             :             /*
    1075             :              * Elect one participant to prepare to grow the number of batches.
    1076             :              * This involves reallocating or resetting the buckets of batch 0
    1077             :              * in preparation for all participants to begin repartitioning the
    1078             :              * tuples.
    1079             :              */
    1080          36 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1081             :                                      WAIT_EVENT_HASH_GROW_BATCHES_ELECTING))
    1082             :             {
    1083             :                 dsa_pointer_atomic *buckets;
    1084             :                 ParallelHashJoinBatch *old_batch0;
    1085             :                 int         new_nbatch;
    1086             :                 int         i;
    1087             : 
    1088             :                 /* Move the old batch out of the way. */
    1089          36 :                 old_batch0 = hashtable->batches[0].shared;
    1090          36 :                 pstate->old_batches = pstate->batches;
    1091          36 :                 pstate->old_nbatch = hashtable->nbatch;
    1092          36 :                 pstate->batches = InvalidDsaPointer;
    1093             : 
    1094             :                 /* Free this backend's old accessors. */
    1095          36 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1096             : 
    1097             :                 /* Figure out how many batches to use. */
    1098          36 :                 if (hashtable->nbatch == 1)
    1099             :                 {
    1100             :                     /*
    1101             :                      * We are going from single-batch to multi-batch.  We need
    1102             :                      * to switch from one large combined memory budget to the
    1103             :                      * regular work_mem budget.
    1104             :                      */
    1105          24 :                     pstate->space_allowed = work_mem * 1024L;
    1106             : 
    1107             :                     /*
    1108             :                      * The combined work_mem of all participants wasn't
    1109             :                      * enough. Therefore one batch per participant would be
    1110             :                      * approximately equivalent and would probably also be
    1111             :                      * insufficient.  So try two batches per participant,
    1112             :                      * rounded up to a power of two.
    1113             :                      */
    1114          24 :                     new_nbatch = 1 << my_log2(pstate->nparticipants * 2);
    1115             :                 }
    1116             :                 else
    1117             :                 {
    1118             :                     /*
    1119             :                      * We were already multi-batched.  Try doubling the number
    1120             :                      * of batches.
    1121             :                      */
    1122          12 :                     new_nbatch = hashtable->nbatch * 2;
    1123             :                 }
    1124             : 
    1125             :                 /* Allocate new larger generation of batches. */
    1126             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1127          36 :                 ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
    1128             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1129             : 
    1130             :                 /* Replace or recycle batch 0's bucket array. */
    1131          36 :                 if (pstate->old_nbatch == 1)
    1132             :                 {
    1133             :                     double      dtuples;
    1134             :                     double      dbuckets;
    1135             :                     int         new_nbuckets;
    1136             : 
    1137             :                     /*
    1138             :                      * We probably also need a smaller bucket array.  How many
    1139             :                      * tuples do we expect per batch, assuming we have only
    1140             :                      * half of them so far?  Normally we don't need to change
    1141             :                      * the bucket array's size, because the size of each batch
    1142             :                      * stays the same as we add more batches, but in this
    1143             :                      * special case we move from a large batch to many smaller
    1144             :                      * batches and it would be wasteful to keep the large
    1145             :                      * array.
    1146             :                      */
    1147          24 :                     dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
    1148          24 :                     dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
    1149          24 :                     dbuckets = Min(dbuckets,
    1150             :                                    MaxAllocSize / sizeof(dsa_pointer_atomic));
    1151          24 :                     new_nbuckets = (int) dbuckets;
    1152          24 :                     new_nbuckets = Max(new_nbuckets, 1024);
    1153          24 :                     new_nbuckets = 1 << my_log2(new_nbuckets);
    1154          24 :                     dsa_free(hashtable->area, old_batch0->buckets);
    1155          48 :                     hashtable->batches[0].shared->buckets =
    1156          24 :                         dsa_allocate(hashtable->area,
    1157             :                                      sizeof(dsa_pointer_atomic) * new_nbuckets);
    1158          24 :                     buckets = (dsa_pointer_atomic *)
    1159          24 :                         dsa_get_address(hashtable->area,
    1160          24 :                                         hashtable->batches[0].shared->buckets);
    1161       73752 :                     for (i = 0; i < new_nbuckets; ++i)
    1162       73728 :                         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1163          24 :                     pstate->nbuckets = new_nbuckets;
    1164             :                 }
    1165             :                 else
    1166             :                 {
    1167             :                     /* Recycle the existing bucket array. */
    1168          12 :                     hashtable->batches[0].shared->buckets = old_batch0->buckets;
    1169          12 :                     buckets = (dsa_pointer_atomic *)
    1170          12 :                         dsa_get_address(hashtable->area, old_batch0->buckets);
    1171       40972 :                     for (i = 0; i < hashtable->nbuckets; ++i)
    1172       40960 :                         dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
    1173             :                 }
    1174             : 
    1175             :                 /* Move all chunks to the work queue for parallel processing. */
    1176          36 :                 pstate->chunk_work_queue = old_batch0->chunks;
    1177             : 
    1178             :                 /* Disable further growth temporarily while we're growing. */
    1179          36 :                 pstate->growth = PHJ_GROWTH_DISABLED;
    1180             :             }
    1181             :             else
    1182             :             {
    1183             :                 /* All other participants just flush their tuples to disk. */
    1184           0 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1185             :             }
    1186             :             /* Fall through. */
    1187             : 
    1188             :         case PHJ_GROW_BATCHES_ALLOCATING:
    1189             :             /* Wait for the above to be finished. */
    1190          38 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1191             :                                  WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATING);
    1192             :             /* Fall through. */
    1193             : 
    1194             :         case PHJ_GROW_BATCHES_REPARTITIONING:
    1195             :             /* Make sure that we have the current dimensions and buckets. */
    1196          38 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1197          38 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1198             :             /* Then partition, flush counters. */
    1199          38 :             ExecParallelHashRepartitionFirst(hashtable);
    1200          38 :             ExecParallelHashRepartitionRest(hashtable);
    1201          38 :             ExecParallelHashMergeCounters(hashtable);
    1202             :             /* Wait for the above to be finished. */
    1203          38 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1204             :                                  WAIT_EVENT_HASH_GROW_BATCHES_REPARTITIONING);
    1205             :             /* Fall through. */
    1206             : 
    1207             :         case PHJ_GROW_BATCHES_DECIDING:
    1208             : 
    1209             :             /*
    1210             :              * Elect one participant to clean up and decide whether further
    1211             :              * repartitioning is needed, or should be disabled because it's
    1212             :              * not helping.
    1213             :              */
    1214          38 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1215             :                                      WAIT_EVENT_HASH_GROW_BATCHES_DECIDING))
    1216             :             {
    1217          36 :                 bool        space_exhausted = false;
    1218          36 :                 bool        extreme_skew_detected = false;
    1219             : 
    1220             :                 /* Make sure that we have the current dimensions and buckets. */
    1221          36 :                 ExecParallelHashEnsureBatchAccessors(hashtable);
    1222          36 :                 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1223             : 
    1224             :                 /* Are any of the new generation of batches exhausted? */
    1225         292 :                 for (i = 0; i < hashtable->nbatch; ++i)
    1226             :                 {
    1227         256 :                     ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
    1228             : 
    1229         512 :                     if (batch->space_exhausted ||
    1230         256 :                         batch->estimated_size > pstate->space_allowed)
    1231             :                     {
    1232             :                         int         parent;
    1233             : 
    1234          16 :                         space_exhausted = true;
    1235             : 
    1236             :                         /*
    1237             :                          * Did this batch receive ALL of the tuples from its
    1238             :                          * parent batch?  That would indicate that further
    1239             :                          * repartitioning isn't going to help (the hash values
    1240             :                          * are probably all the same).
    1241             :                          */
    1242          16 :                         parent = i % pstate->old_nbatch;
    1243          16 :                         if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
    1244          16 :                             extreme_skew_detected = true;
    1245             :                     }
    1246             :                 }
    1247             : 
    1248             :                 /* Don't keep growing if it's not helping or we'd overflow. */
    1249          36 :                 if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
    1250          16 :                     pstate->growth = PHJ_GROWTH_DISABLED;
    1251          20 :                 else if (space_exhausted)
    1252           0 :                     pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    1253             :                 else
    1254          20 :                     pstate->growth = PHJ_GROWTH_OK;
    1255             : 
    1256             :                 /* Free the old batches in shared memory. */
    1257          36 :                 dsa_free(hashtable->area, pstate->old_batches);
    1258          36 :                 pstate->old_batches = InvalidDsaPointer;
    1259             :             }
    1260             :             /* Fall through. */
    1261             : 
    1262             :         case PHJ_GROW_BATCHES_FINISHING:
    1263             :             /* Wait for the above to complete. */
    1264          38 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1265             :                                  WAIT_EVENT_HASH_GROW_BATCHES_FINISHING);
    1266             :     }
    1267          38 : }
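
A hedged illustration of the batch-count choice above, assuming my_log2() (declared via utils/dynahash.h) returns the exponent of the smallest power of two greater than or equal to its argument; the stand-in below mimics that assumed behavior rather than calling the real function:

    #include <stdio.h>

    /* stand-in for my_log2(): exponent of next power of two >= num (assumed) */
    static int
    example_log2(long num)
    {
        int         i;
        long        limit;

        for (i = 0, limit = 1; limit < num; i++, limit *= 2)
            ;
        return i;
    }

    int
    main(void)
    {
        int         nparticipants;

        /* two batches per participant, rounded up to a power of two */
        for (nparticipants = 1; nparticipants <= 5; nparticipants++)
            printf("%d participants -> %d batches\n",
                   nparticipants, 1 << example_log2(nparticipants * 2));
        return 0;
    }

For example, 3 participants would yield 1 << example_log2(6) = 8 batches under this assumption.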
    1268             : 
    1269             : /*
    1270             :  * Repartition the tuples currently loaded into memory for inner batch 0
    1271             :  * because the number of batches has been increased.  Some tuples are retained
    1272             :  * in memory and some are written out to a later batch.
    1273             :  */
    1274             : static void
    1275          38 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
    1276             : {
    1277             :     dsa_pointer chunk_shared;
    1278             :     HashMemoryChunk chunk;
    1279             : 
    1280             :     Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
    1281             : 
    1282         276 :     while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
    1283             :     {
    1284         200 :         size_t      idx = 0;
    1285             : 
    1286             :         /* Repartition all tuples in this chunk. */
    1287      152162 :         while (idx < chunk->used)
    1288             :         {
    1289      151762 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1290      151762 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1291             :             HashJoinTuple copyTuple;
    1292             :             dsa_pointer shared;
    1293             :             int         bucketno;
    1294             :             int         batchno;
    1295             : 
    1296      151762 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1297             :                                       &bucketno, &batchno);
    1298             : 
    1299             :             Assert(batchno < hashtable->nbatch);
    1300      151762 :             if (batchno == 0)
    1301             :             {
    1302             :                 /* It still belongs in batch 0.  Copy to a new chunk. */
    1303       36068 :                 copyTuple =
    1304       36068 :                     ExecParallelHashTupleAlloc(hashtable,
    1305       36068 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1306             :                                                &shared);
    1307       36068 :                 copyTuple->hashvalue = hashTuple->hashvalue;
    1308       36068 :                 memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
    1309       36068 :                 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1310             :                                           copyTuple, shared);
    1311             :             }
    1312             :             else
    1313             :             {
    1314      115694 :                 size_t      tuple_size =
    1315      115694 :                 MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1316             : 
    1317             :                 /* It belongs in a later batch. */
    1318      115694 :                 hashtable->batches[batchno].estimated_size += tuple_size;
    1319      115694 :                 sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1320      115694 :                              &hashTuple->hashvalue, tuple);
    1321             :             }
    1322             : 
    1323             :             /* Count this tuple. */
    1324      151762 :             ++hashtable->batches[0].old_ntuples;
    1325      151762 :             ++hashtable->batches[batchno].ntuples;
    1326             : 
    1327      151762 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1328             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1329             :         }
    1330             : 
    1331             :         /* Free this chunk. */
    1332         200 :         dsa_free(hashtable->area, chunk_shared);
    1333             : 
    1334         200 :         CHECK_FOR_INTERRUPTS();
    1335             :     }
    1336          38 : }
    1337             : 
    1338             : /*
    1339             :  * Help repartition inner batches 1..n.
    1340             :  */
    1341             : static void
    1342          38 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
    1343             : {
    1344          38 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1345          38 :     int         old_nbatch = pstate->old_nbatch;
    1346             :     SharedTuplestoreAccessor **old_inner_tuples;
    1347             :     ParallelHashJoinBatch *old_batches;
    1348             :     int         i;
    1349             : 
    1350             :     /* Get our hands on the previous generation of batches. */
    1351          38 :     old_batches = (ParallelHashJoinBatch *)
    1352          38 :         dsa_get_address(hashtable->area, pstate->old_batches);
    1353          38 :     old_inner_tuples = palloc0(sizeof(SharedTuplestoreAccessor *) * old_nbatch);
    1354          96 :     for (i = 1; i < old_nbatch; ++i)
    1355             :     {
    1356          58 :         ParallelHashJoinBatch *shared =
    1357          58 :         NthParallelHashJoinBatch(old_batches, i);
    1358             : 
    1359          58 :         old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
    1360             :                                          ParallelWorkerNumber + 1,
    1361             :                                          &pstate->fileset);
    1362             :     }
    1363             : 
    1364             :     /* Join in the effort to repartition them. */
    1365          96 :     for (i = 1; i < old_nbatch; ++i)
    1366             :     {
    1367             :         MinimalTuple tuple;
    1368             :         uint32      hashvalue;
    1369             : 
    1370             :         /* Scan one partition from the previous generation. */
    1371          58 :         sts_begin_parallel_scan(old_inner_tuples[i]);
    1372      142762 :         while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
    1373             :         {
    1374      142646 :             size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1375             :             int         bucketno;
    1376             :             int         batchno;
    1377             : 
    1378             :             /* Decide which partition it goes to in the new generation. */
    1379      142646 :             ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
    1380             :                                       &batchno);
    1381             : 
    1382      142646 :             hashtable->batches[batchno].estimated_size += tuple_size;
    1383      142646 :             ++hashtable->batches[batchno].ntuples;
    1384      142646 :             ++hashtable->batches[i].old_ntuples;
    1385             : 
    1386             :             /* Store the tuple in its new batch. */
    1387      142646 :             sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1388             :                          &hashvalue, tuple);
    1389             : 
    1390      142646 :             CHECK_FOR_INTERRUPTS();
    1391             :         }
    1392          58 :         sts_end_parallel_scan(old_inner_tuples[i]);
    1393             :     }
    1394             : 
    1395          38 :     pfree(old_inner_tuples);
    1396          38 : }
    1397             : 
    1398             : /*
    1399             :  * Transfer the backend-local per-batch counters to the shared totals.
    1400             :  */
    1401             : static void
    1402         192 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
    1403             : {
    1404         192 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1405             :     int         i;
    1406             : 
    1407         192 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    1408         192 :     pstate->total_tuples = 0;
    1409        1212 :     for (i = 0; i < hashtable->nbatch; ++i)
    1410             :     {
    1411        1020 :         ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
    1412             : 
    1413        1020 :         batch->shared->size += batch->size;
    1414        1020 :         batch->shared->estimated_size += batch->estimated_size;
    1415        1020 :         batch->shared->ntuples += batch->ntuples;
    1416        1020 :         batch->shared->old_ntuples += batch->old_ntuples;
    1417        1020 :         batch->size = 0;
    1418        1020 :         batch->estimated_size = 0;
    1419        1020 :         batch->ntuples = 0;
    1420        1020 :         batch->old_ntuples = 0;
    1421        1020 :         pstate->total_tuples += batch->shared->ntuples;
    1422             :     }
    1423         192 :     LWLockRelease(&pstate->lock);
    1424         192 : }
    1425             : 
    1426             : /*
    1427             :  * ExecHashIncreaseNumBuckets
    1428             :  *      increase the original number of buckets in order to reduce
    1429             :  *      number of tuples per bucket
    1430             :  */
    1431             : static void
    1432         386 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
    1433             : {
    1434             :     HashMemoryChunk chunk;
    1435             : 
    1436             :     /* do nothing if not an increase (it's called increase for a reason) */
    1437         386 :     if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
    1438           0 :         return;
    1439             : 
    1440             : #ifdef HJDEBUG
    1441             :     printf("Hashjoin %p: increasing nbuckets %d => %d\n",
    1442             :            hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
    1443             : #endif
    1444             : 
    1445         386 :     hashtable->nbuckets = hashtable->nbuckets_optimal;
    1446         386 :     hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
    1447             : 
    1448             :     Assert(hashtable->nbuckets > 1);
    1449             :     Assert(hashtable->nbuckets <= (INT_MAX / 2));
    1450             :     Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
    1451             : 
    1452             :     /*
    1453             :      * Just reallocate the proper number of buckets - we don't need to walk
    1454             :      * through them - we can walk the dense-allocated chunks (just like in
    1455             :      * ExecHashIncreaseNumBatches, but without all the copying into new
    1456             :      * chunks)
    1457             :      */
    1458         386 :     hashtable->buckets.unshared =
    1459         386 :         (HashJoinTuple *) repalloc(hashtable->buckets.unshared,
    1460         386 :                                    hashtable->nbuckets * sizeof(HashJoinTuple));
    1461             : 
    1462         386 :     memset(hashtable->buckets.unshared, 0,
    1463         386 :            hashtable->nbuckets * sizeof(HashJoinTuple));
    1464             : 
    1465             :     /* scan through all tuples in all chunks to rebuild the hash table */
    1466        2024 :     for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
    1467             :     {
    1468             :         /* process all tuples stored in this chunk */
    1469        1638 :         size_t      idx = 0;
    1470             : 
    1471     1231024 :         while (idx < chunk->used)
    1472             :         {
    1473     1227748 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1474             :             int         bucketno;
    1475             :             int         batchno;
    1476             : 
    1477     1227748 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1478             :                                       &bucketno, &batchno);
    1479             : 
    1480             :             /* add the tuple to the proper bucket */
    1481     1227748 :             hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1482     1227748 :             hashtable->buckets.unshared[bucketno] = hashTuple;
    1483             : 
    1484             :             /* advance index past the tuple */
    1485     1227748 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1486             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1487             :         }
    1488             : 
    1489             :         /* allow this loop to be cancellable */
    1490        1638 :         CHECK_FOR_INTERRUPTS();
    1491             :     }
    1492             : }
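
Both bucket-growth paths above rely on the same dense-chunk layout: tuples sit back to back inside each chunk, each occupying MAXALIGN(HJTUPLE_OVERHEAD + t_len) bytes, so a whole chunk can be rescanned with a single running offset and no per-tuple bookkeeping. A self-contained sketch of that pack-then-walk pattern (with an invented header struct and an assumed 8-byte alignment, not the real HashJoinTuple layout):

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    /* 8-byte alignment assumed, standing in for MAXALIGN() */
    #define EXAMPLE_MAXALIGN(LEN)  (((size_t) (LEN) + 7) & ~((size_t) 7))

    typedef struct ExampleTupleHeader
    {
        unsigned int hashvalue;
        unsigned int t_len;     /* length of the payload that follows */
    } ExampleTupleHeader;

    int
    main(void)
    {
        char        chunk[256];
        size_t      used = 0;
        size_t      idx;
        unsigned int lengths[] = {10, 3, 25};
        int         i;

        /* densely pack three variable-length "tuples" into the chunk */
        for (i = 0; i < 3; i++)
        {
            ExampleTupleHeader hdr = {0xabc0u + i, lengths[i]};

            memcpy(chunk + used, &hdr, sizeof(hdr));
            used += EXAMPLE_MAXALIGN(sizeof(hdr) + hdr.t_len);
        }

        /* walk them back with one running offset, as the rebuild loops do */
        idx = 0;
        while (idx < used)
        {
            ExampleTupleHeader hdr;

            memcpy(&hdr, chunk + idx, sizeof(hdr));
            printf("tuple at offset %zu, hash 0x%x, len %u\n",
                   idx, hdr.hashvalue, hdr.t_len);
            idx += EXAMPLE_MAXALIGN(sizeof(hdr) + hdr.t_len);
        }
        return 0;
    }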
    1493             : 
    1494             : static void
    1495          96 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
    1496             : {
    1497          96 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1498             :     int         i;
    1499             :     HashMemoryChunk chunk;
    1500             :     dsa_pointer chunk_s;
    1501             : 
    1502             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
    1503             : 
    1504             :     /*
    1505             :      * It's unlikely, but we need to be prepared for new participants to show
    1506             :      * up while we're in the middle of this operation, so we need to switch on
    1507             :      * the barrier phase here.
    1508             :      */
    1509          96 :     switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
    1510             :     {
    1511             :         case PHJ_GROW_BUCKETS_ELECTING:
    1512             :             /* Elect one participant to prepare to increase nbuckets. */
    1513          96 :             if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1514             :                                      WAIT_EVENT_HASH_GROW_BUCKETS_ELECTING))
    1515             :             {
    1516             :                 size_t      size;
    1517             :                 dsa_pointer_atomic *buckets;
    1518             : 
    1519             :                 /* Double the size of the bucket array. */
    1520          72 :                 pstate->nbuckets *= 2;
    1521          72 :                 size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
    1522          72 :                 hashtable->batches[0].shared->size += size / 2;
    1523          72 :                 dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
    1524         144 :                 hashtable->batches[0].shared->buckets =
    1525          72 :                     dsa_allocate(hashtable->area, size);
    1526          72 :                 buckets = (dsa_pointer_atomic *)
    1527          72 :                     dsa_get_address(hashtable->area,
    1528          72 :                                     hashtable->batches[0].shared->buckets);
    1529      622664 :                 for (i = 0; i < pstate->nbuckets; ++i)
    1530      622592 :                     dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1531             : 
    1532             :                 /* Put the chunk list onto the work queue. */
    1533          72 :                 pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
    1534             : 
    1535             :                 /* Clear the flag. */
    1536          72 :                 pstate->growth = PHJ_GROWTH_OK;
    1537             :             }
    1538             :             /* Fall through. */
    1539             : 
    1540             :         case PHJ_GROW_BUCKETS_ALLOCATING:
    1541             :             /* Wait for the above to complete. */
    1542          96 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1543             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATING);
    1544             :             /* Fall through. */
    1545             : 
    1546             :         case PHJ_GROW_BUCKETS_REINSERTING:
    1547             :             /* Reinsert all tuples into the hash table. */
    1548          96 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1549          96 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1550         632 :             while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
    1551             :             {
    1552         440 :                 size_t      idx = 0;
    1553             : 
    1554      360800 :                 while (idx < chunk->used)
    1555             :                 {
    1556      359920 :                     HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1557      359920 :                     dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
    1558             :                     int         bucketno;
    1559             :                     int         batchno;
    1560             : 
    1561      359920 :                     ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1562             :                                               &bucketno, &batchno);
    1563             :                     Assert(batchno == 0);
    1564             : 
    1565             :                     /* add the tuple to the proper bucket */
    1566      359920 :                     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1567             :                                               hashTuple, shared);
    1568             : 
    1569             :                     /* advance index past the tuple */
    1570      359920 :                     idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1571             :                                     HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1572             :                 }
    1573             : 
    1574             :                 /* allow this loop to be cancellable */
    1575         440 :                 CHECK_FOR_INTERRUPTS();
    1576             :             }
    1577          96 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1578             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REINSERTING);
    1579             :     }
    1580          96 : }
    1581             : 
    1582             : /*
    1583             :  * ExecHashTableInsert
    1584             :  *      insert a tuple into the hash table depending on the hash value
    1585             :  *      it may just go to a temp file for later batches
    1586             :  *
    1587             :  * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
    1588             :  * tuple; the minimal case in particular is certain to happen while reloading
    1589             :  * tuples from batch files.  We could save some cycles in the regular-tuple
    1590             :  * case by not forcing the slot contents into minimal form; not clear if it's
    1591             :  * worth the messiness required.
    1592             :  */
    1593             : void
    1594     8893810 : ExecHashTableInsert(HashJoinTable hashtable,
    1595             :                     TupleTableSlot *slot,
    1596             :                     uint32 hashvalue)
    1597             : {
    1598             :     bool        shouldFree;
    1599     8893810 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1600             :     int         bucketno;
    1601             :     int         batchno;
    1602             : 
    1603     8893810 :     ExecHashGetBucketAndBatch(hashtable, hashvalue,
    1604             :                               &bucketno, &batchno);
    1605             : 
    1606             :     /*
    1607             :      * decide whether to put the tuple in the hash table or a temp file
    1608             :      */
    1609     8893810 :     if (batchno == hashtable->curbatch)
    1610             :     {
    1611             :         /*
    1612             :          * put the tuple in hash table
    1613             :          */
    1614             :         HashJoinTuple hashTuple;
    1615             :         int         hashTupleSize;
    1616     7199346 :         double      ntuples = (hashtable->totalTuples - hashtable->skewTuples);
    1617             : 
    1618             :         /* Create the HashJoinTuple */
    1619     7199346 :         hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    1620     7199346 :         hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1621             : 
    1622     7199346 :         hashTuple->hashvalue = hashvalue;
    1623     7199346 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1624             : 
    1625             :         /*
    1626             :          * We always reset the tuple-matched flag on insertion.  This is okay
    1627             :          * even when reloading a tuple from a batch file, since the tuple
    1628             :          * could not possibly have been matched to an outer tuple before it
    1629             :          * went into the batch file.
    1630             :          */
    1631     7199346 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1632             : 
    1633             :         /* Push it onto the front of the bucket's list */
    1634     7199346 :         hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1635     7199346 :         hashtable->buckets.unshared[bucketno] = hashTuple;
    1636             : 
    1637             :         /*
    1638             :          * Increase the (optimal) number of buckets if we just exceeded the
    1639             :          * NTUP_PER_BUCKET threshold, but only when there's still a single
    1640             :          * batch.
    1641             :          */
    1642    12482460 :         if (hashtable->nbatch == 1 &&
    1643     5283114 :             ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
    1644             :         {
    1645             :             /* Guard against integer overflow and alloc size overflow */
    1646        1588 :             if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
    1647         794 :                 hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
    1648             :             {
    1649         794 :                 hashtable->nbuckets_optimal *= 2;
    1650         794 :                 hashtable->log2_nbuckets_optimal += 1;
    1651             :             }
    1652             :         }
    1653             : 
    1654             :         /* Account for space used, and back off if we've used too much */
    1655     7199346 :         hashtable->spaceUsed += hashTupleSize;
    1656     7199346 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    1657     5721074 :             hashtable->spacePeak = hashtable->spaceUsed;
    1658    21598038 :         if (hashtable->spaceUsed +
    1659     7199346 :             hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
    1660     7199346 :             > hashtable->spaceAllowed)
    1661      345496 :             ExecHashIncreaseNumBatches(hashtable);
    1662             :     }
    1663             :     else
    1664             :     {
    1665             :         /*
    1666             :          * put the tuple into a temp file for later batches
    1667             :          */
    1668             :         Assert(batchno > hashtable->curbatch);
    1669     1694464 :         ExecHashJoinSaveTuple(tuple,
    1670             :                               hashvalue,
    1671     1694464 :                               &hashtable->innerBatchFile[batchno]);
    1672             :     }
    1673             : 
    1674     8893810 :     if (shouldFree)
    1675     6591276 :         heap_free_minimal_tuple(tuple);
    1676     8893810 : }
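
The bucket-growth heuristic in the insert path above doubles nbuckets_optimal whenever the in-memory tuple count exceeds NTUP_PER_BUCKET tuples per optimal bucket, and only while there is still a single batch. A rough sketch of that trigger, using an assumed illustrative value in place of the NTUP_PER_BUCKET constant defined earlier in this file:

    #include <stdio.h>

    #define EXAMPLE_NTUP_PER_BUCKET 1   /* illustrative stand-in value */

    int
    main(void)
    {
        int         nbuckets_optimal = 1024;
        long        ntuples;

        for (ntuples = 1; ntuples <= 5000; ntuples++)
        {
            /* doubling trigger, mirroring the single-batch case above */
            if (ntuples > (long) nbuckets_optimal * EXAMPLE_NTUP_PER_BUCKET)
                nbuckets_optimal *= 2;
        }
        printf("after %ld tuples: %d optimal buckets\n",
               ntuples - 1, nbuckets_optimal);
        return 0;
    }

Starting from 1024 buckets, the sketch doubles at 1025, 2049, and 4097 tuples, ending with 8192 optimal buckets after 5000 tuples.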
    1677             : 
    1678             : /*
    1679             :  * ExecParallelHashTableInsert
    1680             :  *      insert a tuple into a shared hash table or shared batch tuplestore
    1681             :  */
    1682             : void
    1683     1200052 : ExecParallelHashTableInsert(HashJoinTable hashtable,
    1684             :                             TupleTableSlot *slot,
    1685             :                             uint32 hashvalue)
    1686             : {
    1687             :     bool        shouldFree;
    1688     1200052 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1689             :     dsa_pointer shared;
    1690             :     int         bucketno;
    1691             :     int         batchno;
    1692             : 
    1693             : retry:
    1694     1200292 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1695             : 
    1696     1200292 :     if (batchno == 0)
    1697             :     {
    1698             :         HashJoinTuple hashTuple;
    1699             : 
    1700             :         /* Try to load it into memory. */
    1701             :         Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
    1702             :                PHJ_BUILD_HASHING_INNER);
    1703      661598 :         hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1704      661598 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1705             :                                                &shared);
    1706      661598 :         if (hashTuple == NULL)
    1707         216 :             goto retry;
    1708             : 
    1709             :         /* Store the hash value in the HashJoinTuple header. */
    1710      661382 :         hashTuple->hashvalue = hashvalue;
    1711      661382 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1712             : 
    1713             :         /* Push it onto the front of the bucket's list */
    1714      661382 :         ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1715             :                                   hashTuple, shared);
    1716             :     }
    1717             :     else
    1718             :     {
    1719      538694 :         size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1720             : 
    1721             :         Assert(batchno > 0);
    1722             : 
    1723             :         /* Try to preallocate space in the batch if necessary. */
    1724      538694 :         if (hashtable->batches[batchno].preallocated < tuple_size)
    1725             :         {
    1726        1060 :             if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
    1727          24 :                 goto retry;
    1728             :         }
    1729             : 
    1730             :         Assert(hashtable->batches[batchno].preallocated >= tuple_size);
    1731      538670 :         hashtable->batches[batchno].preallocated -= tuple_size;
    1732      538670 :         sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
    1733             :                      tuple);
    1734             :     }
    1735     1200052 :     ++hashtable->batches[batchno].ntuples;
    1736             : 
    1737     1200052 :     if (shouldFree)
    1738     1200052 :         heap_free_minimal_tuple(tuple);
    1739     1200052 : }
    1740             : 
    1741             : /*
    1742             :  * Insert a tuple into the current hash table.  Unlike
    1743             :  * ExecParallelHashTableInsert, this version is not prepared to send the tuple
    1744             :  * to other batches or to run out of memory, and should only be called with
    1745             :  * tuples that belong in the current batch once growth has been disabled.
    1746             :  */
    1747             : void
    1748      654364 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
    1749             :                                         TupleTableSlot *slot,
    1750             :                                         uint32 hashvalue)
    1751             : {
    1752             :     bool        shouldFree;
    1753      654364 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1754             :     HashJoinTuple hashTuple;
    1755             :     dsa_pointer shared;
    1756             :     int         batchno;
    1757             :     int         bucketno;
    1758             : 
    1759      654364 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1760             :     Assert(batchno == hashtable->curbatch);
    1761      654364 :     hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1762      654364 :                                            HJTUPLE_OVERHEAD + tuple->t_len,
    1763             :                                            &shared);
    1764      654364 :     hashTuple->hashvalue = hashvalue;
    1765      654364 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1766      654364 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1767      654364 :     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1768             :                               hashTuple, shared);
    1769             : 
    1770      654364 :     if (shouldFree)
    1771           0 :         heap_free_minimal_tuple(tuple);
    1772      654364 : }
    1773             : 
    1774             : /*
    1775             :  * ExecHashGetHashValue
    1776             :  *      Compute the hash value for a tuple
    1777             :  *
    1778             :  * The tuple to be tested must be in econtext->ecxt_outertuple (thus Vars in
    1779             :  * the hashkeys expressions need to have OUTER_VAR as varno). If outer_tuple
    1780             :  * is false (meaning it's the HashJoin's inner node, Hash), econtext,
    1781             :  * hashkeys, and slot need to be from Hash, with hashkeys/slot referencing and
    1782             :  * being suitable for tuples from the node below the Hash. Conversely, if
    1783             :  * outer_tuple is true, econtext is from HashJoin, and hashkeys/slot need to
    1784             :  * be appropriate for tuples from HashJoin's outer node.
    1785             :  *
    1786             :  * A true result means the tuple's hash value has been successfully computed
    1787             :  * and stored at *hashvalue.  A false result means the tuple cannot match
    1788             :  * because it contains a null attribute, and hence it should be discarded
    1789             :  * immediately.  (If keep_nulls is true then false is never returned.)
    1790             :  */
    1791             : bool
    1792    17934592 : ExecHashGetHashValue(HashJoinTable hashtable,
    1793             :                      ExprContext *econtext,
    1794             :                      List *hashkeys,
    1795             :                      bool outer_tuple,
    1796             :                      bool keep_nulls,
    1797             :                      uint32 *hashvalue)
    1798             : {
    1799    17934592 :     uint32      hashkey = 0;
    1800             :     FmgrInfo   *hashfunctions;
    1801             :     ListCell   *hk;
    1802    17934592 :     int         i = 0;
    1803             :     MemoryContext oldContext;
    1804             : 
    1805             :     /*
    1806             :      * We reset the eval context each time to reclaim any memory leaked in the
    1807             :      * hashkey expressions.
    1808             :      */
    1809    17934592 :     ResetExprContext(econtext);
    1810             : 
    1811    17934592 :     oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
    1812             : 
    1813    17934592 :     if (outer_tuple)
    1814     9691586 :         hashfunctions = hashtable->outer_hashfunctions;
    1815             :     else
    1816     8243006 :         hashfunctions = hashtable->inner_hashfunctions;
    1817             : 
    1818    36902268 :     foreach(hk, hashkeys)
    1819             :     {
    1820    18968144 :         ExprState  *keyexpr = (ExprState *) lfirst(hk);
    1821             :         Datum       keyval;
    1822             :         bool        isNull;
    1823             : 
    1824             :         /* rotate hashkey left 1 bit at each step */
    1825    18968144 :         hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);
    1826             : 
    1827             :         /*
    1828             :          * Get the join attribute value of the tuple
    1829             :          */
    1830    18968144 :         keyval = ExecEvalExpr(keyexpr, econtext, &isNull);
    1831             : 
    1832             :         /*
    1833             :          * If the attribute is NULL, and the join operator is strict, then
    1834             :          * this tuple cannot pass the join qual so we can reject it
    1835             :          * immediately (unless we're scanning the outside of an outer join, in
    1836             :          * which case we must not reject it).  Otherwise we act like the
    1837             :          * hashcode of NULL is zero (this will support operators that act like
    1838             :          * IS NOT DISTINCT, though not any more-random behavior).  We treat
    1839             :          * the hash support function as strict even if the operator is not.
    1840             :          *
    1841             :          * Note: currently, all hashjoinable operators must be strict since
    1842             :          * the hash index AM assumes that.  However, it takes so little extra
    1843             :          * code here to allow non-strict that we may as well do it.
    1844             :          */
    1845    18968144 :         if (isNull)
    1846             :         {
    1847         610 :             if (hashtable->hashStrict[i] && !keep_nulls)
    1848             :             {
    1849         468 :                 MemoryContextSwitchTo(oldContext);
    1850         468 :                 return false;   /* cannot match */
    1851             :             }
    1852             :             /* else, leave hashkey unmodified, equivalent to hashcode 0 */
    1853             :         }
    1854             :         else
    1855             :         {
    1856             :             /* Compute the hash function */
    1857             :             uint32      hkey;
    1858             : 
    1859    18967534 :             hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i], hashtable->collations[i], keyval));
    1860    18967534 :             hashkey ^= hkey;
    1861             :         }
    1862             : 
    1863    18967676 :         i++;
    1864             :     }
    1865             : 
    1866    17934124 :     MemoryContextSwitchTo(oldContext);
    1867             : 
    1868    17934124 :     *hashvalue = hashkey;
    1869    17934124 :     return true;
    1870             : }
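
The per-attribute combination described above (rotate the accumulated key left one bit, then XOR in the attribute's hash, with NULLs contributing zero) can be seen in isolation in this small sketch; the per-attribute hash values are invented constants standing in for FunctionCall1Coll() results:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        /* made-up per-attribute hashes; a NULL attribute would contribute 0 */
        uint32_t    per_key_hashes[] = {0xdeadbeef, 0x12345678, 0x0};
        uint32_t    hashkey = 0;
        int         i;

        for (i = 0; i < 3; i++)
        {
            /* rotate hashkey left 1 bit at each step */
            hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);
            hashkey ^= per_key_hashes[i];
        }
        printf("combined hash = 0x%08x\n", (unsigned) hashkey);
        return 0;
    }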
    1871             : 
    1872             : /*
    1873             :  * ExecHashGetBucketAndBatch
    1874             :  *      Determine the bucket number and batch number for a hash value
    1875             :  *
    1876             :  * Note: on-the-fly increases of nbatch must not change the bucket number
    1877             :  * for a given hash code (since we don't move tuples to different hash
    1878             :  * chains), and must only cause the batch number to remain the same or
    1879             :  * increase.  Our algorithm is
    1880             :  *      bucketno = hashvalue MOD nbuckets
    1881             :  *      batchno = (hashvalue DIV nbuckets) MOD nbatch
    1882             :  * where nbuckets and nbatch are both expected to be powers of 2, so we can
    1883             :  * do the computations by shifting and masking.  (This assumes that all hash
    1884             :  * functions are good about randomizing all their output bits, else we are
    1885             :  * likely to have very skewed bucket or batch occupancy.)
    1886             :  *
    1887             :  * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
    1888             :  * bucket count growth.  Once we start batching, the value is fixed and does
    1889             :  * not change over the course of the join (making it possible to compute batch
    1890             :  * number the way we do here).
    1891             :  *
    1892             :  * nbatch is always a power of 2; we increase it only by doubling it.  This
    1893             :  * effectively adds one more bit to the top of the batchno.
    1894             :  */
    1895             : void
    1896    24284420 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
    1897             :                           uint32 hashvalue,
    1898             :                           int *bucketno,
    1899             :                           int *batchno)
    1900             : {
    1901    24284420 :     uint32      nbuckets = (uint32) hashtable->nbuckets;
    1902    24284420 :     uint32      nbatch = (uint32) hashtable->nbatch;
    1903             : 
    1904    24284420 :     if (nbatch > 1)
    1905             :     {
    1906             :         /* we can do MOD by masking, DIV by shifting */
    1907     9091024 :         *bucketno = hashvalue & (nbuckets - 1);
    1908     9091024 :         *batchno = (hashvalue >> hashtable->log2_nbuckets) & (nbatch - 1);
    1909             :     }
    1910             :     else
    1911             :     {
    1912    15193396 :         *bucketno = hashvalue & (nbuckets - 1);
    1913    15193396 :         *batchno = 0;
    1914             :     }
    1915    24284420 : }
    1916             : 
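/*
 * Illustrative sketch (standalone, hypothetical names, not from nodeHash.c):
 * the shift-and-mask form of the mapping above, plus a check of the invariant
 * that doubling nbatch never changes bucketno and never lowers batchno,
 * because the extra batch bit is taken from higher-order hash bits.
 */
#include <assert.h>
#include <stdint.h>

static void
get_bucket_and_batch(uint32_t hashvalue, uint32_t nbuckets, int log2_nbuckets,
                     uint32_t nbatch, uint32_t *bucketno, uint32_t *batchno)
{
	*bucketno = hashvalue & (nbuckets - 1);                 /* MOD by masking */
	*batchno = (hashvalue >> log2_nbuckets) & (nbatch - 1); /* DIV by shifting */
}

int
main(void)
{
	uint32_t	hash = 0xDEADBEEF;
	uint32_t	bucket1, batch1, bucket2, batch2;

	get_bucket_and_batch(hash, 1024, 10, 4, &bucket1, &batch1);
	get_bucket_and_batch(hash, 1024, 10, 8, &bucket2, &batch2); /* nbatch doubled */
	assert(bucket1 == bucket2);	/* bucket number is stable */
	assert(batch2 >= batch1);	/* batch number can only stay the same or grow */
	return 0;
}
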
    1917             : /*
    1918             :  * ExecScanHashBucket
    1919             :  *      scan a hash bucket for matches to the current outer tuple
    1920             :  *
    1921             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    1922             :  *
    1923             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    1924             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    1925             :  * for the latter.
    1926             :  */
    1927             : bool
    1928    12204636 : ExecScanHashBucket(HashJoinState *hjstate,
    1929             :                    ExprContext *econtext)
    1930             : {
    1931    12204636 :     ExprState  *hjclauses = hjstate->hashclauses;
    1932    12204636 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    1933    12204636 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    1934    12204636 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    1935             : 
    1936             :     /*
    1937             :      * hj_CurTuple is the address of the tuple last returned from the current
    1938             :      * bucket, or NULL if it's time to start scanning a new bucket.
    1939             :      *
    1940             :      * If the tuple hashed to a skew bucket then scan the skew bucket
    1941             :      * otherwise scan the standard hashtable bucket.
    1942             :      */
    1943    12204636 :     if (hashTuple != NULL)
    1944     3713530 :         hashTuple = hashTuple->next.unshared;
    1945     8491106 :     else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
    1946         800 :         hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
    1947             :     else
    1948     8490306 :         hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    1949             : 
    1950    26823062 :     while (hashTuple != NULL)
    1951             :     {
    1952     7932336 :         if (hashTuple->hashvalue == hashvalue)
    1953             :         {
    1954             :             TupleTableSlot *inntuple;
    1955             : 
    1956             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    1957     5518546 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    1958             :                                              hjstate->hj_HashTupleSlot,
    1959             :                                              false);    /* do not pfree */
    1960     5518546 :             econtext->ecxt_innertuple = inntuple;
    1961             : 
    1962     5518546 :             if (ExecQualAndReset(hjclauses, econtext))
    1963             :             {
    1964     5518546 :                 hjstate->hj_CurTuple = hashTuple;
    1965     5518546 :                 return true;
    1966             :             }
    1967             :         }
    1968             : 
    1969     2413790 :         hashTuple = hashTuple->next.unshared;
    1970             :     }
    1971             : 
    1972             :     /*
    1973             :      * no match
    1974             :      */
    1975     6686090 :     return false;
    1976             : }
    1977             : 
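/*
 * Illustrative sketch (hypothetical types, not from nodeHash.c): the resume
 * convention used above -- a saved cursor of NULL means "start at the head
 * of the selected bucket's chain", otherwise continue from the entry after
 * the one returned last time.
 */
struct sketch_entry
{
	struct sketch_entry *next;
	unsigned int hashvalue;
};

static struct sketch_entry *
sketch_scan_resume(struct sketch_entry *bucket_head, struct sketch_entry *cur)
{
	return (cur != NULL) ? cur->next : bucket_head;
}
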
    1978             : /*
    1979             :  * ExecParallelScanHashBucket
    1980             :  *      scan a hash bucket for matches to the current outer tuple
    1981             :  *
    1982             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    1983             :  *
    1984             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    1985             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    1986             :  * for the latter.
    1987             :  */
    1988             : bool
    1989     2400032 : ExecParallelScanHashBucket(HashJoinState *hjstate,
    1990             :                            ExprContext *econtext)
    1991             : {
    1992     2400032 :     ExprState  *hjclauses = hjstate->hashclauses;
    1993     2400032 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    1994     2400032 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    1995     2400032 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    1996             : 
    1997             :     /*
    1998             :      * hj_CurTuple is the address of the tuple last returned from the current
    1999             :      * bucket, or NULL if it's time to start scanning a new bucket.
    2000             :      */
    2001     2400032 :     if (hashTuple != NULL)
    2002     1200016 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2003             :     else
    2004     1200016 :         hashTuple = ExecParallelHashFirstTuple(hashtable,
    2005             :                                                hjstate->hj_CurBucketNo);
    2006             : 
    2007     5580472 :     while (hashTuple != NULL)
    2008             :     {
    2009     1980424 :         if (hashTuple->hashvalue == hashvalue)
    2010             :         {
    2011             :             TupleTableSlot *inntuple;
    2012             : 
    2013             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    2014     1200016 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2015             :                                              hjstate->hj_HashTupleSlot,
    2016             :                                              false);    /* do not pfree */
    2017     1200016 :             econtext->ecxt_innertuple = inntuple;
    2018             : 
    2019     1200016 :             if (ExecQualAndReset(hjclauses, econtext))
    2020             :             {
    2021     1200016 :                 hjstate->hj_CurTuple = hashTuple;
    2022     1200016 :                 return true;
    2023             :             }
    2024             :         }
    2025             : 
    2026      780408 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2027             :     }
    2028             : 
    2029             :     /*
    2030             :      * no match
    2031             :      */
    2032     1200016 :     return false;
    2033             : }
    2034             : 
    2035             : /*
    2036             :  * ExecPrepHashTableForUnmatched
    2037             :  *      set up for a series of ExecScanHashTableForUnmatched calls
    2038             :  */
    2039             : void
    2040        4364 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
    2041             : {
    2042             :     /*----------
    2043             :      * During this scan we use the HashJoinState fields as follows:
    2044             :      *
    2045             :      * hj_CurBucketNo: next regular bucket to scan
    2046             :      * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
    2047             :      * hj_CurTuple: last tuple returned, or NULL to start next bucket
    2048             :      *----------
    2049             :      */
    2050        4364 :     hjstate->hj_CurBucketNo = 0;
    2051        4364 :     hjstate->hj_CurSkewBucketNo = 0;
    2052        4364 :     hjstate->hj_CurTuple = NULL;
    2053        4364 : }
    2054             : 
    2055             : /*
    2056             :  * ExecScanHashTableForUnmatched
    2057             :  *      scan the hash table for unmatched inner tuples
    2058             :  *
    2059             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2060             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2061             :  * for the latter.
    2062             :  */
    2063             : bool
    2064      207888 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
    2065             : {
    2066      207888 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2067      207888 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2068             : 
    2069             :     for (;;)
    2070             :     {
    2071             :         /*
    2072             :          * hj_CurTuple is the address of the tuple last returned from the
    2073             :          * current bucket, or NULL if it's time to start scanning a new
    2074             :          * bucket.
    2075             :          */
    2076    10382352 :         if (hashTuple != NULL)
    2077      203524 :             hashTuple = hashTuple->next.unshared;
    2078     5091596 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2079             :         {
    2080     5087236 :             hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    2081     5087236 :             hjstate->hj_CurBucketNo++;
    2082             :         }
    2083        4360 :         else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
    2084             :         {
    2085           0 :             int         j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
    2086             : 
    2087           0 :             hashTuple = hashtable->skewBucket[j]->tuples;
    2088           0 :             hjstate->hj_CurSkewBucketNo++;
    2089             :         }
    2090             :         else
    2091        4360 :             break;              /* finished all buckets */
    2092             : 
    2093    10806580 :         while (hashTuple != NULL)
    2094             :         {
    2095      428588 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2096             :             {
    2097             :                 TupleTableSlot *inntuple;
    2098             : 
    2099             :                 /* insert hashtable's tuple into exec slot */
    2100      203528 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2101             :                                                  hjstate->hj_HashTupleSlot,
    2102             :                                                  false);    /* do not pfree */
    2103      203528 :                 econtext->ecxt_innertuple = inntuple;
    2104             : 
    2105             :                 /*
    2106             :                  * Reset temp memory each time; although this function doesn't
    2107             :                  * do any qual eval, the caller will, so let's keep it
    2108             :                  * parallel to ExecScanHashBucket.
    2109             :                  */
    2110      203528 :                 ResetExprContext(econtext);
    2111             : 
    2112      203528 :                 hjstate->hj_CurTuple = hashTuple;
    2113      203528 :                 return true;
    2114             :             }
    2115             : 
    2116      225060 :             hashTuple = hashTuple->next.unshared;
    2117             :         }
    2118             : 
    2119             :         /* allow this loop to be cancellable */
    2120     5087232 :         CHECK_FOR_INTERRUPTS();
    2121             :     }
    2122             : 
    2123             :     /*
    2124             :      * no more unmatched tuples
    2125             :      */
    2126        4360 :     return false;
    2127             : }
    2128             : 
    2129             : /*
    2130             :  * ExecHashTableReset
    2131             :  *
    2132             :  *      reset hash table header for new batch
    2133             :  */
    2134             : void
    2135         912 : ExecHashTableReset(HashJoinTable hashtable)
    2136             : {
    2137             :     MemoryContext oldcxt;
    2138         912 :     int         nbuckets = hashtable->nbuckets;
    2139             : 
    2140             :     /*
    2141             :      * Release all the hash buckets and tuples acquired in the prior pass, and
    2142             :      * reinitialize the context for a new pass.
    2143             :      */
    2144         912 :     MemoryContextReset(hashtable->batchCxt);
    2145         912 :     oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
    2146             : 
    2147             :     /* Reallocate and reinitialize the hash bucket headers. */
    2148         912 :     hashtable->buckets.unshared = (HashJoinTuple *)
    2149         912 :         palloc0(nbuckets * sizeof(HashJoinTuple));
    2150             : 
    2151         912 :     hashtable->spaceUsed = 0;
    2152             : 
    2153         912 :     MemoryContextSwitchTo(oldcxt);
    2154             : 
    2155             :     /* Forget the chunks (the memory was freed by the context reset above). */
    2156         912 :     hashtable->chunks = NULL;
    2157         912 : }
    2158             : 
    2159             : /*
    2160             :  * ExecHashTableResetMatchFlags
    2161             :  *      Clear all the HeapTupleHeaderHasMatch flags in the table
    2162             :  */
    2163             : void
    2164          20 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
    2165             : {
    2166             :     HashJoinTuple tuple;
    2167             :     int         i;
    2168             : 
    2169             :     /* Reset all flags in the main table ... */
    2170       20500 :     for (i = 0; i < hashtable->nbuckets; i++)
    2171             :     {
    2172       41048 :         for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
    2173          88 :              tuple = tuple->next.unshared)
    2174          88 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2175             :     }
    2176             : 
    2177             :     /* ... and the same for the skew buckets, if any */
    2178          20 :     for (i = 0; i < hashtable->nSkewBuckets; i++)
    2179             :     {
    2180           0 :         int         j = hashtable->skewBucketNums[i];
    2181           0 :         HashSkewBucket *skewBucket = hashtable->skewBucket[j];
    2182             : 
    2183           0 :         for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
    2184           0 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2185             :     }
    2186          20 : }
    2187             : 
    2188             : 
    2189             : void
    2190      244270 : ExecReScanHash(HashState *node)
    2191             : {
    2192             :     /*
    2193             :      * if chgParam of subnode is not null then plan will be re-scanned by
    2194             :      * first ExecProcNode.
    2195             :      */
    2196      244270 :     if (node->ps.lefttree->chgParam == NULL)
    2197           0 :         ExecReScan(node->ps.lefttree);
    2198      244270 : }
    2199             : 
    2200             : 
    2201             : /*
    2202             :  * ExecHashBuildSkewHash
    2203             :  *
    2204             :  *      Set up for skew optimization if we can identify the most common values
    2205             :  *      (MCVs) of the outer relation's join key.  We make a skew hash bucket
    2206             :  *      for the hash value of each MCV, up to the number of slots allowed
    2207             :  *      based on available memory.
    2208             :  */
    2209             : static void
    2210          76 : ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
    2211             : {
    2212             :     HeapTupleData *statsTuple;
    2213             :     AttStatsSlot sslot;
    2214             : 
    2215             :     /* Do nothing if planner didn't identify the outer relation's join key */
    2216          76 :     if (!OidIsValid(node->skewTable))
    2217          48 :         return;
    2218             :     /* Also, do nothing if we don't have room for at least one skew bucket */
    2219          76 :     if (mcvsToUse <= 0)
    2220           0 :         return;
    2221             : 
    2222             :     /*
    2223             :      * Try to find the MCV statistics for the outer relation's join key.
    2224             :      */
    2225         228 :     statsTuple = SearchSysCache3(STATRELATTINH,
    2226          76 :                                  ObjectIdGetDatum(node->skewTable),
    2227          76 :                                  Int16GetDatum(node->skewColumn),
    2228          76 :                                  BoolGetDatum(node->skewInherit));
    2229          76 :     if (!HeapTupleIsValid(statsTuple))
    2230          48 :         return;
    2231             : 
    2232          28 :     if (get_attstatsslot(&sslot, statsTuple,
    2233             :                          STATISTIC_KIND_MCV, InvalidOid,
    2234             :                          ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
    2235             :     {
    2236             :         double      frac;
    2237             :         int         nbuckets;
    2238             :         FmgrInfo   *hashfunctions;
    2239             :         int         i;
    2240             : 
    2241           4 :         if (mcvsToUse > sslot.nvalues)
    2242           0 :             mcvsToUse = sslot.nvalues;
    2243             : 
    2244             :         /*
    2245             :          * Calculate the expected fraction of outer relation that will
    2246             :          * participate in the skew optimization.  If this isn't at least
    2247             :          * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
    2248             :          */
    2249           4 :         frac = 0;
    2250          44 :         for (i = 0; i < mcvsToUse; i++)
    2251          40 :             frac += sslot.numbers[i];
    2252           4 :         if (frac < SKEW_MIN_OUTER_FRACTION)
    2253             :         {
    2254           0 :             free_attstatsslot(&sslot);
    2255           0 :             ReleaseSysCache(statsTuple);
    2256           0 :             return;
    2257             :         }
    2258             : 
    2259             :         /*
    2260             :          * Okay, set up the skew hashtable.
    2261             :          *
    2262             :          * skewBucket[] is an open addressing hashtable with a power of 2 size
    2263             :          * that is greater than the number of MCV values.  (This ensures there
    2264             :          * will be at least one null entry, so searches will always
    2265             :          * terminate.)
    2266             :          *
    2267             :          * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
    2268             :          * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
    2269             :          * since we limit pg_statistic entries to much less than that.
    2270             :          */
    2271           4 :         nbuckets = 2;
    2272          20 :         while (nbuckets <= mcvsToUse)
    2273          12 :             nbuckets <<= 1;
    2274             :         /* use two more bits just to help avoid collisions */
    2275           4 :         nbuckets <<= 2;
    2276             : 
    2277           4 :         hashtable->skewEnabled = true;
    2278           4 :         hashtable->skewBucketLen = nbuckets;
    2279             : 
    2280             :         /*
    2281             :          * We allocate the bucket memory in the hashtable's batch context. It
    2282             :          * is only needed during the first batch, and this ensures it will be
    2283             :          * automatically removed once the first batch is done.
    2284             :          */
    2285           4 :         hashtable->skewBucket = (HashSkewBucket **)
    2286           4 :             MemoryContextAllocZero(hashtable->batchCxt,
    2287             :                                    nbuckets * sizeof(HashSkewBucket *));
    2288           4 :         hashtable->skewBucketNums = (int *)
    2289           4 :             MemoryContextAllocZero(hashtable->batchCxt,
    2290             :                                    mcvsToUse * sizeof(int));
    2291             : 
    2292           8 :         hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
    2293           4 :             + mcvsToUse * sizeof(int);
    2294           8 :         hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
    2295           4 :             + mcvsToUse * sizeof(int);
    2296           4 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    2297           4 :             hashtable->spacePeak = hashtable->spaceUsed;
    2298             : 
    2299             :         /*
    2300             :          * Create a skew bucket for each MCV hash value.
    2301             :          *
    2302             :          * Note: it is very important that we create the buckets in order of
    2303             :          * decreasing MCV frequency.  If we have to remove some buckets, they
    2304             :          * must be removed in reverse order of creation (see notes in
    2305             :          * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
    2306             :          * be removed first.
    2307             :          */
    2308           4 :         hashfunctions = hashtable->outer_hashfunctions;
    2309             : 
    2310          44 :         for (i = 0; i < mcvsToUse; i++)
    2311             :         {
    2312             :             uint32      hashvalue;
    2313             :             int         bucket;
    2314             : 
    2315          40 :             hashvalue = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[0],
    2316             :                                                          hashtable->collations[0],
    2317             :                                                          sslot.values[i]));
    2318             : 
    2319             :             /*
    2320             :              * While we have not hit a hole in the hashtable and have not hit
    2321             :              * the desired bucket, we have collided with some previous hash
    2322             :              * value, so try the next bucket location.  NB: this code must
    2323             :              * match ExecHashGetSkewBucket.
    2324             :              */
    2325          40 :             bucket = hashvalue & (nbuckets - 1);
    2326          80 :             while (hashtable->skewBucket[bucket] != NULL &&
    2327           0 :                    hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2328           0 :                 bucket = (bucket + 1) & (nbuckets - 1);
    2329             : 
    2330             :             /*
    2331             :              * If we found an existing bucket with the same hashvalue, leave
    2332             :              * it alone.  It's okay for two MCVs to share a hashvalue.
    2333             :              */
    2334          40 :             if (hashtable->skewBucket[bucket] != NULL)
    2335           0 :                 continue;
    2336             : 
    2337             :             /* Okay, create a new skew bucket for this hashvalue. */
    2338          80 :             hashtable->skewBucket[bucket] = (HashSkewBucket *)
    2339          40 :                 MemoryContextAlloc(hashtable->batchCxt,
    2340             :                                    sizeof(HashSkewBucket));
    2341          40 :             hashtable->skewBucket[bucket]->hashvalue = hashvalue;
    2342          40 :             hashtable->skewBucket[bucket]->tuples = NULL;
    2343          40 :             hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
    2344          40 :             hashtable->nSkewBuckets++;
    2345          40 :             hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
    2346          40 :             hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
    2347          40 :             if (hashtable->spaceUsed > hashtable->spacePeak)
    2348          40 :                 hashtable->spacePeak = hashtable->spaceUsed;
    2349             :         }
    2350             : 
    2351           4 :         free_attstatsslot(&sslot);
    2352             :     }
    2353             : 
    2354          28 :     ReleaseSysCache(statsTuple);
    2355             : }
    2356             : 
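/*
 * Illustrative sketch (hypothetical helper, not from nodeHash.c): the sizing
 * rule used above picks the smallest power of 2 strictly greater than
 * mcvsToUse, guaranteeing at least one empty slot so open-addressing probes
 * always terminate, and then shifts by two more bits to reduce collisions.
 */
static int
skew_table_size(int mcvs_to_use)
{
	int		nbuckets = 2;

	while (nbuckets <= mcvs_to_use)
		nbuckets <<= 1;
	return nbuckets << 2;		/* e.g. mcvs_to_use = 10: 16 slots -> 64 slots */
}
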
    2357             : /*
    2358             :  * ExecHashGetSkewBucket
    2359             :  *
    2360             :  *      Returns the index of the skew bucket for this hashvalue,
    2361             :  *      or INVALID_SKEW_BUCKET_NO if the hashvalue is not
    2362             :  *      associated with any active skew bucket.
    2363             :  */
    2364             : int
    2365    17720200 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
    2366             : {
    2367             :     int         bucket;
    2368             : 
    2369             :     /*
    2370             :      * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
    2371             :      * particular, this happens after the initial batch is done).
    2372             :      */
    2373    17720200 :     if (!hashtable->skewEnabled)
    2374    17640200 :         return INVALID_SKEW_BUCKET_NO;
    2375             : 
    2376             :     /*
    2377             :      * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
    2378             :      */
    2379       80000 :     bucket = hashvalue & (hashtable->skewBucketLen - 1);
    2380             : 
    2381             :     /*
    2382             :      * While we have not hit a hole in the hashtable and have not hit the
    2383             :      * desired bucket, we have collided with some other hash value, so try the
    2384             :      * next bucket location.
    2385             :      */
    2386      169940 :     while (hashtable->skewBucket[bucket] != NULL &&
    2387        5456 :            hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2388        4484 :         bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
    2389             : 
    2390             :     /*
    2391             :      * Found the desired bucket?
    2392             :      */
    2393       80000 :     if (hashtable->skewBucket[bucket] != NULL)
    2394         972 :         return bucket;
    2395             : 
    2396             :     /*
    2397             :      * There must not be any hashtable entry for this hash value.
    2398             :      */
    2399       79028 :     return INVALID_SKEW_BUCKET_NO;
    2400             : }
    2401             : 
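/*
 * Illustrative sketch (simplified, hypothetical types, not from nodeHash.c):
 * lookup in the skew table as described above -- mask the hash value by the
 * power-of-2 table length, then probe linearly until either the matching
 * hash value or a NULL hole is found.
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_skew_slot
{
	uint32_t	hashvalue;		/* stand-in for HashSkewBucket */
};

static int
sketch_skew_lookup(struct sketch_skew_slot **table, int table_len,
				   uint32_t hashvalue)
{
	int		bucket = hashvalue & (table_len - 1);	/* modulo by masking */

	while (table[bucket] != NULL && table[bucket]->hashvalue != hashvalue)
		bucket = (bucket + 1) & (table_len - 1);	/* linear probe */

	return (table[bucket] != NULL) ? bucket : -1;	/* -1 ~ "invalid bucket" */
}
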
    2402             : /*
    2403             :  * ExecHashSkewTableInsert
    2404             :  *
    2405             :  *      Insert a tuple into the skew hashtable.
    2406             :  *
    2407             :  * This should generally match up with the current-batch case in
    2408             :  * ExecHashTableInsert.
    2409             :  */
    2410             : static void
    2411         172 : ExecHashSkewTableInsert(HashJoinTable hashtable,
    2412             :                         TupleTableSlot *slot,
    2413             :                         uint32 hashvalue,
    2414             :                         int bucketNumber)
    2415             : {
    2416             :     bool        shouldFree;
    2417         172 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    2418             :     HashJoinTuple hashTuple;
    2419             :     int         hashTupleSize;
    2420             : 
    2421             :     /* Create the HashJoinTuple */
    2422         172 :     hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2423         172 :     hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
    2424             :                                                    hashTupleSize);
    2425         172 :     hashTuple->hashvalue = hashvalue;
    2426         172 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    2427         172 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    2428             : 
    2429             :     /* Push it onto the front of the skew bucket's list */
    2430         172 :     hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
    2431         172 :     hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
    2432             :     Assert(hashTuple != hashTuple->next.unshared);
    2433             : 
    2434             :     /* Account for space used, and back off if we've used too much */
    2435         172 :     hashtable->spaceUsed += hashTupleSize;
    2436         172 :     hashtable->spaceUsedSkew += hashTupleSize;
    2437         172 :     if (hashtable->spaceUsed > hashtable->spacePeak)
    2438         128 :         hashtable->spacePeak = hashtable->spaceUsed;
    2439         376 :     while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
    2440          32 :         ExecHashRemoveNextSkewBucket(hashtable);
    2441             : 
    2442             :     /* Check we are not over the total spaceAllowed, either */
    2443         172 :     if (hashtable->spaceUsed > hashtable->spaceAllowed)
    2444           0 :         ExecHashIncreaseNumBatches(hashtable);
    2445             : 
    2446         172 :     if (shouldFree)
    2447         172 :         heap_free_minimal_tuple(tuple);
    2448         172 : }
    2449             : 
    2450             : /*
    2451             :  *      ExecHashRemoveNextSkewBucket
    2452             :  *
    2453             :  *      Remove the least valuable skew bucket by pushing its tuples into
    2454             :  *      the main hash table.
    2455             :  */
    2456             : static void
    2457          32 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
    2458             : {
    2459             :     int         bucketToRemove;
    2460             :     HashSkewBucket *bucket;
    2461             :     uint32      hashvalue;
    2462             :     int         bucketno;
    2463             :     int         batchno;
    2464             :     HashJoinTuple hashTuple;
    2465             : 
    2466             :     /* Locate the bucket to remove */
    2467          32 :     bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
    2468          32 :     bucket = hashtable->skewBucket[bucketToRemove];
    2469             : 
    2470             :     /*
    2471             :      * Calculate which bucket and batch the tuples belong to in the main
    2472             :      * hashtable.  They all have the same hash value, so it's the same for all
    2473             :      * of them.  Also note that it's not possible for nbatch to increase while
    2474             :      * we are processing the tuples.
    2475             :      */
    2476          32 :     hashvalue = bucket->hashvalue;
    2477          32 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    2478             : 
    2479             :     /* Process all tuples in the bucket */
    2480          32 :     hashTuple = bucket->tuples;
    2481         156 :     while (hashTuple != NULL)
    2482             :     {
    2483          92 :         HashJoinTuple nextHashTuple = hashTuple->next.unshared;
    2484             :         MinimalTuple tuple;
    2485             :         Size        tupleSize;
    2486             : 
    2487             :         /*
    2488             :          * This code must agree with ExecHashTableInsert.  We do not use
    2489             :          * ExecHashTableInsert directly as ExecHashTableInsert expects a
    2490             :          * TupleTableSlot while we already have HashJoinTuples.
    2491             :          */
    2492          92 :         tuple = HJTUPLE_MINTUPLE(hashTuple);
    2493          92 :         tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2494             : 
    2495             :         /* Decide whether to put the tuple in the hash table or a temp file */
    2496          92 :         if (batchno == hashtable->curbatch)
    2497             :         {
    2498             :             /* Move the tuple to the main hash table */
    2499             :             HashJoinTuple copyTuple;
    2500             : 
    2501             :             /*
    2502             :              * We must copy the tuple into the dense storage, else it will not
    2503             :              * be found by, eg, ExecHashIncreaseNumBatches.
    2504             :              */
    2505           8 :             copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
    2506           8 :             memcpy(copyTuple, hashTuple, tupleSize);
    2507           8 :             pfree(hashTuple);
    2508             : 
    2509           8 :             copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    2510           8 :             hashtable->buckets.unshared[bucketno] = copyTuple;
    2511             : 
    2512             :             /* We have reduced skew space, but overall space doesn't change */
    2513           8 :             hashtable->spaceUsedSkew -= tupleSize;
    2514             :         }
    2515             :         else
    2516             :         {
    2517             :             /* Put the tuple into a temp file for later batches */
    2518             :             Assert(batchno > hashtable->curbatch);
    2519          84 :             ExecHashJoinSaveTuple(tuple, hashvalue,
    2520          84 :                                   &hashtable->innerBatchFile[batchno]);
    2521          84 :             pfree(hashTuple);
    2522          84 :             hashtable->spaceUsed -= tupleSize;
    2523          84 :             hashtable->spaceUsedSkew -= tupleSize;
    2524             :         }
    2525             : 
    2526          92 :         hashTuple = nextHashTuple;
    2527             : 
    2528             :         /* allow this loop to be cancellable */
    2529          92 :         CHECK_FOR_INTERRUPTS();
    2530             :     }
    2531             : 
    2532             :     /*
    2533             :      * Free the bucket struct itself and reset the hashtable entry to NULL.
    2534             :      *
    2535             :      * NOTE: this is not nearly as simple as it looks on the surface, because
    2536             :      * of the possibility of collisions in the hashtable.  Suppose that hash
    2537             :      * values A and B collide at a particular hashtable entry, and that A was
    2538             :      * entered first so B gets shifted to a different table entry.  If we were
    2539             :      * to remove A first then ExecHashGetSkewBucket would mistakenly start
    2540             :      * reporting that B is not in the hashtable, because it would hit the NULL
    2541             :      * before finding B.  However, we always remove entries in the reverse
    2542             :      * order of creation, so this failure cannot happen.
    2543             :      */
    2544          32 :     hashtable->skewBucket[bucketToRemove] = NULL;
    2545          32 :     hashtable->nSkewBuckets--;
    2546          32 :     pfree(bucket);
    2547          32 :     hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
    2548          32 :     hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
    2549             : 
    2550             :     /*
    2551             :      * If we have removed all skew buckets then give up on skew optimization.
    2552             :      * Release the arrays since they aren't useful any more.
    2553             :      */
    2554          32 :     if (hashtable->nSkewBuckets == 0)
    2555             :     {
    2556           0 :         hashtable->skewEnabled = false;
    2557           0 :         pfree(hashtable->skewBucket);
    2558           0 :         pfree(hashtable->skewBucketNums);
    2559           0 :         hashtable->skewBucket = NULL;
    2560           0 :         hashtable->skewBucketNums = NULL;
    2561           0 :         hashtable->spaceUsed -= hashtable->spaceUsedSkew;
    2562           0 :         hashtable->spaceUsedSkew = 0;
    2563             :     }
    2564          32 : }
    2565             : 
    2566             : /*
    2567             :  * Reserve space in the DSM segment for instrumentation data.
    2568             :  */
    2569             : void
    2570         112 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
    2571             : {
    2572             :     size_t      size;
    2573             : 
    2574             :     /* don't need this if not instrumenting or no workers */
    2575         112 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2576          56 :         return;
    2577             : 
    2578          56 :     size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
    2579          56 :     size = add_size(size, offsetof(SharedHashInfo, hinstrument));
    2580          56 :     shm_toc_estimate_chunk(&pcxt->estimator, size);
    2581          56 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
    2582             : }
    2583             : 
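/*
 * Illustrative sketch (hypothetical struct, not the real SharedHashInfo):
 * the size computed above follows the usual idiom for a struct that ends in
 * a flexible array member, i.e. the offset of the array plus one element per
 * worker.
 */
#include <stddef.h>

typedef struct example_shared_info
{
	int			num_workers;
	double		per_worker[];	/* stand-in for the per-worker entries */
} example_shared_info;

static size_t
example_shared_info_size(int nworkers)
{
	return offsetof(example_shared_info, per_worker) +
		nworkers * sizeof(double);
}
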
    2584             : /*
    2585             :  * Set up a space in the DSM for all workers to record instrumentation data
    2586             :  * about their hash table.
    2587             :  */
    2588             : void
    2589         112 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
    2590             : {
    2591             :     size_t      size;
    2592             : 
    2593             :     /* don't need this if not instrumenting or no workers */
    2594         112 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2595          56 :         return;
    2596             : 
    2597          56 :     size = offsetof(SharedHashInfo, hinstrument) +
    2598          56 :         pcxt->nworkers * sizeof(HashInstrumentation);
    2599          56 :     node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
    2600          56 :     memset(node->shared_info, 0, size);
    2601          56 :     node->shared_info->num_workers = pcxt->nworkers;
    2602          56 :     shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
    2603          56 :                    node->shared_info);
    2604             : }
    2605             : 
    2606             : /*
    2607             :  * Locate the DSM space for hash table instrumentation data that we'll write
    2608             :  * to at shutdown time.
    2609             :  */
    2610             : void
    2611         340 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
    2612             : {
    2613             :     SharedHashInfo *shared_info;
    2614             : 
    2615             :     /* don't need this if not instrumenting */
    2616         340 :     if (!node->ps.instrument)
    2617         172 :         return;
    2618             : 
    2619         168 :     shared_info = (SharedHashInfo *)
    2620         168 :         shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
    2621         168 :     node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
    2622             : }
    2623             : 
    2624             : /*
    2625             :  * Copy instrumentation data from this worker's hash table (if it built one)
    2626             :  * to DSM memory so the leader can retrieve it.  This must be done in an
    2627             :  * ExecShutdownHash() rather than ExecEndHash() because the latter runs after
    2628             :  * we've detached from the DSM segment.
    2629             :  */
    2630             : void
    2631       26336 : ExecShutdownHash(HashState *node)
    2632             : {
    2633       26336 :     if (node->hinstrument && node->hashtable)
    2634         162 :         ExecHashGetInstrumentation(node->hinstrument, node->hashtable);
    2635       26336 : }
    2636             : 
    2637             : /*
    2638             :  * Retrieve instrumentation data from workers before the DSM segment is
    2639             :  * detached, so that EXPLAIN can access it.
    2640             :  */
    2641             : void
    2642          56 : ExecHashRetrieveInstrumentation(HashState *node)
    2643             : {
    2644          56 :     SharedHashInfo *shared_info = node->shared_info;
    2645             :     size_t      size;
    2646             : 
    2647          56 :     if (shared_info == NULL)
    2648           0 :         return;
    2649             : 
    2650             :     /* Replace node->shared_info with a copy in backend-local memory. */
    2651          56 :     size = offsetof(SharedHashInfo, hinstrument) +
    2652          56 :         shared_info->num_workers * sizeof(HashInstrumentation);
    2653          56 :     node->shared_info = palloc(size);
    2654          56 :     memcpy(node->shared_info, shared_info, size);
    2655             : }
    2656             : 
    2657             : /*
    2658             :  * Copy the instrumentation data from 'hashtable' into a HashInstrumentation
    2659             :  * struct.
    2660             :  */
    2661             : void
    2662         214 : ExecHashGetInstrumentation(HashInstrumentation *instrument,
    2663             :                            HashJoinTable hashtable)
    2664             : {
    2665         214 :     instrument->nbuckets = hashtable->nbuckets;
    2666         214 :     instrument->nbuckets_original = hashtable->nbuckets_original;
    2667         214 :     instrument->nbatch = hashtable->nbatch;
    2668         214 :     instrument->nbatch_original = hashtable->nbatch_original;
    2669         214 :     instrument->space_peak = hashtable->spacePeak;
    2670         214 : }
    2671             : 
    2672             : /*
    2673             :  * Allocate 'size' bytes from the currently active HashMemoryChunk
    2674             :  */
    2675             : static void *
    2676     7299450 : dense_alloc(HashJoinTable hashtable, Size size)
    2677             : {
    2678             :     HashMemoryChunk newChunk;
    2679             :     char       *ptr;
    2680             : 
    2681             :     /* just in case the size is not already aligned properly */
    2682     7299450 :     size = MAXALIGN(size);
    2683             : 
    2684             :     /*
    2685             :      * If tuple size is larger than threshold, allocate a separate chunk.
    2686             :      */
    2687     7299450 :     if (size > HASH_CHUNK_THRESHOLD)
    2688             :     {
    2689             :         /* allocate new chunk and put it at the beginning of the list */
    2690           0 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2691             :                                                         HASH_CHUNK_HEADER_SIZE + size);
    2692           0 :         newChunk->maxlen = size;
    2693           0 :         newChunk->used = size;
    2694           0 :         newChunk->ntuples = 1;
    2695             : 
    2696             :         /*
    2697             :          * Add this chunk to the list after the first existing chunk, so that
    2698             :          * we don't lose the remaining space in the "current" chunk.
    2699             :          */
    2700           0 :         if (hashtable->chunks != NULL)
    2701             :         {
    2702           0 :             newChunk->next = hashtable->chunks->next;
    2703           0 :             hashtable->chunks->next.unshared = newChunk;
    2704             :         }
    2705             :         else
    2706             :         {
    2707           0 :             newChunk->next.unshared = hashtable->chunks;
    2708           0 :             hashtable->chunks = newChunk;
    2709             :         }
    2710             : 
    2711           0 :         return HASH_CHUNK_DATA(newChunk);
    2712             :     }
    2713             : 
    2714             :     /*
    2715             :      * See if we have enough space for it in the current chunk (if any). If
    2716             :      * not, allocate a fresh chunk.
    2717             :      */
    2718    14345204 :     if ((hashtable->chunks == NULL) ||
    2719     7045754 :         (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
    2720             :     {
    2721             :         /* allocate new chunk and put it at the beginning of the list */
    2722      262560 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2723             :                                                         HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
    2724             : 
    2725      262560 :         newChunk->maxlen = HASH_CHUNK_SIZE;
    2726      262560 :         newChunk->used = size;
    2727      262560 :         newChunk->ntuples = 1;
    2728             : 
    2729      262560 :         newChunk->next.unshared = hashtable->chunks;
    2730      262560 :         hashtable->chunks = newChunk;
    2731             : 
    2732      262560 :         return HASH_CHUNK_DATA(newChunk);
    2733             :     }
    2734             : 
    2735             :     /* There is enough space in the current chunk, let's add the tuple */
    2736     7036890 :     ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
    2737     7036890 :     hashtable->chunks->used += size;
    2738     7036890 :     hashtable->chunks->ntuples += 1;
    2739             : 
    2740             :     /* return pointer to the start of the tuple memory */
    2741     7036890 :     return ptr;
    2742             : }
    2743             : 
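/*
 * Illustrative sketch (standalone, hypothetical names; malloc stands in for
 * the memory-context allocation used by dense_alloc above): oversized
 * requests get a dedicated chunk linked behind the current one, and
 * everything else is bump-allocated from the current chunk, starting a fresh
 * chunk when the remaining space runs out.
 */
#include <stdlib.h>

#define SKETCH_CHUNK_SIZE	(32 * 1024)
#define SKETCH_THRESHOLD	(SKETCH_CHUNK_SIZE / 4)

typedef struct sketch_chunk
{
	struct sketch_chunk *next;
	size_t		maxlen;
	size_t		used;
	char		data[];
} sketch_chunk;

static void *
sketch_dense_alloc(sketch_chunk **chunks, size_t size)
{
	sketch_chunk *chunk;

	size = (size + 7) & ~(size_t) 7;	/* crude stand-in for MAXALIGN */

	if (size > SKETCH_THRESHOLD)
	{
		/*
		 * Dedicated chunk; link it after the current chunk so the current
		 * chunk's remaining free space is not abandoned.
		 */
		chunk = malloc(sizeof(sketch_chunk) + size);
		chunk->maxlen = chunk->used = size;
		if (*chunks != NULL)
		{
			chunk->next = (*chunks)->next;
			(*chunks)->next = chunk;
		}
		else
		{
			chunk->next = NULL;
			*chunks = chunk;
		}
		return chunk->data;
	}

	if (*chunks == NULL || (*chunks)->maxlen - (*chunks)->used < size)
	{
		/* Not enough room: push a fresh chunk onto the head of the list. */
		chunk = malloc(sizeof(sketch_chunk) + SKETCH_CHUNK_SIZE);
		chunk->maxlen = SKETCH_CHUNK_SIZE;
		chunk->used = 0;
		chunk->next = *chunks;
		*chunks = chunk;
	}

	chunk = *chunks;
	chunk->used += size;				/* bump the high-water mark */
	return chunk->data + (chunk->used - size);	/* start of this allocation */
}
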
    2744             : /*
    2745             :  * Allocate space for a tuple in shared dense storage.  This is equivalent to
    2746             :  * dense_alloc but for Parallel Hash using shared memory.
    2747             :  *
    2748             :  * While loading a tuple into shared memory, we might run out of memory and
    2749             :  * decide to repartition, or determine that the load factor is too high and
    2750             :  * decide to expand the bucket array, or discover that another participant has
    2751             :  * commanded us to help do that.  Return NULL if number of buckets or batches
    2752             :  * has changed, indicating that the caller must retry (considering the
    2753             :  * possibility that the tuple no longer belongs in the same batch).
    2754             :  */
    2755             : static HashJoinTuple
    2756     1352030 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
    2757             :                            dsa_pointer *shared)
    2758             : {
    2759     1352030 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    2760             :     dsa_pointer chunk_shared;
    2761             :     HashMemoryChunk chunk;
    2762             :     Size        chunk_size;
    2763             :     HashJoinTuple result;
    2764     1352030 :     int         curbatch = hashtable->curbatch;
    2765             : 
    2766     1352030 :     size = MAXALIGN(size);
    2767             : 
    2768             :     /*
    2769             :      * Fast path: if there is enough space in this backend's current chunk,
    2770             :      * then we can allocate without any locking.
    2771             :      */
    2772     1352030 :     chunk = hashtable->current_chunk;
    2773     1352030 :     if (chunk != NULL &&
    2774     1351388 :         size <= HASH_CHUNK_THRESHOLD &&
    2775     1351388 :         chunk->maxlen - chunk->used >= size)
    2776             :     {
    2777             : 
    2778     1349854 :         chunk_shared = hashtable->current_chunk_shared;
    2779             :         Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
    2780     1349854 :         *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
    2781     1349854 :         result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
    2782     1349854 :         chunk->used += size;
    2783             : 
    2784             :         Assert(chunk->used <= chunk->maxlen);
    2785             :         Assert(result == dsa_get_address(hashtable->area, *shared));
    2786             : 
    2787     1349854 :         return result;
    2788             :     }
    2789             : 
    2790             :     /* Slow path: try to allocate a new chunk. */
    2791        2176 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    2792             : 
    2793             :     /*
    2794             :      * Check if we need to help increase the number of buckets or batches.
    2795             :      */
    2796        4328 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    2797        2152 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    2798             :     {
    2799         120 :         ParallelHashGrowth growth = pstate->growth;
    2800             : 
    2801         120 :         hashtable->current_chunk = NULL;
    2802         120 :         LWLockRelease(&pstate->lock);
    2803             : 
    2804             :         /* Another participant has commanded us to help grow. */
    2805         120 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    2806          24 :             ExecParallelHashIncreaseNumBatches(hashtable);
    2807          96 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    2808          96 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    2809             : 
    2810             :         /* The caller must retry. */
    2811         120 :         return NULL;
    2812             :     }
    2813             : 
    2814             :     /* Oversized tuples get their own chunk. */
    2815        2056 :     if (size > HASH_CHUNK_THRESHOLD)
    2816          32 :         chunk_size = size + HASH_CHUNK_HEADER_SIZE;
    2817             :     else
    2818        2024 :         chunk_size = HASH_CHUNK_SIZE;
    2819             : 
    2820             :     /* Check if it's time to grow batches or buckets. */
    2821        2056 :     if (pstate->growth != PHJ_GROWTH_DISABLED)
    2822             :     {
    2823             :         Assert(curbatch == 0);
    2824             :         Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
    2825             : 
    2826             :         /*
    2827             :          * Check if our space limit would be exceeded.  To avoid choking on
    2828             :          * very large tuples or very low work_mem setting, we'll always allow
    2829             :          * each backend to allocate at least one chunk.
    2830             :          */
    2831        1748 :         if (hashtable->batches[0].at_least_one_chunk &&
    2832         754 :             hashtable->batches[0].shared->size +
    2833         754 :             chunk_size > pstate->space_allowed)
    2834             :         {
    2835          24 :             pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    2836          24 :             hashtable->batches[0].shared->space_exhausted = true;
    2837          24 :             LWLockRelease(&pstate->lock);
    2838             : 
    2839          24 :             return NULL;
    2840             :         }
    2841             : 
    2842             :         /* Check if our load factor limit would be exceeded. */
    2843         970 :         if (hashtable->nbatch == 1)
    2844             :         {
    2845         836 :             hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
    2846         836 :             hashtable->batches[0].ntuples = 0;
    2847             :             /* Guard against integer overflow and alloc size overflow */
    2848        1672 :             if (hashtable->batches[0].shared->ntuples + 1 >
    2849         908 :                 hashtable->nbuckets * NTUP_PER_BUCKET &&
    2850         144 :                 hashtable->nbuckets < (INT_MAX / 2) &&
    2851          72 :                 hashtable->nbuckets * 2 <=
    2852             :                 MaxAllocSize / sizeof(dsa_pointer_atomic))
    2853             :             {
    2854          72 :                 pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
    2855          72 :                 LWLockRelease(&pstate->lock);
    2856             : 
    2857          72 :                 return NULL;
    2858             :             }
    2859             :         }
    2860             :     }
    2861             : 
    2862             :     /* We are cleared to allocate a new chunk. */
    2863        1960 :     chunk_shared = dsa_allocate(hashtable->area, chunk_size);
    2864        1960 :     hashtable->batches[curbatch].shared->size += chunk_size;
    2865        1960 :     hashtable->batches[curbatch].at_least_one_chunk = true;
    2866             : 
    2867             :     /* Set up the chunk. */
    2868        1960 :     chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
    2869        1960 :     *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
    2870        1960 :     chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
    2871        1960 :     chunk->used = size;
    2872             : 
    2873             :     /*
    2874             :      * Push it onto the list of chunks, so that it can be found if we need to
    2875             :      * increase the number of buckets or batches (batch 0 only) and later for
    2876             :      * freeing the memory (all batches).
    2877             :      */
    2878        1960 :     chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
    2879        1960 :     hashtable->batches[curbatch].shared->chunks = chunk_shared;
    2880             : 
    2881        1960 :     if (size <= HASH_CHUNK_THRESHOLD)
    2882             :     {
    2883             :         /*
    2884             :          * Make this the current chunk so that we can use the fast path to
    2885             :          * fill the rest of it up in future calls.
    2886             :          */
    2887        1936 :         hashtable->current_chunk = chunk;
    2888        1936 :         hashtable->current_chunk_shared = chunk_shared;
    2889             :     }
    2890        1960 :     LWLockRelease(&pstate->lock);
    2891             : 
    2892             :     Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
    2893        1960 :     result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
    2894             : 
    2895        1960 :     return result;
    2896             : }
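
For illustration, here is a minimal standalone sketch of the two growth decisions made above, with simplified stand-in types and hypothetical names, and with NTUP_PER_BUCKET treated as 1; the real code makes these checks on the shared ParallelHashJoinState while holding pstate->lock.

    #include <limits.h>
    #include <stdbool.h>
    #include <stddef.h>

    typedef enum { GROW_OK, GROW_BATCHES, GROW_BUCKETS } growth_action;

    static growth_action
    decide_growth(size_t batch_size, size_t chunk_size, size_t space_allowed,
                  bool at_least_one_chunk, long ntuples, int nbuckets,
                  size_t max_alloc, size_t bucket_entry_size)
    {
        /* Space limit: request more batches, but always allow one chunk. */
        if (at_least_one_chunk && batch_size + chunk_size > space_allowed)
            return GROW_BATCHES;

        /* Load factor: request more buckets, guarding against overflow. */
        if (ntuples + 1 > (long) nbuckets /* * NTUP_PER_BUCKET */ &&
            nbuckets < INT_MAX / 2 &&
            (size_t) nbuckets * 2 <= max_alloc / bucket_entry_size)
            return GROW_BUCKETS;

        return GROW_OK;
    }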
    2897             : 
    2898             : /*
    2899             :  * One backend needs to set up the shared batch state including tuplestores.
    2900             :  * Other backends will ensure they have correctly configured accessors by
    2901             :  * called ExecParallelHashEnsureBatchAccessors().
    2902             :  * calling ExecParallelHashEnsureBatchAccessors().
    2903             : static void
    2904         132 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
    2905             : {
    2906         132 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    2907             :     ParallelHashJoinBatch *batches;
    2908             :     MemoryContext oldcxt;
    2909             :     int         i;
    2910             : 
    2911             :     Assert(hashtable->batches == NULL);
    2912             : 
    2913             :     /* Allocate space. */
    2914         132 :     pstate->batches =
    2915         132 :         dsa_allocate0(hashtable->area,
    2916             :                       EstimateParallelHashJoinBatch(hashtable) * nbatch);
    2917         132 :     pstate->nbatch = nbatch;
    2918         132 :     batches = dsa_get_address(hashtable->area, pstate->batches);
    2919             : 
    2920             :     /* Use hash join memory context. */
    2921         132 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
    2922             : 
    2923             :     /* Allocate this backend's accessor array. */
    2924         132 :     hashtable->nbatch = nbatch;
    2925         132 :     hashtable->batches = (ParallelHashJoinBatchAccessor *)
    2926         132 :         palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch);
    2927             : 
    2928             :     /* Set up the shared state, tuplestores and backend-local accessors. */
    2929         708 :     for (i = 0; i < hashtable->nbatch; ++i)
    2930             :     {
    2931         576 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    2932         576 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    2933             :         char        name[MAXPGPATH];
    2934             : 
    2935             :         /*
    2936             :          * All members of shared were zero-initialized.  We just need to set
    2937             :          * up the Barrier.
    2938             :          */
    2939         576 :         BarrierInit(&shared->batch_barrier, 0);
    2940         576 :         if (i == 0)
    2941             :         {
    2942             :             /* Batch 0 doesn't need to be loaded. */
    2943         132 :             BarrierAttach(&shared->batch_barrier);
    2944         660 :             while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBING)
    2945         396 :                 BarrierArriveAndWait(&shared->batch_barrier, 0);
    2946         132 :             BarrierDetach(&shared->batch_barrier);
    2947             :         }
    2948             : 
    2949             :         /* Initialize accessor state.  All members were zero-initialized. */
    2950         576 :         accessor->shared = shared;
    2951             : 
    2952             :         /* Initialize the shared tuplestores. */
    2953         576 :         snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
    2954         576 :         accessor->inner_tuples =
    2955         576 :             sts_initialize(ParallelHashJoinBatchInner(shared),
    2956             :                            pstate->nparticipants,
    2957             :                            ParallelWorkerNumber + 1,
    2958             :                            sizeof(uint32),
    2959             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    2960             :                            &pstate->fileset,
    2961             :                            name);
    2962         576 :         snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
    2963         576 :         accessor->outer_tuples =
    2964         576 :             sts_initialize(ParallelHashJoinBatchOuter(shared,
    2965             :                                                       pstate->nparticipants),
    2966             :                            pstate->nparticipants,
    2967             :                            ParallelWorkerNumber + 1,
    2968             :                            sizeof(uint32),
    2969             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    2970             :                            &pstate->fileset,
    2971             :                            name);
    2972             :     }
    2973             : 
    2974         132 :     MemoryContextSwitchTo(oldcxt);
    2975         132 : }
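
The dsa_allocate0() call above carves out one flat region that is then treated as a pseudo-array of variable-size ParallelHashJoinBatch records. A minimal sketch of that indexing scheme, using a hypothetical per_batch_bytes argument in place of EstimateParallelHashJoinBatch():

    #include <stddef.h>

    struct batch_record;            /* opaque, variable-size record */

    static struct batch_record *
    nth_batch(void *base, size_t per_batch_bytes, int n)
    {
        /* Every record occupies the same runtime-computed number of bytes. */
        return (struct batch_record *)
            ((char *) base + per_batch_bytes * (size_t) n);
    }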
    2976             : 
    2977             : /*
    2978             :  * Free the current set of ParallelHashJoinBatchAccessor objects.
    2979             :  */
    2980             : static void
    2981          40 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
    2982             : {
    2983             :     int         i;
    2984             : 
    2985         160 :     for (i = 0; i < hashtable->nbatch; ++i)
    2986             :     {
    2987             :         /* Make sure no files are left open. */
    2988         120 :         sts_end_write(hashtable->batches[i].inner_tuples);
    2989         120 :         sts_end_write(hashtable->batches[i].outer_tuples);
    2990         120 :         sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    2991         120 :         sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    2992             :     }
    2993          40 :     pfree(hashtable->batches);
    2994          40 :     hashtable->batches = NULL;
    2995          40 : }
    2996             : 
    2997             : /*
    2998             :  * Make sure this backend has up-to-date accessors for the current set of
    2999             :  * batches.
    3000             :  */
    3001             : static void
    3002         548 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
    3003             : {
    3004         548 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3005             :     ParallelHashJoinBatch *batches;
    3006             :     MemoryContext oldcxt;
    3007             :     int         i;
    3008             : 
    3009         548 :     if (hashtable->batches != NULL)
    3010             :     {
    3011         420 :         if (hashtable->nbatch == pstate->nbatch)
    3012         416 :             return;
    3013           4 :         ExecParallelHashCloseBatchAccessors(hashtable);
    3014             :     }
    3015             : 
    3016             :     /*
    3017             :      * It's possible for a backend to start up very late so that the whole
    3018             :      * join is finished and the shm state for tracking batches has already
    3019             :      * been freed by ExecHashTableDetach().  In that case we'll just leave
    3020             :      * hashtable->batches as NULL so that ExecParallelHashJoinNewBatch() gives
    3021             :      * up early.
    3022             :      */
    3023         132 :     if (!DsaPointerIsValid(pstate->batches))
    3024           0 :         return;
    3025             : 
    3026             :     /* Use hash join memory context. */
    3027         132 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
    3028             : 
    3029             :     /* Allocate this backend's accessor array. */
    3030         132 :     hashtable->nbatch = pstate->nbatch;
    3031         132 :     hashtable->batches = (ParallelHashJoinBatchAccessor *)
    3032         132 :         palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch);
    3033             : 
    3034             :     /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
    3035         132 :     batches = (ParallelHashJoinBatch *)
    3036         132 :         dsa_get_address(hashtable->area, pstate->batches);
    3037             : 
    3038             :     /* Set up the accessor array and attach to the tuplestores. */
    3039         796 :     for (i = 0; i < hashtable->nbatch; ++i)
    3040             :     {
    3041         664 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3042         664 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3043             : 
    3044         664 :         accessor->shared = shared;
    3045         664 :         accessor->preallocated = 0;
    3046         664 :         accessor->done = false;
    3047         664 :         accessor->inner_tuples =
    3048         664 :             sts_attach(ParallelHashJoinBatchInner(shared),
    3049             :                        ParallelWorkerNumber + 1,
    3050             :                        &pstate->fileset);
    3051         664 :         accessor->outer_tuples =
    3052         664 :             sts_attach(ParallelHashJoinBatchOuter(shared,
    3053             :                                                   pstate->nparticipants),
    3054             :                        ParallelWorkerNumber + 1,
    3055             :                        &pstate->fileset);
    3056             :     }
    3057             : 
    3058         132 :     MemoryContextSwitchTo(oldcxt);
    3059             : }
    3060             : 
    3061             : /*
    3062             :  * Allocate an empty shared memory hash table for a given batch.
    3063             :  */
    3064             : void
    3065         488 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
    3066             : {
    3067         488 :     ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
    3068             :     dsa_pointer_atomic *buckets;
    3069         488 :     int         nbuckets = hashtable->parallel_state->nbuckets;
    3070             :     int         i;
    3071             : 
    3072         488 :     batch->buckets =
    3073         488 :         dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
    3074         488 :     buckets = (dsa_pointer_atomic *)
    3075         488 :         dsa_get_address(hashtable->area, batch->buckets);
    3076     1733096 :     for (i = 0; i < nbuckets; ++i)
    3077     1732608 :         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    3078         488 : }
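
A minimal standalone sketch of the same initialization pattern, using C11 atomics and malloc in place of PostgreSQL's dsa_pointer_atomic and dsa_allocate(); INVALID_PTR is a hypothetical stand-in for InvalidDsaPointer:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define INVALID_PTR ((uint64_t) 0)

    static _Atomic uint64_t *
    alloc_buckets(int nbuckets)
    {
        _Atomic uint64_t *buckets =
            malloc(sizeof(_Atomic uint64_t) * (size_t) nbuckets);

        /* Every bucket head starts out pointing at nothing. */
        if (buckets != NULL)
            for (int i = 0; i < nbuckets; ++i)
                atomic_init(&buckets[i], INVALID_PTR);
        return buckets;
    }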
    3079             : 
    3080             : /*
    3081             :  * If we are currently attached to a shared hash join batch, detach.  If we
    3082             :  * are last to detach, clean up.
    3083             :  */
    3084             : void
    3085       17858 : ExecHashTableDetachBatch(HashJoinTable hashtable)
    3086             : {
    3087       18660 :     if (hashtable->parallel_state != NULL &&
    3088         802 :         hashtable->curbatch >= 0)
    3089             :     {
    3090         578 :         int         curbatch = hashtable->curbatch;
    3091         578 :         ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    3092             : 
    3093             :         /* Make sure any temporary files are closed. */
    3094         578 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    3095         578 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    3096             : 
    3097             :         /* Detach from the batch we were last working on. */
    3098         578 :         if (BarrierArriveAndDetach(&batch->batch_barrier))
    3099             :         {
    3100             :             /*
    3101             :              * Technically we shouldn't access the barrier because we're no
    3102             :              * longer attached, but since there is no way it's moving after
    3103             :              * this point it seems safe to make the following assertion.
    3104             :              */
    3105             :             Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_DONE);
    3106             : 
    3107             :             /* Free shared chunks and buckets. */
    3108        2736 :             while (DsaPointerIsValid(batch->chunks))
    3109             :             {
    3110        1760 :                 HashMemoryChunk chunk =
    3111        1760 :                 dsa_get_address(hashtable->area, batch->chunks);
    3112        1760 :                 dsa_pointer next = chunk->next.shared;
    3113             : 
    3114        1760 :                 dsa_free(hashtable->area, batch->chunks);
    3115        1760 :                 batch->chunks = next;
    3116             :             }
    3117         488 :             if (DsaPointerIsValid(batch->buckets))
    3118             :             {
    3119         488 :                 dsa_free(hashtable->area, batch->buckets);
    3120         488 :                 batch->buckets = InvalidDsaPointer;
    3121             :             }
    3122             :         }
    3123             : 
    3124             :         /*
    3125             :          * Track the largest batch we've been attached to.  Though each
    3126             :          * backend might see a different subset of batches, explain.c will
    3127             :          * scan the results from all backends to find the largest value.
    3128             :          */
    3129         578 :         hashtable->spacePeak =
    3130         578 :             Max(hashtable->spacePeak,
    3131             :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    3132             : 
    3133             :         /* Remember that we are not attached to a batch. */
    3134         578 :         hashtable->curbatch = -1;
    3135             :     }
    3136       17858 : }
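
A minimal sketch of the underlying "last one to detach cleans up" protocol, using a plain atomic attach count in place of PostgreSQL's Barrier; the names and struct layout are hypothetical:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct shared_batch
    {
        _Atomic int attached;       /* number of attached participants */
        /* ... chunks, buckets ... */
    };

    static bool
    detach_from_batch(struct shared_batch *batch)
    {
        /* Returns true only for the final participant to detach;
         * that participant is then free to release shared memory. */
        return atomic_fetch_sub(&batch->attached, 1) == 1;
    }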
    3137             : 
    3138             : /*
    3139             :  * Detach from all shared resources.  If we are last to detach, clean up.
    3140             :  */
    3141             : void
    3142       17280 : ExecHashTableDetach(HashJoinTable hashtable)
    3143             : {
    3144       17280 :     if (hashtable->parallel_state)
    3145             :     {
    3146         224 :         ParallelHashJoinState *pstate = hashtable->parallel_state;
    3147             :         int         i;
    3148             : 
    3149             :         /* Make sure any temporary files are closed. */
    3150         224 :         if (hashtable->batches)
    3151             :         {
    3152        1344 :             for (i = 0; i < hashtable->nbatch; ++i)
    3153             :             {
    3154        1120 :                 sts_end_write(hashtable->batches[i].inner_tuples);
    3155        1120 :                 sts_end_write(hashtable->batches[i].outer_tuples);
    3156        1120 :                 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3157        1120 :                 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3158             :             }
    3159             :         }
    3160             : 
    3161             :         /* If we're last to detach, clean up shared memory. */
    3162         224 :         if (BarrierDetach(&pstate->build_barrier))
    3163             :         {
    3164          96 :             if (DsaPointerIsValid(pstate->batches))
    3165             :             {
    3166          96 :                 dsa_free(hashtable->area, pstate->batches);
    3167          96 :                 pstate->batches = InvalidDsaPointer;
    3168             :             }
    3169             :         }
    3170             : 
    3171         224 :         hashtable->parallel_state = NULL;
    3172             :     }
    3173       17280 : }
    3174             : 
    3175             : /*
    3176             :  * Get the first tuple in a given bucket identified by number.
    3177             :  */
    3178             : static inline HashJoinTuple
    3179     1200016 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
    3180             : {
    3181             :     HashJoinTuple tuple;
    3182             :     dsa_pointer p;
    3183             : 
    3184             :     Assert(hashtable->parallel_state);
    3185     1200016 :     p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
    3186     1200016 :     tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
    3187             : 
    3188     1200016 :     return tuple;
    3189             : }
    3190             : 
    3191             : /*
    3192             :  * Get the next tuple in the same bucket as 'tuple'.
    3193             :  */
    3194             : static inline HashJoinTuple
    3195     1980424 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
    3196             : {
    3197             :     HashJoinTuple next;
    3198             : 
    3199             :     Assert(hashtable->parallel_state);
    3200     1980424 :     next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
    3201             : 
    3202     1980424 :     return next;
    3203             : }
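
A sketch of how a probe loop can walk one bucket's chain with the two helpers above; matches() is a hypothetical placeholder for the caller's join-qual check, and error handling is omitted:

    static bool matches(HashJoinTuple tuple);   /* hypothetical predicate */

    static HashJoinTuple
    find_match_in_bucket(HashJoinTable hashtable, int bucketno, uint32 hashvalue)
    {
        HashJoinTuple tuple;

        for (tuple = ExecParallelHashFirstTuple(hashtable, bucketno);
             tuple != NULL;
             tuple = ExecParallelHashNextTuple(hashtable, tuple))
        {
            /* Cheap hash comparison first, full comparison only on a hit. */
            if (tuple->hashvalue == hashvalue && matches(tuple))
                return tuple;
        }

        return NULL;
    }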
    3204             : 
    3205             : /*
    3206             :  * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
    3207             :  */
    3208             : static inline void
    3209     1717332 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
    3210             :                           HashJoinTuple tuple,
    3211             :                           dsa_pointer tuple_shared)
    3212             : {
    3213             :     for (;;)
    3214             :     {
    3215     1722930 :         tuple->next.shared = dsa_pointer_atomic_read(head);
    3216     1717332 :         if (dsa_pointer_atomic_compare_exchange(head,
    3217     1717332 :                                                 &tuple->next.shared,
    3218             :                                                 tuple_shared))
    3219     1711734 :             break;
    3220             :     }
    3221     1711734 : }
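
A minimal standalone sketch of the same lock-free push using C11 atomics and ordinary pointers; PostgreSQL uses dsa_pointer_atomic here because the tuples live in DSA memory rather than at stable process-local addresses:

    #include <stdatomic.h>

    struct node
    {
        struct node *next;
    };

    static void
    push_front(_Atomic(struct node *) *head, struct node *n)
    {
        /* Retry until no other participant changed the head under us. */
        n->next = atomic_load(head);
        while (!atomic_compare_exchange_weak(head, &n->next, n))
            ;                       /* n->next was refreshed with the new head */
    }

On failure, atomic_compare_exchange_weak stores the newly observed head into n->next, so each retry re-links the node against the current front of the chain, just as the loop above re-reads the head before retrying.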
    3222             : 
    3223             : /*
    3224             :  * Prepare to work on a given batch.
    3225             :  */
    3226             : void
    3227        1298 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
    3228             : {
    3229             :     Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
    3230             : 
    3231        1298 :     hashtable->curbatch = batchno;
    3232        1298 :     hashtable->buckets.shared = (dsa_pointer_atomic *)
    3233        1298 :         dsa_get_address(hashtable->area,
    3234        1298 :                         hashtable->batches[batchno].shared->buckets);
    3235        1298 :     hashtable->nbuckets = hashtable->parallel_state->nbuckets;
    3236        1298 :     hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
    3237        1298 :     hashtable->current_chunk = NULL;
    3238        1298 :     hashtable->current_chunk_shared = InvalidDsaPointer;
    3239        1298 :     hashtable->batches[batchno].at_least_one_chunk = false;
    3240        1298 : }
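
Keeping nbuckets a power of two and tracking log2_nbuckets allows a hash value to be split into a bucket number and a batch number with simple masking and shifting. A minimal sketch with hypothetical names, assuming both counts are powers of two:

    #include <stdint.h>

    static void
    split_hash(uint32_t hashvalue, int nbuckets, int log2_nbuckets, int nbatch,
               int *bucketno, int *batchno)
    {
        /* Low bits pick the bucket; a disjoint set of higher bits picks
         * the batch, so growing one does not redistribute the other
         * arbitrarily. */
        *bucketno = (int) (hashvalue & (uint32_t) (nbuckets - 1));
        *batchno = (int) ((hashvalue >> log2_nbuckets) & (uint32_t) (nbatch - 1));
    }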
    3241             : 
    3242             : /*
    3243             :  * Take the next available chunk from the queue of chunks being worked on in
    3244             :  * parallel.  Return NULL if there are none left.  Otherwise return a pointer
    3245             :  * to the chunk, and set *shared to the DSA pointer to the chunk.
    3246             :  */
    3247             : static HashMemoryChunk
    3248         774 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
    3249             : {
    3250         774 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3251             :     HashMemoryChunk chunk;
    3252             : 
    3253         774 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3254         774 :     if (DsaPointerIsValid(pstate->chunk_work_queue))
    3255             :     {
    3256         640 :         *shared = pstate->chunk_work_queue;
    3257         640 :         chunk = (HashMemoryChunk)
    3258         640 :             dsa_get_address(hashtable->area, *shared);
    3259         640 :         pstate->chunk_work_queue = chunk->next.shared;
    3260             :     }
    3261             :     else
    3262         134 :         chunk = NULL;
    3263         774 :     LWLockRelease(&pstate->lock);
    3264             : 
    3265         774 :     return chunk;
    3266             : }
    3267             : 
    3268             : /*
    3269             :  * Increase the space preallocated in this backend for a given inner batch by
    3270             :  * at least a given amount.  This allows us to track whether a given batch
    3271             :  * would fit in memory when loaded back in.  Also increase the number of
    3272             :  * batches or buckets if required.
    3273             :  *
    3274             :  * This maintains a running estimate of how much space will be taken when we
    3275             :  * load the batch back into memory by simulating the way chunks will be handed
    3276             :  * out to workers.  It's not perfectly accurate because the tuples will be
    3277             :  * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
    3278             :  * it should be pretty close.  It tends to overestimate by a fraction of a
    3279             :  * chunk per worker since all workers gang up to preallocate during hashing,
    3280             :  * but workers tend to reload batches alone if there are enough to go around,
    3281             :  * leaving fewer partially filled chunks.  This effect is bounded by
    3282             :  * nparticipants.
    3283             :  *
    3284             :  * Return false if the number of batches or buckets has changed, and the
    3285             :  * caller should reconsider which batch a given tuple now belongs in and call
    3286             :  * again.
    3287             :  */
    3288             : static bool
    3289        1060 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
    3290             : {
    3291        1060 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3292        1060 :     ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
    3293        1060 :     size_t      want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
    3294             : 
    3295             :     Assert(batchno > 0);
    3296             :     Assert(batchno < hashtable->nbatch);
    3297             :     Assert(size == MAXALIGN(size));
    3298             : 
    3299        1060 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3300             : 
    3301             :     /* Has another participant commanded us to help grow? */
    3302        2108 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    3303        1048 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3304             :     {
    3305          12 :         ParallelHashGrowth growth = pstate->growth;
    3306             : 
    3307          12 :         LWLockRelease(&pstate->lock);
    3308          12 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    3309          12 :             ExecParallelHashIncreaseNumBatches(hashtable);
    3310           0 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3311           0 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    3312             : 
    3313          12 :         return false;
    3314             :     }
    3315             : 
    3316        1944 :     if (pstate->growth != PHJ_GROWTH_DISABLED &&
    3317        1250 :         batch->at_least_one_chunk &&
    3318         354 :         (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
    3319         354 :          > pstate->space_allowed))
    3320             :     {
    3321             :         /*
    3322             :          * We have determined that this batch would exceed the space budget if
    3323             :          * loaded into memory.  Command all participants to help repartition.
    3324             :          */
    3325          12 :         batch->shared->space_exhausted = true;
    3326          12 :         pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3327          12 :         LWLockRelease(&pstate->lock);
    3328             : 
    3329          12 :         return false;
    3330             :     }
    3331             : 
    3332        1036 :     batch->at_least_one_chunk = true;
    3333        1036 :     batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
    3334        1036 :     batch->preallocated = want;
    3335        1036 :     LWLockRelease(&pstate->lock);
    3336             : 
    3337        1036 :     return true;
    3338             : }
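
A minimal standalone sketch of the accounting step above, with simplified types that fold the per-backend at_least_one_chunk flag and the shared size estimate into one struct; in the real code these live in different structures and are updated under pstate->lock:

    #include <stdbool.h>
    #include <stddef.h>

    struct batch_estimate
    {
        size_t estimated_size;      /* running estimate for this batch */
        bool   at_least_one_chunk;  /* has this backend reserved anything? */
    };

    static bool
    reserve_chunk(struct batch_estimate *b, size_t want, size_t header,
                  size_t space_allowed)
    {
        /* Refuse once the estimate would exceed the budget, except for the
         * very first chunk, which is always allowed. */
        if (b->at_least_one_chunk &&
            b->estimated_size + want + header > space_allowed)
            return false;           /* caller should trigger repartitioning */

        b->at_least_one_chunk = true;
        b->estimated_size += want + header;
        return true;
    }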

Generated by: LCOV version 1.13