LCOV - code coverage report
Current view: top level - src/backend/executor - nodeAppend.c (source / functions)
Test:         PostgreSQL 13devel
Date:         2019-09-22 07:07:17
Coverage:     Lines: 173 of 184 hit (94.0 %)    Functions: 11 of 12 hit (91.7 %)
Legend:       Lines: hit | not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeAppend.c
       4             :  *    routines to handle append nodes.
       5             :  *
       6             :  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/executor/nodeAppend.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : /* INTERFACE ROUTINES
      16             :  *      ExecInitAppend  - initialize the append node
      17             :  *      ExecAppend      - retrieve the next tuple from the node
      18             :  *      ExecEndAppend   - shut down the append node
      19             :  *      ExecReScanAppend - rescan the append node
      20             :  *
      21             :  *   NOTES
      22             :  *      Each append node contains a list of one or more subplans which
      23             :  *      must be iteratively processed (forwards or backwards).
      24             :  *      Tuples are retrieved by executing the 'whichplan'th subplan
      25             :  *      until the subplan stops returning tuples, at which point that
      26             :  *      plan is shut down and the next started up.
      27             :  *
      28             :  *      Append nodes don't make use of their left and right
      29             :  *      subtrees, rather they maintain a list of subplans so
      30             :  *      a typical append node looks like this in the plan tree:
      31             :  *
      32             :  *                 ...
      33             :  *                 /
      34             :  *              Append -------+------+------+--- nil
      35             :  *              /   \         |      |      |
      36             :  *            nil   nil      ...    ...    ...
      37             :  *                               subplans
      38             :  *
      39             :  *      Append nodes are currently used for unions, and to support
      40             :  *      inheritance queries, where several relations need to be scanned.
      41             :  *      For example, in our standard person/student/employee/student-emp
      42             :  *      example, where student and employee inherit from person
      43             :  *      and student-emp inherits from student and employee, the
      44             :  *      query:
      45             :  *
      46             :  *              select name from person
      47             :  *
      48             :  *      generates the plan:
      49             :  *
      50             :  *                |
      51             :  *              Append -------+-------+--------+--------+
      52             :  *              /   \         |       |        |        |
      53             :  *            nil   nil      Scan    Scan     Scan     Scan
      54             :  *                            |       |        |        |
      55             :  *                          person employee student student-emp
      56             :  */
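
The NOTES above boil down to a simple loop: run the 'whichplan'th subplan until it stops returning tuples, then move on to the next one. The following is a minimal standalone sketch of that iteration pattern in plain C; the "subplans" are toy array-backed generators and every name here is illustrative, not a PostgreSQL API.

#include <stdio.h>

/* A toy "subplan": yields values from a fixed array, then reports exhaustion. */
typedef struct ToySubplan
{
    const int  *values;
    int         nvalues;
    int         pos;
} ToySubplan;

/* Store the next value and return 1, or return 0 when the subplan is done. */
static int
toy_subplan_next(ToySubplan *sp, int *out)
{
    if (sp->pos >= sp->nvalues)
        return 0;
    *out = sp->values[sp->pos++];
    return 1;
}

/* Append-style iteration: drain the current subplan, then start the next. */
static void
toy_append_scan(ToySubplan *subplans, int nplans)
{
    int         whichplan = 0;

    while (whichplan < nplans)
    {
        int         value;

        if (toy_subplan_next(&subplans[whichplan], &value))
            printf("tuple: %d\n", value);
        else
            whichplan++;        /* current subplan exhausted; advance */
    }
}

int
main(void)
{
    const int   a[] = {1, 2};
    const int   b[] = {10};
    ToySubplan  subplans[] = {{a, 2, 0}, {b, 1, 0}};

    toy_append_scan(subplans, 2);
    return 0;
}
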
      57             : 
      58             : #include "postgres.h"
      59             : 
      60             : #include "executor/execdebug.h"
      61             : #include "executor/execPartition.h"
      62             : #include "executor/nodeAppend.h"
      63             : #include "miscadmin.h"
      64             : 
      65             : /* Shared state for parallel-aware Append. */
      66             : struct ParallelAppendState
      67             : {
      68             :     LWLock      pa_lock;        /* mutual exclusion to choose next subplan */
      69             :     int         pa_next_plan;   /* next plan to choose by any worker */
      70             : 
      71             :     /*
      72             :      * pa_finished[i] should be true if no more workers should select subplan
       73             :      * i.  For a non-partial plan, this should be set to true as soon as a
      74             :      * worker selects the plan; for a partial plan, it remains false until
      75             :      * some worker executes the plan to completion.
      76             :      */
      77             :     bool        pa_finished[FLEXIBLE_ARRAY_MEMBER];
      78             : };
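
ParallelAppendState ends in a flexible array member, so the shared chunk has to be sized as the fixed header plus one bool per subplan; ExecAppendEstimate below does exactly that with add_size and offsetof. Here is a hedged, self-contained sketch of the same sizing pattern using plain malloc instead of the parallel DSM machinery (all names are invented for the sketch).

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Shape mirrors ParallelAppendState: a fixed header plus per-subplan flags. */
typedef struct ToyAppendShared
{
    int         next_plan;
    bool        finished[];     /* C99 flexible array member */
} ToyAppendShared;

static ToyAppendShared *
toy_append_shared_create(int nplans)
{
    /* offsetof gives the header size; the flag array follows immediately. */
    size_t      len = offsetof(ToyAppendShared, finished) +
                      sizeof(bool) * (size_t) nplans;
    ToyAppendShared *shared = malloc(len);

    if (shared != NULL)
        memset(shared, 0, len); /* next_plan = 0, all flags false */
    return shared;
}

int
main(void)
{
    ToyAppendShared *shared = toy_append_shared_create(4);

    if (shared != NULL)
    {
        shared->finished[2] = true;     /* e.g. subplan 2 fully executed */
        free(shared);
    }
    return 0;
}
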
      79             : 
      80             : #define INVALID_SUBPLAN_INDEX       -1
      81             : #define NO_MATCHING_SUBPLANS        -2
      82             : 
      83             : static TupleTableSlot *ExecAppend(PlanState *pstate);
      84             : static bool choose_next_subplan_locally(AppendState *node);
      85             : static bool choose_next_subplan_for_leader(AppendState *node);
      86             : static bool choose_next_subplan_for_worker(AppendState *node);
      87             : static void mark_invalid_subplans_as_finished(AppendState *node);
      88             : 
      89             : /* ----------------------------------------------------------------
      90             :  *      ExecInitAppend
      91             :  *
      92             :  *      Begin all of the subscans of the append node.
      93             :  *
      94             :  *     (This is potentially wasteful, since the entire result of the
      95             :  *      append node may not be scanned, but this way all of the
      96             :  *      structures get allocated in the executor's top level memory
      97             :  *      block instead of that of the call to ExecAppend.)
      98             :  * ----------------------------------------------------------------
      99             :  */
     100             : AppendState *
     101        6014 : ExecInitAppend(Append *node, EState *estate, int eflags)
     102             : {
     103        6014 :     AppendState *appendstate = makeNode(AppendState);
     104             :     PlanState **appendplanstates;
     105             :     Bitmapset  *validsubplans;
     106             :     int         nplans;
     107             :     int         firstvalid;
     108             :     int         i,
     109             :                 j;
     110             : 
     111             :     /* check for unsupported flags */
     112             :     Assert(!(eflags & EXEC_FLAG_MARK));
     113             : 
     114             :     /*
     115             :      * create new AppendState for our append node
     116             :      */
     117        6014 :     appendstate->ps.plan = (Plan *) node;
     118        6014 :     appendstate->ps.state = estate;
     119        6014 :     appendstate->ps.ExecProcNode = ExecAppend;
     120             : 
     121             :     /* Let choose_next_subplan_* function handle setting the first subplan */
     122        6014 :     appendstate->as_whichplan = INVALID_SUBPLAN_INDEX;
     123             : 
     124             :     /* If run-time partition pruning is enabled, then set that up now */
     125        6014 :     if (node->part_prune_info != NULL)
     126             :     {
     127             :         PartitionPruneState *prunestate;
     128             : 
     129             :         /* We may need an expression context to evaluate partition exprs */
     130         360 :         ExecAssignExprContext(estate, &appendstate->ps);
     131             : 
     132             :         /* Create the working data structure for pruning. */
     133         360 :         prunestate = ExecCreatePartitionPruneState(&appendstate->ps,
     134         360 :                                                    node->part_prune_info);
     135         360 :         appendstate->as_prune_state = prunestate;
     136             : 
     137             :         /* Perform an initial partition prune, if required. */
     138         360 :         if (prunestate->do_initial_prune)
     139             :         {
     140             :             /* Determine which subplans survive initial pruning */
     141         140 :             validsubplans = ExecFindInitialMatchingSubPlans(prunestate,
     142         140 :                                                             list_length(node->appendplans));
     143             : 
     144             :             /*
     145             :              * The case where no subplans survive pruning must be handled
     146             :              * specially.  The problem here is that code in explain.c requires
     147             :              * an Append to have at least one subplan in order for it to
     148             :              * properly determine the Vars in that subplan's targetlist.  We
     149             :              * sidestep this issue by just initializing the first subplan and
     150             :              * setting as_whichplan to NO_MATCHING_SUBPLANS to indicate that
     151             :              * we don't really need to scan any subnodes.
     152             :              */
     153         140 :             if (bms_is_empty(validsubplans))
     154             :             {
     155          24 :                 appendstate->as_whichplan = NO_MATCHING_SUBPLANS;
     156             : 
     157             :                 /* Mark the first as valid so that it's initialized below */
     158          24 :                 validsubplans = bms_make_singleton(0);
     159             :             }
     160             : 
     161         140 :             nplans = bms_num_members(validsubplans);
     162             :         }
     163             :         else
     164             :         {
     165             :             /* We'll need to initialize all subplans */
     166         220 :             nplans = list_length(node->appendplans);
     167             :             Assert(nplans > 0);
     168         220 :             validsubplans = bms_add_range(NULL, 0, nplans - 1);
     169             :         }
     170             : 
     171             :         /*
     172             :          * If no runtime pruning is required, we can fill as_valid_subplans
     173             :          * immediately, preventing later calls to ExecFindMatchingSubPlans.
     174             :          */
     175         360 :         if (!prunestate->do_exec_prune)
     176             :         {
     177             :             Assert(nplans > 0);
     178         108 :             appendstate->as_valid_subplans = bms_add_range(NULL, 0, nplans - 1);
     179             :         }
     180             :     }
     181             :     else
     182             :     {
     183        5654 :         nplans = list_length(node->appendplans);
     184             : 
     185             :         /*
     186             :          * When run-time partition pruning is not enabled we can just mark all
     187             :          * subplans as valid; they must also all be initialized.
     188             :          */
     189             :         Assert(nplans > 0);
     190        5654 :         appendstate->as_valid_subplans = validsubplans =
     191        5654 :             bms_add_range(NULL, 0, nplans - 1);
     192        5654 :         appendstate->as_prune_state = NULL;
     193             :     }
     194             : 
     195             :     /*
     196             :      * Initialize result tuple type and slot.
     197             :      */
     198        6014 :     ExecInitResultTupleSlotTL(&appendstate->ps, &TTSOpsVirtual);
     199             : 
     200             :     /* node returns slots from each of its subnodes, therefore not fixed */
     201        6014 :     appendstate->ps.resultopsset = true;
     202        6014 :     appendstate->ps.resultopsfixed = false;
     203             : 
     204        6014 :     appendplanstates = (PlanState **) palloc(nplans *
     205             :                                              sizeof(PlanState *));
     206             : 
     207             :     /*
     208             :      * call ExecInitNode on each of the valid plans to be executed and save
     209             :      * the results into the appendplanstates array.
     210             :      *
     211             :      * While at it, find out the first valid partial plan.
     212             :      */
     213        6014 :     j = 0;
     214        6014 :     firstvalid = nplans;
     215        6014 :     i = -1;
     216       30140 :     while ((i = bms_next_member(validsubplans, i)) >= 0)
     217             :     {
     218       18112 :         Plan       *initNode = (Plan *) list_nth(node->appendplans, i);
     219             : 
     220             :         /*
     221             :          * Record the lowest appendplans index which is a valid partial plan.
     222             :          */
     223       18112 :         if (i >= node->first_partial_plan && j < firstvalid)
     224         336 :             firstvalid = j;
     225             : 
     226       18112 :         appendplanstates[j++] = ExecInitNode(initNode, estate, eflags);
     227             :     }
     228             : 
     229        6014 :     appendstate->as_first_partial_plan = firstvalid;
     230        6014 :     appendstate->appendplans = appendplanstates;
     231        6014 :     appendstate->as_nplans = nplans;
     232             : 
     233             :     /*
     234             :      * Miscellaneous initialization
     235             :      */
     236             : 
     237        6014 :     appendstate->ps.ps_ProjInfo = NULL;
     238             : 
     239             :     /* For parallel query, this will be overridden later. */
     240        6014 :     appendstate->choose_next_subplan = choose_next_subplan_locally;
     241             : 
     242        6014 :     return appendstate;
     243             : }
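
The initialization loop above walks the validsubplans Bitmapset, initializes only the surviving subplans, packs their states densely into appendplanstates, and remembers the lowest packed index that corresponds to a partial plan. A small standalone sketch of that dense-packing logic follows, using a plain 64-bit mask in place of a Bitmapset; the names and the 64-plan limit are assumptions of the sketch, not properties of the real code.

#include <stdint.h>
#include <stdio.h>

/*
 * Pack the set bits of 'valid' (bit i set == subplan i survived pruning)
 * into dense slots 0..n-1, and report the first dense index whose original
 * position is at or beyond 'first_partial_plan'.  Returns the number of
 * packed (i.e. initialized) subplans.
 */
static int
pack_valid_subplans(uint64_t valid, int nplans, int first_partial_plan,
                    int *dense_to_original, int *first_valid_partial)
{
    int         j = 0;

    *first_valid_partial = -1;

    for (int i = 0; i < nplans; i++)
    {
        if (!(valid & (UINT64_C(1) << i)))
            continue;           /* pruned away: would not be initialized */

        if (i >= first_partial_plan && *first_valid_partial < 0)
            *first_valid_partial = j;

        dense_to_original[j++] = i;     /* "ExecInitNode" would happen here */
    }
    return j;
}

int
main(void)
{
    int         map[64];
    int         firstpartial;
    /* subplans 1, 3 and 4 survive pruning; partial plans start at index 3 */
    int         n = pack_valid_subplans(UINT64_C(0x1A), 5, 3, map, &firstpartial);

    for (int j = 0; j < n; j++)
        printf("dense %d -> original %d\n", j, map[j]);
    printf("first valid partial plan (dense index): %d\n", firstpartial);
    return 0;
}
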
     244             : 
     245             : /* ----------------------------------------------------------------
     246             :  *     ExecAppend
     247             :  *
     248             :  *      Handles iteration over multiple subplans.
     249             :  * ----------------------------------------------------------------
     250             :  */
     251             : static TupleTableSlot *
     252     1257538 : ExecAppend(PlanState *pstate)
     253             : {
     254     1257538 :     AppendState *node = castNode(AppendState, pstate);
     255             : 
     256     1257538 :     if (node->as_whichplan < 0)
     257             :     {
     258             :         /*
     259             :          * If no subplan has been chosen, we must choose one before
     260             :          * proceeding.
     261             :          */
     262       91352 :         if (node->as_whichplan == INVALID_SUBPLAN_INDEX &&
     263       45664 :             !node->choose_next_subplan(node))
     264        2228 :             return ExecClearTuple(node->ps.ps_ResultTupleSlot);
     265             : 
     266             :         /* Nothing to do if there are no matching subplans */
     267       43460 :         else if (node->as_whichplan == NO_MATCHING_SUBPLANS)
     268          24 :             return ExecClearTuple(node->ps.ps_ResultTupleSlot);
     269             :     }
     270             : 
     271             :     for (;;)
     272       46032 :     {
     273             :         PlanState  *subnode;
     274             :         TupleTableSlot *result;
     275             : 
     276     1301318 :         CHECK_FOR_INTERRUPTS();
     277             : 
     278             :         /*
     279             :          * figure out which subplan we are currently processing
     280             :          */
     281             :         Assert(node->as_whichplan >= 0 && node->as_whichplan < node->as_nplans);
     282     1301318 :         subnode = node->appendplans[node->as_whichplan];
     283             : 
     284             :         /*
     285             :          * get a tuple from the subplan
     286             :          */
     287     1301318 :         result = ExecProcNode(subnode);
     288             : 
     289     1301318 :         if (!TupIsNull(result))
     290             :         {
     291             :             /*
     292             :              * If the subplan gave us something then return it as-is. We do
     293             :              * NOT make use of the result slot that was set up in
     294             :              * ExecInitAppend; there's no need for it.
     295             :              */
     296     1212028 :             return result;
     297             :         }
     298             : 
     299             :         /* choose new subplan; if none, we're done */
     300       89290 :         if (!node->choose_next_subplan(node))
     301       43258 :             return ExecClearTuple(node->ps.ps_ResultTupleSlot);
     302             :     }
     303             : }
     304             : 
     305             : /* ----------------------------------------------------------------
     306             :  *      ExecEndAppend
     307             :  *
     308             :  *      Shuts down the subscans of the append node.
     309             :  *
     310             :  *      Returns nothing of interest.
     311             :  * ----------------------------------------------------------------
     312             :  */
     313             : void
     314        5960 : ExecEndAppend(AppendState *node)
     315             : {
     316             :     PlanState **appendplans;
     317             :     int         nplans;
     318             :     int         i;
     319             : 
     320             :     /*
     321             :      * get information from the node
     322             :      */
     323        5960 :     appendplans = node->appendplans;
     324        5960 :     nplans = node->as_nplans;
     325             : 
     326             :     /*
     327             :      * shut down each of the subscans
     328             :      */
     329       23960 :     for (i = 0; i < nplans; i++)
     330       18000 :         ExecEndNode(appendplans[i]);
     331        5960 : }
     332             : 
     333             : void
     334       41830 : ExecReScanAppend(AppendState *node)
     335             : {
     336             :     int         i;
     337             : 
     338             :     /*
     339             :      * If any PARAM_EXEC Params used in pruning expressions have changed, then
     340             :      * we'd better unset the valid subplans so that they are reselected for
     341             :      * the new parameter values.
     342             :      */
     343       44006 :     if (node->as_prune_state &&
     344        2176 :         bms_overlap(node->ps.chgParam,
     345        2176 :                     node->as_prune_state->execparamids))
     346             :     {
     347        2176 :         bms_free(node->as_valid_subplans);
     348        2176 :         node->as_valid_subplans = NULL;
     349             :     }
     350             : 
     351      140338 :     for (i = 0; i < node->as_nplans; i++)
     352             :     {
     353       98508 :         PlanState  *subnode = node->appendplans[i];
     354             : 
     355             :         /*
     356             :          * ExecReScan doesn't know about my subplans, so I have to do
     357             :          * changed-parameter signaling myself.
     358             :          */
     359       98508 :         if (node->ps.chgParam != NULL)
     360       94424 :             UpdateChangedParamSet(subnode, node->ps.chgParam);
     361             : 
     362             :         /*
     363             :          * If chgParam of subnode is not null then plan will be re-scanned by
     364             :          * first ExecProcNode.
     365             :          */
     366       98508 :         if (subnode->chgParam == NULL)
     367       40904 :             ExecReScan(subnode);
     368             :     }
     369             : 
     370             :     /* Let choose_next_subplan_* function handle setting the first subplan */
     371       41830 :     node->as_whichplan = INVALID_SUBPLAN_INDEX;
     372       41830 : }
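
The rescan path illustrates a cache-invalidation rule: the cached set of valid subplans is discarded only when one of the changed PARAM_EXEC parameters is actually used by the pruning expressions. A hedged standalone sketch of that overlap check, with plain bitmasks standing in for the executor's parameter sets (toy names only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct ToyAppend
{
    bool        have_valid_set;     /* is the cached pruning result usable? */
    uint64_t    valid_subplans;     /* cached result of run-time pruning */
} ToyAppend;

/*
 * On rescan, drop the cached pruning result only if a changed parameter
 * overlaps the parameters the pruning expressions depend on.
 */
static void
toy_rescan(ToyAppend *node, uint64_t changed_params, uint64_t prune_params)
{
    if (node->have_valid_set && (changed_params & prune_params) != 0)
    {
        node->have_valid_set = false;   /* force re-pruning on next fetch */
        node->valid_subplans = 0;
    }
}

int
main(void)
{
    ToyAppend   node = {true, UINT64_C(0x5)};

    toy_rescan(&node, UINT64_C(0x2) /* changed */ , UINT64_C(0x6) /* used */ );
    printf("cached valid set kept: %s\n", node.have_valid_set ? "yes" : "no");
    return 0;
}
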
     373             : 
     374             : /* ----------------------------------------------------------------
     375             :  *                      Parallel Append Support
     376             :  * ----------------------------------------------------------------
     377             :  */
     378             : 
     379             : /* ----------------------------------------------------------------
     380             :  *      ExecAppendEstimate
     381             :  *
     382             :  *      Compute the amount of space we'll need in the parallel
     383             :  *      query DSM, and inform pcxt->estimator about our needs.
     384             :  * ----------------------------------------------------------------
     385             :  */
     386             : void
     387         108 : ExecAppendEstimate(AppendState *node,
     388             :                    ParallelContext *pcxt)
     389             : {
     390         108 :     node->pstate_len =
     391         108 :         add_size(offsetof(ParallelAppendState, pa_finished),
     392         108 :                  sizeof(bool) * node->as_nplans);
     393             : 
     394         108 :     shm_toc_estimate_chunk(&pcxt->estimator, node->pstate_len);
     395         108 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
     396         108 : }
     397             : 
     398             : 
     399             : /* ----------------------------------------------------------------
     400             :  *      ExecAppendInitializeDSM
     401             :  *
     402             :  *      Set up shared state for Parallel Append.
     403             :  * ----------------------------------------------------------------
     404             :  */
     405             : void
     406         108 : ExecAppendInitializeDSM(AppendState *node,
     407             :                         ParallelContext *pcxt)
     408             : {
     409             :     ParallelAppendState *pstate;
     410             : 
     411         108 :     pstate = shm_toc_allocate(pcxt->toc, node->pstate_len);
     412         108 :     memset(pstate, 0, node->pstate_len);
     413         108 :     LWLockInitialize(&pstate->pa_lock, LWTRANCHE_PARALLEL_APPEND);
     414         108 :     shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);
     415             : 
     416         108 :     node->as_pstate = pstate;
     417         108 :     node->choose_next_subplan = choose_next_subplan_for_leader;
     418         108 : }
     419             : 
     420             : /* ----------------------------------------------------------------
     421             :  *      ExecAppendReInitializeDSM
     422             :  *
     423             :  *      Reset shared state before beginning a fresh scan.
     424             :  * ----------------------------------------------------------------
     425             :  */
     426             : void
     427           0 : ExecAppendReInitializeDSM(AppendState *node, ParallelContext *pcxt)
     428             : {
     429           0 :     ParallelAppendState *pstate = node->as_pstate;
     430             : 
     431           0 :     pstate->pa_next_plan = 0;
     432           0 :     memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans);
     433           0 : }
     434             : 
     435             : /* ----------------------------------------------------------------
     436             :  *      ExecAppendInitializeWorker
     437             :  *
     438             :  *      Copy relevant information from TOC into planstate, and initialize
     439             :  *      whatever is required to choose and execute the optimal subplan.
     440             :  * ----------------------------------------------------------------
     441             :  */
     442             : void
     443         228 : ExecAppendInitializeWorker(AppendState *node, ParallelWorkerContext *pwcxt)
     444             : {
     445         228 :     node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
     446         228 :     node->choose_next_subplan = choose_next_subplan_for_worker;
     447         228 : }
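
Subplan selection is dispatched through a function pointer: ExecInitAppend installs choose_next_subplan_locally, and the leader and worker initialization hooks above swap in their parallel-aware variants. A minimal sketch of that strategy-swap pattern in plain C (toy types; the real selection functions appear below):

#include <stdbool.h>
#include <stdio.h>

struct ToyAppend;
typedef bool (*choose_next_fn) (struct ToyAppend *node);

typedef struct ToyAppend
{
    int             whichplan;
    int             nplans;
    choose_next_fn  choose_next_subplan;    /* strategy installed at init */
} ToyAppend;

/* Serial strategy: simply advance to the next subplan in order. */
static bool
choose_next_locally(struct ToyAppend *node)
{
    if (node->whichplan + 1 >= node->nplans)
        return false;
    node->whichplan++;
    return true;
}

int
main(void)
{
    ToyAppend   node = {-1, 3, choose_next_locally};

    /* A parallel leader or worker would install a different strategy here. */
    while (node.choose_next_subplan(&node))
        printf("running subplan %d\n", node.whichplan);
    return 0;
}
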
     448             : 
     449             : /* ----------------------------------------------------------------
     450             :  *      choose_next_subplan_locally
     451             :  *
     452             :  *      Choose next subplan for a non-parallel-aware Append,
     453             :  *      returning false if there are no more.
     454             :  * ----------------------------------------------------------------
     455             :  */
     456             : static bool
     457      134024 : choose_next_subplan_locally(AppendState *node)
     458             : {
     459      134024 :     int         whichplan = node->as_whichplan;
     460             :     int         nextplan;
     461             : 
     462             :     /* We should never be called when there are no subplans */
     463             :     Assert(whichplan != NO_MATCHING_SUBPLANS);
     464             : 
     465             :     /*
     466             :      * If first call then have the bms member function choose the first valid
     467             :      * subplan by initializing whichplan to -1.  If there happen to be no
     468             :      * valid subplans then the bms member function will handle that by
     469             :      * returning a negative number which will allow us to exit returning a
     470             :      * false value.
     471             :      */
     472      134024 :     if (whichplan == INVALID_SUBPLAN_INDEX)
     473             :     {
     474       45340 :         if (node->as_valid_subplans == NULL)
     475        2296 :             node->as_valid_subplans =
     476        2296 :                 ExecFindMatchingSubPlans(node->as_prune_state);
     477             : 
     478       45340 :         whichplan = -1;
     479             :     }
     480             : 
     481             :     /* Ensure whichplan is within the expected range */
     482             :     Assert(whichplan >= -1 && whichplan <= node->as_nplans);
     483             : 
     484      134024 :     if (ScanDirectionIsForward(node->ps.state->es_direction))
     485      134012 :         nextplan = bms_next_member(node->as_valid_subplans, whichplan);
     486             :     else
     487          12 :         nextplan = bms_prev_member(node->as_valid_subplans, whichplan);
     488             : 
     489      134024 :     if (nextplan < 0)
     490       45162 :         return false;
     491             : 
     492       88862 :     node->as_whichplan = nextplan;
     493             : 
     494       88862 :     return true;
     495             : }
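
choose_next_subplan_locally relies on the Bitmapset iteration contract: starting from -1, bms_next_member yields successive members of as_valid_subplans and a negative result once none remain, while bms_prev_member walks the set in reverse for backward scans. A standalone sketch of the same contract over a 64-bit mask (toy helpers, not the Bitmapset API):

#include <stdint.h>
#include <stdio.h>

/* Smallest set bit strictly greater than 'prev', or -1 if there is none. */
static int
next_member(uint64_t set, int prev)
{
    for (int i = prev + 1; i < 64; i++)
        if (set & (UINT64_C(1) << i))
            return i;
    return -1;
}

/* Largest set bit strictly less than 'prev', or -1 if there is none. */
static int
prev_member(uint64_t set, int prev)
{
    for (int i = prev - 1; i >= 0; i--)
        if (set & (UINT64_C(1) << i))
            return i;
    return -1;
}

int
main(void)
{
    uint64_t    valid = UINT64_C(0x16);     /* members 1, 2 and 4 */

    /* Forward scan: start from -1, as the executor code above does. */
    for (int p = next_member(valid, -1); p >= 0; p = next_member(valid, p))
        printf("forward: subplan %d\n", p);

    /* Backward scan: start just past the highest possible member. */
    for (int p = prev_member(valid, 64); p >= 0; p = prev_member(valid, p))
        printf("backward: subplan %d\n", p);
    return 0;
}
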
     496             : 
     497             : /* ----------------------------------------------------------------
     498             :  *      choose_next_subplan_for_leader
     499             :  *
     500             :  *      Try to pick a plan which doesn't commit us to doing much
     501             :  *      work locally, so that as much work as possible is done in
     502             :  *      the workers.  Cheapest subplans are at the end.
     503             :  * ----------------------------------------------------------------
     504             :  */
     505             : static bool
     506         706 : choose_next_subplan_for_leader(AppendState *node)
     507             : {
     508         706 :     ParallelAppendState *pstate = node->as_pstate;
     509             : 
     510             :     /* Backward scan is not supported by parallel-aware plans */
     511             :     Assert(ScanDirectionIsForward(node->ps.state->es_direction));
     512             : 
     513             :     /* We should never be called when there are no subplans */
     514             :     Assert(node->as_whichplan != NO_MATCHING_SUBPLANS);
     515             : 
     516         706 :     LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);
     517             : 
     518         706 :     if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
     519             :     {
     520             :         /* Mark just-completed subplan as finished. */
     521         602 :         node->as_pstate->pa_finished[node->as_whichplan] = true;
     522             :     }
     523             :     else
     524             :     {
     525             :         /* Start with last subplan. */
     526         104 :         node->as_whichplan = node->as_nplans - 1;
     527             : 
     528             :         /*
     529             :          * If we've yet to determine the valid subplans then do so now.  If
     530             :          * run-time pruning is disabled then the valid subplans will always be
     531             :          * set to all subplans.
     532             :          */
     533         104 :         if (node->as_valid_subplans == NULL)
     534             :         {
     535           4 :             node->as_valid_subplans =
     536           4 :                 ExecFindMatchingSubPlans(node->as_prune_state);
     537             : 
     538             :             /*
     539             :              * Mark each invalid plan as finished to allow the loop below to
     540             :              * select the first valid subplan.
     541             :              */
     542           4 :             mark_invalid_subplans_as_finished(node);
     543             :         }
     544             :     }
     545             : 
     546             :     /* Loop until we find a subplan to execute. */
     547        1916 :     while (pstate->pa_finished[node->as_whichplan])
     548             :     {
     549         608 :         if (node->as_whichplan == 0)
     550             :         {
     551         104 :             pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
     552         104 :             node->as_whichplan = INVALID_SUBPLAN_INDEX;
     553         104 :             LWLockRelease(&pstate->pa_lock);
     554         104 :             return false;
     555             :         }
     556             : 
     557             :         /*
     558             :          * We needn't pay attention to as_valid_subplans here as all invalid
     559             :          * plans have been marked as finished.
     560             :          */
     561         504 :         node->as_whichplan--;
     562             :     }
     563             : 
     564             :     /* If non-partial, immediately mark as finished. */
     565         602 :     if (node->as_whichplan < node->as_first_partial_plan)
     566          82 :         node->as_pstate->pa_finished[node->as_whichplan] = true;
     567             : 
     568         602 :     LWLockRelease(&pstate->pa_lock);
     569             : 
     570         602 :     return true;
     571             : }
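
The leader's policy is to start from the end of the subplan list, where the planner places the cheapest subplans, and walk backward past anything already marked finished, leaving the expensive plans for the workers. A hedged standalone sketch of that backward search (toy state only; no pa_lock, and no marking of claimed non-partial plans):

#include <stdbool.h>
#include <stdio.h>

/*
 * Scan from the last subplan toward the first and return the index of the
 * first not-yet-finished plan, or -1 if every plan is already finished.
 */
static int
leader_pick_subplan(const bool *finished, int nplans)
{
    for (int plan = nplans - 1; plan >= 0; plan--)
        if (!finished[plan])
            return plan;
    return -1;
}

int
main(void)
{
    bool        finished[] = {false, true, true, false};
    int         pick = leader_pick_subplan(finished, 4);

    printf("leader picks subplan %d\n", pick);  /* 3: cheapest unfinished */
    return 0;
}
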
     572             : 
     573             : /* ----------------------------------------------------------------
     574             :  *      choose_next_subplan_for_worker
     575             :  *
     576             :  *      Choose next subplan for a parallel-aware Append, returning
     577             :  *      false if there are no more.
     578             :  *
     579             :  *      We start from the first plan and advance through the list;
     580             :  *      when we get back to the end, we loop back to the first
     581             :  *      partial plan.  This assigns the non-partial plans first in
     582             :  *      order of descending cost and then spreads out the workers
     583             :  *      as evenly as possible across the remaining partial plans.
     584             :  * ----------------------------------------------------------------
     585             :  */
     586             : static bool
     587         224 : choose_next_subplan_for_worker(AppendState *node)
     588             : {
     589         224 :     ParallelAppendState *pstate = node->as_pstate;
     590             : 
     591             :     /* Backward scan is not supported by parallel-aware plans */
     592             :     Assert(ScanDirectionIsForward(node->ps.state->es_direction));
     593             : 
     594             :     /* We should never be called when there are no subplans */
     595             :     Assert(node->as_whichplan != NO_MATCHING_SUBPLANS);
     596             : 
     597         224 :     LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);
     598             : 
     599             :     /* Mark just-completed subplan as finished. */
     600         224 :     if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
     601           4 :         node->as_pstate->pa_finished[node->as_whichplan] = true;
     602             : 
     603             :     /*
     604             :      * If we've yet to determine the valid subplans then do so now.  If
     605             :      * run-time pruning is disabled then the valid subplans will always be set
     606             :      * to all subplans.
     607             :      */
     608         220 :     else if (node->as_valid_subplans == NULL)
     609             :     {
     610           8 :         node->as_valid_subplans =
     611           8 :             ExecFindMatchingSubPlans(node->as_prune_state);
     612           8 :         mark_invalid_subplans_as_finished(node);
     613             :     }
     614             : 
     615             :     /* If all the plans are already done, we have nothing to do */
     616         224 :     if (pstate->pa_next_plan == INVALID_SUBPLAN_INDEX)
     617             :     {
     618         216 :         LWLockRelease(&pstate->pa_lock);
     619         216 :         return false;
     620             :     }
     621             : 
     622             :     /* Save the plan from which we are starting the search. */
     623           8 :     node->as_whichplan = pstate->pa_next_plan;
     624             : 
     625             :     /* Loop until we find a valid subplan to execute. */
     626          32 :     while (pstate->pa_finished[pstate->pa_next_plan])
     627             :     {
     628             :         int         nextplan;
     629             : 
     630          20 :         nextplan = bms_next_member(node->as_valid_subplans,
     631             :                                    pstate->pa_next_plan);
     632          20 :         if (nextplan >= 0)
     633             :         {
     634             :             /* Advance to the next valid plan. */
     635          16 :             pstate->pa_next_plan = nextplan;
     636             :         }
     637           4 :         else if (node->as_whichplan > node->as_first_partial_plan)
     638             :         {
     639             :             /*
     640             :              * Try looping back to the first valid partial plan, if there is
     641             :              * one.  If there isn't, arrange to bail out below.
     642             :              */
     643           2 :             nextplan = bms_next_member(node->as_valid_subplans,
     644           2 :                                        node->as_first_partial_plan - 1);
     645           2 :             pstate->pa_next_plan =
     646           2 :                 nextplan < 0 ? node->as_whichplan : nextplan;
     647             :         }
     648             :         else
     649             :         {
     650             :             /*
     651             :              * At last plan, and either there are no partial plans or we've
     652             :              * tried them all.  Arrange to bail out.
     653             :              */
     654           2 :             pstate->pa_next_plan = node->as_whichplan;
     655             :         }
     656             : 
     657          20 :         if (pstate->pa_next_plan == node->as_whichplan)
     658             :         {
     659             :             /* We've tried everything! */
     660           4 :             pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
     661           4 :             LWLockRelease(&pstate->pa_lock);
     662           4 :             return false;
     663             :         }
     664             :     }
     665             : 
     666             :     /* Pick the plan we found, and advance pa_next_plan one more time. */
     667           4 :     node->as_whichplan = pstate->pa_next_plan;
     668           4 :     pstate->pa_next_plan = bms_next_member(node->as_valid_subplans,
     669             :                                            pstate->pa_next_plan);
     670             : 
     671             :     /*
     672             :      * If there are no more valid plans then try setting the next plan to the
     673             :      * first valid partial plan.
     674             :      */
     675           4 :     if (pstate->pa_next_plan < 0)
     676             :     {
     677           0 :         int         nextplan = bms_next_member(node->as_valid_subplans,
     678           0 :                                                node->as_first_partial_plan - 1);
     679             : 
     680           0 :         if (nextplan >= 0)
     681           0 :             pstate->pa_next_plan = nextplan;
     682             :         else
     683             :         {
     684             :             /*
     685             :              * There are no valid partial plans, and we already chose the last
     686             :              * non-partial plan; so flag that there's nothing more for our
     687             :              * fellow workers to do.
     688             :              */
     689           0 :             pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
     690             :         }
     691             :     }
     692             : 
     693             :     /* If non-partial, immediately mark as finished. */
     694           4 :     if (node->as_whichplan < node->as_first_partial_plan)
     695           2 :         node->as_pstate->pa_finished[node->as_whichplan] = true;
     696             : 
     697           4 :     LWLockRelease(&pstate->pa_lock);
     698             : 
     699           4 :     return true;
     700             : }
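
Workers advance through the subplans in order and, when they reach the end, wrap back to the first partial plan rather than to plan 0, since a non-partial plan may be claimed only once. A simplified standalone sketch of that wrap-around search over a dense array of flags, ignoring the shared lock and the Bitmapset of valid plans (toy names throughout):

#include <stdbool.h>
#include <stdio.h>

/*
 * Starting at 'next_plan', find an unfinished subplan.  Past the last
 * subplan, wrap around to 'first_partial_plan' (non-partial plans precede
 * it and are never revisited).  Returns -1 once every candidate has been
 * tried, including the no-partial-plans case where first_partial_plan
 * equals nplans.
 */
static int
worker_pick_subplan(const bool *finished, int nplans,
                    int first_partial_plan, int next_plan)
{
    int         start = next_plan;

    do
    {
        if (!finished[next_plan])
            return next_plan;

        next_plan++;
        if (next_plan >= nplans)
            next_plan = first_partial_plan;     /* loop back to partial plans */
    } while (next_plan != start && next_plan < nplans);

    return -1;
}

int
main(void)
{
    /* plans 0-1 are non-partial and already claimed; 2-3 are partial */
    bool        finished[] = {true, true, true, false};
    int         pick = worker_pick_subplan(finished, 4, 2, 2);

    printf("worker picks subplan %d\n", pick);  /* 3 */
    return 0;
}
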
     701             : 
     702             : /*
     703             :  * mark_invalid_subplans_as_finished
     704             :  *      Marks the ParallelAppendState's pa_finished as true for each invalid
     705             :  *      subplan.
     706             :  *
     707             :  * This function should only be called for parallel Append with run-time
     708             :  * pruning enabled.
     709             :  */
     710             : static void
     711          12 : mark_invalid_subplans_as_finished(AppendState *node)
     712             : {
     713             :     int         i;
     714             : 
     715             :     /* Only valid to call this while in parallel Append mode */
     716             :     Assert(node->as_pstate);
     717             : 
     718             :     /* Shouldn't have been called when run-time pruning is not enabled */
     719             :     Assert(node->as_prune_state);
     720             : 
     721             :     /* Nothing to do if all plans are valid */
     722          12 :     if (bms_num_members(node->as_valid_subplans) == node->as_nplans)
     723           0 :         return;
     724             : 
     725             :     /* Mark all non-valid plans as finished */
     726          48 :     for (i = 0; i < node->as_nplans; i++)
     727             :     {
     728          36 :         if (!bms_is_member(i, node->as_valid_subplans))
     729          12 :             node->as_pstate->pa_finished[i] = true;
     730             :     }
     731             : }

Generated by: LCOV version 1.13