Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * execPartition.c
4 : * Support routines for partitioning.
5 : *
6 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : * IDENTIFICATION
10 : * src/backend/executor/execPartition.c
11 : *
12 : *-------------------------------------------------------------------------
13 : */
14 : #include "postgres.h"
15 :
16 : #include "access/table.h"
17 : #include "access/tableam.h"
18 : #include "catalog/partition.h"
19 : #include "catalog/pg_inherits.h"
20 : #include "catalog/pg_type.h"
21 : #include "executor/execPartition.h"
22 : #include "executor/executor.h"
23 : #include "executor/nodeModifyTable.h"
24 : #include "foreign/fdwapi.h"
25 : #include "mb/pg_wchar.h"
26 : #include "miscadmin.h"
27 : #include "nodes/makefuncs.h"
28 : #include "partitioning/partbounds.h"
29 : #include "partitioning/partdesc.h"
30 : #include "partitioning/partprune.h"
31 : #include "rewrite/rewriteManip.h"
32 : #include "utils/acl.h"
33 : #include "utils/lsyscache.h"
34 : #include "utils/partcache.h"
35 : #include "utils/rls.h"
36 : #include "utils/ruleutils.h"
37 :
38 :
39 : /*-----------------------
40 : * PartitionTupleRouting - Encapsulates all information required to
41 : * route a tuple inserted into a partitioned table to one of its leaf
42 : * partitions.
43 : *
44 : * partition_root
45 : * The partitioned table that's the target of the command.
46 : *
47 : * partition_dispatch_info
48 : * Array of 'max_dispatch' elements containing a pointer to a
49 : * PartitionDispatch object for every partitioned table touched by tuple
50 : * routing. The entry for the target partitioned table is *always*
51 : * present in the 0th element of this array. See comment for
52 : * PartitionDispatchData->indexes for details on how this array is
53 : * indexed.
54 : *
55 : * nonleaf_partitions
56 : * Array of 'max_dispatch' elements containing pointers to fake
57 : * ResultRelInfo objects for nonleaf partitions, useful for checking
58 : * the partition constraint.
59 : *
60 : * num_dispatch
61 : * The current number of items stored in the 'partition_dispatch_info'
62 : * array. Also serves as the index of the next free array element for
63 : * new PartitionDispatch objects that need to be stored.
64 : *
65 : * max_dispatch
66 : * The current allocated size of the 'partition_dispatch_info' array.
67 : *
68 : * partitions
69 : * Array of 'max_partitions' elements containing a pointer to a
70 : * ResultRelInfo for every leaf partition touched by tuple routing.
71 : * Some of these are pointers to ResultRelInfos which are borrowed out of
72 : * the owning ModifyTableState node. The remainder have been built
73 : * especially for tuple routing. See comment for
74 : * PartitionDispatchData->indexes for details on how this array is
75 : * indexed.
76 : *
77 : * is_borrowed_rel
78 : * Array of 'max_partitions' booleans recording whether a given entry
79 : * in 'partitions' is a ResultRelInfo pointer borrowed from the owning
80 : * ModifyTableState node, rather than being built here.
81 : *
82 : * num_partitions
83 : * The current number of items stored in the 'partitions' array. Also
84 : * serves as the index of the next free array element for new
85 : * ResultRelInfo objects that need to be stored.
86 : *
87 : * max_partitions
88 : * The current allocated size of the 'partitions' array.
89 : *
90 : * memcxt
91 : * Memory context used to allocate subsidiary structs.
92 : *-----------------------
93 : */
94 : struct PartitionTupleRouting
95 : {
96 : Relation partition_root;
97 : PartitionDispatch *partition_dispatch_info;
98 : ResultRelInfo **nonleaf_partitions;
99 : int num_dispatch;
100 : int max_dispatch;
101 : ResultRelInfo **partitions;
102 : bool *is_borrowed_rel;
103 : int num_partitions;
104 : int max_partitions;
105 : MemoryContext memcxt;
106 : };
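
As a rough sketch of the lifecycle these fields support (illustrative caller code only; the real drivers are nodeModifyTable.c and COPY FROM, error handling is elided, and next_tuple() is a hypothetical tuple source):

    /* Set up routing once per statement; ResultRelInfos are built lazily. */
    PartitionTupleRouting *proute =
        ExecSetupPartitionTupleRouting(estate, rootRel);

    while ((slot = next_tuple()) != NULL)
    {
        ResultRelInfo *leaf = ExecFindPartition(mtstate, rootResultRelInfo,
                                                proute, slot, estate);
        /* ... insert the tuple into leaf's relation ... */
    }

    ExecCleanupTupleRouting(mtstate, proute);   /* close partitions/indexes */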
107 :
108 : /*-----------------------
109 : * PartitionDispatch - information about one partitioned table in a partition
110 : * hierarchy required to route a tuple to any of its partitions. A
111 : * PartitionDispatch is always encapsulated inside a PartitionTupleRouting
112 : * struct and stored inside its 'partition_dispatch_info' array.
113 : *
114 : * reldesc
115 : * Relation descriptor of the table
116 : *
117 : * key
118 : * Partition key information of the table
119 : *
120 : * keystate
121 : * Execution state required for expressions in the partition key
122 : *
123 : * partdesc
124 : * Partition descriptor of the table
125 : *
126 : * tupslot
127 : * A standalone TupleTableSlot initialized with this table's tuple
128 : * descriptor, or NULL if no tuple conversion from the parent's
129 : * rowtype is required.
130 : *
131 : * tupmap
132 : * TupleConversionMap to convert from the parent's rowtype to this table's
133 : * rowtype (when extracting the partition key of a tuple just before
134 : * routing it through this table). A NULL value is stored if no tuple
135 : * conversion is required.
136 : *
137 : * indexes
138 : * Array of partdesc->nparts elements. For leaf partitions the index
139 : * corresponds to the partition's ResultRelInfo in the encapsulating
140 : * PartitionTupleRouting's partitions array. For partitioned partitions,
141 : * the index corresponds to the PartitionDispatch for it in its
142 : * partition_dispatch_info array. -1 indicates we've not yet allocated
143 : * anything in PartitionTupleRouting for the partition.
144 : *-----------------------
145 : */
146 : typedef struct PartitionDispatchData
147 : {
148 : Relation reldesc;
149 : PartitionKey key;
150 : List *keystate; /* list of ExprState */
151 : PartitionDesc partdesc;
152 : TupleTableSlot *tupslot;
153 : AttrMap *tupmap;
154 : int indexes[FLEXIBLE_ARRAY_MEMBER];
155 : } PartitionDispatchData;
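
To make the 'indexes' convention concrete, consider a hypothetical root table with three partitions: leaf A, leaf B (never receives a tuple), and sub-partitioned table C. After routing tuples into A and into a leaf under C, the root's PartitionDispatch would contain something like:

    indexes[0] == 0    /* A is a leaf: proute->partitions[0] */
    indexes[1] == -1   /* B untouched: nothing allocated yet */
    indexes[2] == 1    /* C is partitioned: proute->partition_dispatch_info[1] */

That is, a non-negative entry indexes 'partitions' for a leaf and 'partition_dispatch_info' for a partitioned child, while -1 means no routing state has been created for that partition yet.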
156 :
157 :
158 : static ResultRelInfo *ExecInitPartitionInfo(ModifyTableState *mtstate,
159 : EState *estate, PartitionTupleRouting *proute,
160 : PartitionDispatch dispatch,
161 : ResultRelInfo *rootResultRelInfo,
162 : int partidx);
163 : static void ExecInitRoutingInfo(ModifyTableState *mtstate,
164 : EState *estate,
165 : PartitionTupleRouting *proute,
166 : PartitionDispatch dispatch,
167 : ResultRelInfo *partRelInfo,
168 : int partidx,
169 : bool is_borrowed_rel);
170 : static PartitionDispatch ExecInitPartitionDispatchInfo(EState *estate,
171 : PartitionTupleRouting *proute,
172 : Oid partoid, PartitionDispatch parent_pd,
173 : int partidx, ResultRelInfo *rootResultRelInfo);
174 : static void FormPartitionKeyDatum(PartitionDispatch pd,
175 : TupleTableSlot *slot,
176 : EState *estate,
177 : Datum *values,
178 : bool *isnull);
179 : static int get_partition_for_tuple(PartitionDispatch pd, Datum *values,
180 : bool *isnull);
181 : static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
182 : Datum *values,
183 : bool *isnull,
184 : int maxfieldlen);
185 : static List *adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri);
186 : static List *adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap);
187 : static PartitionPruneState *CreatePartitionPruneState(PlanState *planstate,
188 : PartitionPruneInfo *pruneinfo);
189 : static void InitPartitionPruneContext(PartitionPruneContext *context,
190 : List *pruning_steps,
191 : PartitionDesc partdesc,
192 : PartitionKey partkey,
193 : PlanState *planstate,
194 : ExprContext *econtext);
195 : static void PartitionPruneFixSubPlanMap(PartitionPruneState *prunestate,
196 : Bitmapset *initially_valid_subplans,
197 : int n_total_subplans);
198 : static void find_matching_subplans_recurse(PartitionPruningData *prunedata,
199 : PartitionedRelPruningData *pprune,
200 : bool initial_prune,
201 : Bitmapset **validsubplans);
202 :
203 :
204 : /*
205 : * ExecSetupPartitionTupleRouting - sets up information needed during
206 : * tuple routing for partitioned tables, encapsulates it in
207 : * PartitionTupleRouting, and returns it.
208 : *
209 : * Callers must use the returned PartitionTupleRouting during calls to
210 : * ExecFindPartition(). The actual ResultRelInfo for a partition is only
211 : * allocated when the partition is found for the first time.
212 : *
213 : * The current memory context is used to allocate this struct and all
214 : * subsidiary structs that will be allocated from it later on. Typically
215 : * it should be estate->es_query_cxt.
216 : */
217 : PartitionTupleRouting *
218 6336 : ExecSetupPartitionTupleRouting(EState *estate, Relation rel)
219 : {
220 : PartitionTupleRouting *proute;
221 :
222 : /*
223 : * Here we attempt to expend as little effort as possible in setting up
224 : * the PartitionTupleRouting. Each partition's ResultRelInfo is built on
225 : * demand, only when we actually need to route a tuple to that partition.
226 : * The reason for this is that a common case is for INSERT to insert a
227 : * single tuple into a partitioned table, and this must be fast.
228 : */
229 6336 : proute = (PartitionTupleRouting *) palloc0(sizeof(PartitionTupleRouting));
230 6336 : proute->partition_root = rel;
231 6336 : proute->memcxt = CurrentMemoryContext;
232 : /* Rest of members initialized by zeroing */
233 :
234 : /*
235 : * Initialize this table's PartitionDispatch object. Here we pass in the
236 : * parent as NULL as we don't need to care about any parent of the target
237 : * partitioned table.
238 : */
239 6336 : ExecInitPartitionDispatchInfo(estate, proute, RelationGetRelid(rel),
240 : NULL, 0, NULL);
241 :
242 6336 : return proute;
243 : }
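
Because proute->memcxt captures CurrentMemoryContext, a caller that is not already running in the query context would typically switch first, as in this minimal sketch (assuming an available estate):

    MemoryContext oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
    PartitionTupleRouting *proute = ExecSetupPartitionTupleRouting(estate, rel);
    MemoryContextSwitchTo(oldcxt);

That keeps the routing structures alive until end of statement instead of tying them to a shorter-lived context.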
244 :
245 : /*
246 : * ExecFindPartition -- Return the ResultRelInfo for the leaf partition that
247 : * the tuple contained in *slot should belong to.
248 : *
249 : * If the partition's ResultRelInfo does not yet exist in 'proute' then we set
250 : * one up or reuse one from mtstate's resultRelInfo array. When reusing a
251 : * ResultRelInfo from the mtstate we verify that the relation is a valid
252 : * target for INSERTs and initialize tuple routing information.
253 : *
254 : * rootResultRelInfo is the relation named in the query.
255 : *
256 : * estate must be non-NULL; we'll need it to compute any expressions in the
257 : * partition keys. Also, its per-tuple contexts are used as evaluation
258 : * scratch space.
259 : *
260 : * If no leaf partition is found, this routine errors out with the appropriate
261 : * error message. An error may also be raised if the found target partition
262 : * is not a valid target for an INSERT.
263 : */
264 : ResultRelInfo *
265 935570 : ExecFindPartition(ModifyTableState *mtstate,
266 : ResultRelInfo *rootResultRelInfo,
267 : PartitionTupleRouting *proute,
268 : TupleTableSlot *slot, EState *estate)
269 : {
270 935570 : PartitionDispatch *pd = proute->partition_dispatch_info;
271 : Datum values[PARTITION_MAX_KEYS];
272 : bool isnull[PARTITION_MAX_KEYS];
273 : Relation rel;
274 : PartitionDispatch dispatch;
275 : PartitionDesc partdesc;
276 935570 : ExprContext *ecxt = GetPerTupleExprContext(estate);
277 935570 : TupleTableSlot *ecxt_scantuple_saved = ecxt->ecxt_scantuple;
278 935570 : TupleTableSlot *rootslot = slot;
279 935570 : TupleTableSlot *myslot = NULL;
280 : MemoryContext oldcxt;
281 935570 : ResultRelInfo *rri = NULL;
282 :
283 : /* use per-tuple context here to avoid leaking memory */
284 935570 : oldcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
285 :
286 : /*
287 : * First check the root table's partition constraint, if any. No point in
288 : * routing the tuple if it doesn't belong in the root table itself.
289 : */
290 935570 : if (rootResultRelInfo->ri_RelationDesc->rd_rel->relispartition)
291 4496 : ExecPartitionCheck(rootResultRelInfo, slot, estate, true);
292 :
293 : /* start with the root partitioned table */
294 935538 : dispatch = pd[0];
295 1983378 : while (dispatch != NULL)
296 : {
297 1048008 : int partidx = -1;
298 : bool is_leaf;
299 :
300 1048008 : CHECK_FOR_INTERRUPTS();
301 :
302 1048008 : rel = dispatch->reldesc;
303 1048008 : partdesc = dispatch->partdesc;
304 :
305 : /*
306 : * Extract partition key from tuple. Expression evaluation machinery
307 : * that FormPartitionKeyDatum() invokes expects ecxt_scantuple to
308 : * point to the correct tuple slot. The slot might have changed from
309 : * what was used for the parent table if the table of the current
310 : * partitioning level has a different tuple descriptor from the parent's.
311 : * So update ecxt_scantuple accordingly.
312 : */
313 1048008 : ecxt->ecxt_scantuple = slot;
314 1048008 : FormPartitionKeyDatum(dispatch, slot, estate, values, isnull);
315 :
316 : /*
317 : * If this partitioned table has no partitions or no partition for
318 : * these values, error out.
319 : */
320 2095974 : if (partdesc->nparts == 0 ||
321 1047966 : (partidx = get_partition_for_tuple(dispatch, values, isnull)) < 0)
322 : {
323 : char *val_desc;
324 :
325 148 : val_desc = ExecBuildSlotPartitionKeyDescription(rel,
326 : values, isnull, 64);
327 : Assert(OidIsValid(RelationGetRelid(rel)));
328 148 : ereport(ERROR,
329 : (errcode(ERRCODE_CHECK_VIOLATION),
330 : errmsg("no partition of relation \"%s\" found for row",
331 : RelationGetRelationName(rel)),
332 : val_desc ?
333 : errdetail("Partition key of the failing row contains %s.",
334 : val_desc) : 0,
335 : errtable(rel)));
336 : }
337 :
338 1047860 : is_leaf = partdesc->is_leaf[partidx];
339 1047860 : if (is_leaf)
340 : {
341 : /*
342 : * We've reached the leaf -- hurray, we're done. Look to see if
343 : * we've already got a ResultRelInfo for this partition.
344 : */
345 935388 : if (likely(dispatch->indexes[partidx] >= 0))
346 : {
347 : /* ResultRelInfo already built */
348 : Assert(dispatch->indexes[partidx] < proute->num_partitions);
349 927396 : rri = proute->partitions[dispatch->indexes[partidx]];
350 : }
351 : else
352 : {
353 : /*
354 : * If the partition is known in the owning ModifyTableState
355 : * node, we can re-use that ResultRelInfo instead of creating
356 : * a new one with ExecInitPartitionInfo().
357 : */
358 7992 : rri = ExecLookupResultRelByOid(mtstate,
359 7992 : partdesc->oids[partidx],
360 : true, false);
361 7992 : if (rri)
362 : {
363 : /* Verify this ResultRelInfo allows INSERTs */
364 386 : CheckValidResultRel(rri, CMD_INSERT);
365 :
366 : /*
367 : * Initialize information needed to insert this and
368 : * subsequent tuples routed to this partition.
369 : */
370 386 : ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
371 : rri, partidx, true);
372 : }
373 : else
374 : {
375 : /* We need to create a new one. */
376 7606 : rri = ExecInitPartitionInfo(mtstate, estate, proute,
377 : dispatch,
378 : rootResultRelInfo, partidx);
379 : }
380 : }
381 : Assert(rri != NULL);
382 :
383 : /* Signal to terminate the loop */
384 935370 : dispatch = NULL;
385 : }
386 : else
387 : {
388 : /*
389 : * Partition is a sub-partitioned table; get the PartitionDispatch
390 : */
391 112472 : if (likely(dispatch->indexes[partidx] >= 0))
392 : {
393 : /* Already built. */
394 : Assert(dispatch->indexes[partidx] < proute->num_dispatch);
395 :
396 111332 : rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
397 :
398 : /*
399 : * Move down to the next partition level and search again
400 : * until we find a leaf partition that matches this tuple
401 : */
402 111332 : dispatch = pd[dispatch->indexes[partidx]];
403 : }
404 : else
405 : {
406 : /* Not yet built. Do that now. */
407 : PartitionDispatch subdispatch;
408 :
409 : /*
410 : * Create the new PartitionDispatch. We pass the current one
411 : * in as the parent PartitionDispatch
412 : */
413 1140 : subdispatch = ExecInitPartitionDispatchInfo(estate,
414 : proute,
415 1140 : partdesc->oids[partidx],
416 : dispatch, partidx,
417 : mtstate->rootResultRelInfo);
418 : Assert(dispatch->indexes[partidx] >= 0 &&
419 : dispatch->indexes[partidx] < proute->num_dispatch);
420 :
421 1140 : rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
422 1140 : dispatch = subdispatch;
423 : }
424 :
425 : /*
426 : * Convert the tuple to the new parent's layout, if different from
427 : * the previous parent.
428 : */
429 112472 : if (dispatch->tupslot)
430 : {
431 61680 : AttrMap *map = dispatch->tupmap;
432 61680 : TupleTableSlot *tempslot = myslot;
433 :
434 61680 : myslot = dispatch->tupslot;
435 61680 : slot = execute_attr_map_slot(map, slot, myslot);
436 :
437 61680 : if (tempslot != NULL)
438 294 : ExecClearTuple(tempslot);
439 : }
440 : }
441 :
442 : /*
443 : * If this partition is the default one, we must check its partition
444 : * constraint now, which may have changed concurrently due to
445 : * partitions being added to the parent.
446 : *
447 : * (We do this here, and do not rely on ExecInsert doing it, because
448 : * we don't want to miss doing it for non-leaf partitions.)
449 : */
450 1047842 : if (partidx == partdesc->boundinfo->default_index)
451 : {
452 : /*
453 : * The tuple must match the partition's layout for the constraint
454 : * expression to be evaluated successfully. If the partition is
455 : * sub-partitioned, that would already be the case due to the code
456 : * above, but for a leaf partition the tuple still matches the
457 : * parent's layout.
458 : *
459 : * Note that we have a map to convert from root to current
460 : * partition, but not from immediate parent to current partition.
461 : * So if we have to convert, do it from the root slot; if not, use
462 : * the root slot as-is.
463 : */
464 522 : if (is_leaf)
465 : {
466 478 : TupleConversionMap *map = ExecGetRootToChildMap(rri, estate);
467 :
468 478 : if (map)
469 114 : slot = execute_attr_map_slot(map->attrMap, rootslot,
470 : rri->ri_PartitionTupleSlot);
471 : else
472 364 : slot = rootslot;
473 : }
474 :
475 522 : ExecPartitionCheck(rri, slot, estate, true);
476 : }
477 : }
478 :
479 : /* Release the tuple in the lowest parent's dedicated slot. */
480 935370 : if (myslot != NULL)
481 61354 : ExecClearTuple(myslot);
482 : /* and restore ecxt's scantuple */
483 935370 : ecxt->ecxt_scantuple = ecxt_scantuple_saved;
484 935370 : MemoryContextSwitchTo(oldcxt);
485 :
486 935370 : return rri;
487 : }
488 :
489 : /*
490 : * ExecInitPartitionInfo
491 : * Lock the partition and initialize ResultRelInfo. Also set up other
492 : * information for the partition and store it in the next empty slot in
493 : * the proute->partitions array.
494 : *
495 : * Returns the ResultRelInfo
496 : */
497 : static ResultRelInfo *
498 7606 : ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
499 : PartitionTupleRouting *proute,
500 : PartitionDispatch dispatch,
501 : ResultRelInfo *rootResultRelInfo,
502 : int partidx)
503 : {
504 7606 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
505 7606 : Oid partOid = dispatch->partdesc->oids[partidx];
506 : Relation partrel;
507 7606 : int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
508 7606 : Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
509 : ResultRelInfo *leaf_part_rri;
510 : MemoryContext oldcxt;
511 7606 : AttrMap *part_attmap = NULL;
512 : bool found_whole_row;
513 :
514 7606 : oldcxt = MemoryContextSwitchTo(proute->memcxt);
515 :
516 7606 : partrel = table_open(partOid, RowExclusiveLock);
517 :
518 7606 : leaf_part_rri = makeNode(ResultRelInfo);
519 7606 : InitResultRelInfo(leaf_part_rri,
520 : partrel,
521 : 0,
522 : rootResultRelInfo,
523 : estate->es_instrument);
524 :
525 : /*
526 : * Verify result relation is a valid target for an INSERT. An UPDATE of a
527 : * partition-key becomes a DELETE+INSERT operation, so this check is still
528 : * required when the operation is CMD_UPDATE.
529 : */
530 7606 : CheckValidResultRel(leaf_part_rri, CMD_INSERT);
531 :
532 : /*
533 : * Open partition indices. The user may have asked to check for conflicts
534 : * within this leaf partition and do "nothing" instead of throwing an
535 : * error. Be prepared in that case by initializing the index information
536 : * needed by ExecInsert() to perform speculative insertions.
537 : */
538 7600 : if (partrel->rd_rel->relhasindex &&
539 1594 : leaf_part_rri->ri_IndexRelationDescs == NULL)
540 1594 : ExecOpenIndices(leaf_part_rri,
541 3018 : (node != NULL &&
542 1424 : node->onConflictAction != ONCONFLICT_NONE));
543 :
544 : /*
545 : * Build WITH CHECK OPTION constraints for the partition. Note that we
546 : * didn't build the withCheckOptionList for partitions within the planner,
547 : * but simple translation of varattnos will suffice. This only occurs for
548 : * the INSERT case or in the case of UPDATE/MERGE tuple routing where we
549 : * didn't find a result rel to reuse.
550 : */
551 7600 : if (node && node->withCheckOptionLists != NIL)
552 : {
553 : List *wcoList;
554 96 : List *wcoExprs = NIL;
555 : ListCell *ll;
556 :
557 : /*
558 : * In the case of INSERT on a partitioned table, there is only one
559 : * plan. Likewise, there is only one WCO list, not one per partition.
560 : * For UPDATE/MERGE, there are as many WCO lists as there are plans.
561 : */
562 : Assert((node->operation == CMD_INSERT &&
563 : list_length(node->withCheckOptionLists) == 1 &&
564 : list_length(node->resultRelations) == 1) ||
565 : (node->operation == CMD_UPDATE &&
566 : list_length(node->withCheckOptionLists) ==
567 : list_length(node->resultRelations)) ||
568 : (node->operation == CMD_MERGE &&
569 : list_length(node->withCheckOptionLists) ==
570 : list_length(node->resultRelations)));
571 :
572 : /*
573 : * Use the WCO list of the first plan as a reference to calculate
574 : * attno's for the WCO list of this partition. In the INSERT case,
575 : * that refers to the root partitioned table, whereas in the UPDATE
576 : * tuple routing case, that refers to the first partition in the
577 : * mtstate->resultRelInfo array. In any case, both that relation and
578 : * this partition should have the same columns, so we should be able
579 : * to map attributes successfully.
580 : */
581 96 : wcoList = linitial(node->withCheckOptionLists);
582 :
583 : /*
584 : * Convert Vars in it to contain this partition's attribute numbers.
585 : */
586 : part_attmap =
587 96 : build_attrmap_by_name(RelationGetDescr(partrel),
588 : RelationGetDescr(firstResultRel),
589 : false);
590 : wcoList = (List *)
591 96 : map_variable_attnos((Node *) wcoList,
592 : firstVarno, 0,
593 : part_attmap,
594 96 : RelationGetForm(partrel)->reltype,
595 : &found_whole_row);
596 : /* We ignore the value of found_whole_row. */
597 :
598 270 : foreach(ll, wcoList)
599 : {
600 174 : WithCheckOption *wco = lfirst_node(WithCheckOption, ll);
601 174 : ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
602 : &mtstate->ps);
603 :
604 174 : wcoExprs = lappend(wcoExprs, wcoExpr);
605 : }
606 :
607 96 : leaf_part_rri->ri_WithCheckOptions = wcoList;
608 96 : leaf_part_rri->ri_WithCheckOptionExprs = wcoExprs;
609 : }
610 :
611 : /*
612 : * Build the RETURNING projection for the partition. Note that we didn't
613 : * build the returningList for partitions within the planner, but simple
614 : * translation of varattnos will suffice. This only occurs for the INSERT
615 : * case or in the case of UPDATE tuple routing where we didn't find a
616 : * result rel to reuse.
617 : */
618 7600 : if (node && node->returningLists != NIL)
619 : {
620 : TupleTableSlot *slot;
621 : ExprContext *econtext;
622 : List *returningList;
623 :
624 : /* See the comment above for WCO lists. */
625 : /* (except no RETURNING support for MERGE yet) */
626 : Assert((node->operation == CMD_INSERT &&
627 : list_length(node->returningLists) == 1 &&
628 : list_length(node->resultRelations) == 1) ||
629 : (node->operation == CMD_UPDATE &&
630 : list_length(node->returningLists) ==
631 : list_length(node->resultRelations)));
632 :
633 : /*
634 : * Use the RETURNING list of the first plan as a reference to
635 : * calculate attno's for the RETURNING list of this partition. See
636 : * the comment above for WCO lists for more details on why this is
637 : * okay.
638 : */
639 146 : returningList = linitial(node->returningLists);
640 :
641 : /*
642 : * Convert Vars in it to contain this partition's attribute numbers.
643 : */
644 146 : if (part_attmap == NULL)
645 : part_attmap =
646 146 : build_attrmap_by_name(RelationGetDescr(partrel),
647 : RelationGetDescr(firstResultRel),
648 : false);
649 : returningList = (List *)
650 146 : map_variable_attnos((Node *) returningList,
651 : firstVarno, 0,
652 : part_attmap,
653 146 : RelationGetForm(partrel)->reltype,
654 : &found_whole_row);
655 : /* We ignore the value of found_whole_row. */
656 :
657 146 : leaf_part_rri->ri_returningList = returningList;
658 :
659 : /*
660 : * Initialize the projection itself.
661 : *
662 : * Use the slot and the expression context that would have been set up
663 : * in ExecInitModifyTable() for the projection's output.
664 : */
665 : Assert(mtstate->ps.ps_ResultTupleSlot != NULL);
666 146 : slot = mtstate->ps.ps_ResultTupleSlot;
667 : Assert(mtstate->ps.ps_ExprContext != NULL);
668 146 : econtext = mtstate->ps.ps_ExprContext;
669 146 : leaf_part_rri->ri_projectReturning =
670 146 : ExecBuildProjectionInfo(returningList, econtext, slot,
671 : &mtstate->ps, RelationGetDescr(partrel));
672 : }
673 :
674 : /* Set up information needed for routing tuples to the partition. */
675 7600 : ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
676 : leaf_part_rri, partidx, false);
677 :
678 : /*
679 : * If there is an ON CONFLICT clause, initialize state for it.
680 : */
681 7600 : if (node && node->onConflictAction != ONCONFLICT_NONE)
682 : {
683 222 : TupleDesc partrelDesc = RelationGetDescr(partrel);
684 222 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
685 : ListCell *lc;
686 222 : List *arbiterIndexes = NIL;
687 :
688 : /*
689 : * If there is a list of arbiter indexes, map it to a list of indexes
690 : * in the partition. We do that by scanning the partition's index
691 : * list and searching for ancestry relationships to each index in the
692 : * ancestor table.
693 : */
694 222 : if (rootResultRelInfo->ri_onConflictArbiterIndexes != NIL)
695 : {
696 : List *childIdxs;
697 :
698 172 : childIdxs = RelationGetIndexList(leaf_part_rri->ri_RelationDesc);
699 :
700 356 : foreach(lc, childIdxs)
701 : {
702 184 : Oid childIdx = lfirst_oid(lc);
703 : List *ancestors;
704 : ListCell *lc2;
705 :
706 184 : ancestors = get_partition_ancestors(childIdx);
707 368 : foreach(lc2, rootResultRelInfo->ri_onConflictArbiterIndexes)
708 : {
709 184 : if (list_member_oid(ancestors, lfirst_oid(lc2)))
710 172 : arbiterIndexes = lappend_oid(arbiterIndexes, childIdx);
711 : }
712 184 : list_free(ancestors);
713 : }
714 : }
715 :
716 : /*
717 : * If the resulting lists are of unequal length, something is wrong.
718 : * (This shouldn't happen, since arbiter index selection should not
719 : * pick up an invalid index.)
720 : */
721 444 : if (list_length(rootResultRelInfo->ri_onConflictArbiterIndexes) !=
722 222 : list_length(arbiterIndexes))
723 0 : elog(ERROR, "invalid arbiter index list");
724 222 : leaf_part_rri->ri_onConflictArbiterIndexes = arbiterIndexes;
725 :
726 : /*
727 : * In the DO UPDATE case, we have some more state to initialize.
728 : */
729 222 : if (node->onConflictAction == ONCONFLICT_UPDATE)
730 : {
731 166 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
732 : TupleConversionMap *map;
733 :
734 166 : map = ExecGetRootToChildMap(leaf_part_rri, estate);
735 :
736 : Assert(node->onConflictSet != NIL);
737 : Assert(rootResultRelInfo->ri_onConflict != NULL);
738 :
739 166 : leaf_part_rri->ri_onConflict = onconfl;
740 :
741 : /*
742 : * Need a separate existing slot for each partition, as the
743 : * partition could be of a different AM, even if the tuple
744 : * descriptors match.
745 : */
746 166 : onconfl->oc_Existing =
747 166 : table_slot_create(leaf_part_rri->ri_RelationDesc,
748 166 : &mtstate->ps.state->es_tupleTable);
749 :
750 : /*
751 : * If the partition's tuple descriptor matches exactly the root
752 : * parent (the common case), we can re-use most of the parent's ON
753 : * CONFLICT SET state, skipping a bunch of work. Otherwise, we
754 : * need to create state specific to this partition.
755 : */
756 166 : if (map == NULL)
757 : {
758 : /*
759 : * It's safe to reuse these from the partition root, as we
760 : * only process one tuple at a time (therefore we won't
761 : * overwrite needed data in slots), and the results of
762 : * projections are independent of the underlying storage.
763 : * Projections and WHERE clauses themselves don't store state
764 : * and are likewise independent of the underlying storage.
765 : */
766 90 : onconfl->oc_ProjSlot =
767 90 : rootResultRelInfo->ri_onConflict->oc_ProjSlot;
768 90 : onconfl->oc_ProjInfo =
769 90 : rootResultRelInfo->ri_onConflict->oc_ProjInfo;
770 90 : onconfl->oc_WhereClause =
771 90 : rootResultRelInfo->ri_onConflict->oc_WhereClause;
772 : }
773 : else
774 : {
775 : List *onconflset;
776 : List *onconflcols;
777 :
778 : /*
779 : * Translate expressions in onConflictSet to account for
780 : * different attribute numbers. For that, map partition
781 : * varattnos twice: first to catch the EXCLUDED
782 : * pseudo-relation (INNER_VAR), and second to handle the main
783 : * target relation (firstVarno).
784 : */
785 76 : onconflset = copyObject(node->onConflictSet);
786 76 : if (part_attmap == NULL)
787 : part_attmap =
788 70 : build_attrmap_by_name(RelationGetDescr(partrel),
789 : RelationGetDescr(firstResultRel),
790 : false);
791 : onconflset = (List *)
792 76 : map_variable_attnos((Node *) onconflset,
793 : INNER_VAR, 0,
794 : part_attmap,
795 76 : RelationGetForm(partrel)->reltype,
796 : &found_whole_row);
797 : /* We ignore the value of found_whole_row. */
798 : onconflset = (List *)
799 76 : map_variable_attnos((Node *) onconflset,
800 : firstVarno, 0,
801 : part_attmap,
802 76 : RelationGetForm(partrel)->reltype,
803 : &found_whole_row);
804 : /* We ignore the value of found_whole_row. */
805 :
806 : /* Finally, adjust the target colnos to match the partition. */
807 76 : onconflcols = adjust_partition_colnos(node->onConflictCols,
808 : leaf_part_rri);
809 :
810 : /* create the tuple slot for the UPDATE SET projection */
811 76 : onconfl->oc_ProjSlot =
812 76 : table_slot_create(partrel,
813 76 : &mtstate->ps.state->es_tupleTable);
814 :
815 : /* build UPDATE SET projection state */
816 76 : onconfl->oc_ProjInfo =
817 76 : ExecBuildUpdateProjection(onconflset,
818 : true,
819 : onconflcols,
820 : partrelDesc,
821 : econtext,
822 : onconfl->oc_ProjSlot,
823 : &mtstate->ps);
824 :
825 : /*
826 : * If there is a WHERE clause, initialize state where it will
827 : * be evaluated, mapping the attribute numbers appropriately.
828 : * As with onConflictSet, we need to map partition varattnos
829 : * to the partition's tupdesc.
830 : */
831 76 : if (node->onConflictWhere)
832 : {
833 : List *clause;
834 :
835 30 : clause = copyObject((List *) node->onConflictWhere);
836 : clause = (List *)
837 30 : map_variable_attnos((Node *) clause,
838 : INNER_VAR, 0,
839 : part_attmap,
840 30 : RelationGetForm(partrel)->reltype,
841 : &found_whole_row);
842 : /* We ignore the value of found_whole_row. */
843 : clause = (List *)
844 30 : map_variable_attnos((Node *) clause,
845 : firstVarno, 0,
846 : part_attmap,
847 30 : RelationGetForm(partrel)->reltype,
848 : &found_whole_row);
849 : /* We ignore the value of found_whole_row. */
850 30 : onconfl->oc_WhereClause =
851 30 : ExecInitQual((List *) clause, &mtstate->ps);
852 : }
853 : }
854 : }
855 : }
856 :
857 : /*
858 : * Since we've just initialized this ResultRelInfo, it's not in any list
859 : * attached to the estate as yet. Add it, so that it can be found later.
860 : *
861 : * Note that the entries in this list appear in no predetermined order,
862 : * because partition result rels are initialized as and when they're
863 : * needed.
864 : */
865 7600 : MemoryContextSwitchTo(estate->es_query_cxt);
866 7600 : estate->es_tuple_routing_result_relations =
867 7600 : lappend(estate->es_tuple_routing_result_relations,
868 : leaf_part_rri);
869 :
870 : /*
871 : * Initialize information about this partition that's needed to handle
872 : * MERGE. We take the "first" result relation's mergeActionList as
873 : * reference and make a copy for this relation, converting stuff that
874 : * references attribute numbers to match this relation's.
875 : *
876 : * This duplicates much of the logic in ExecInitMerge(), so if something
877 : * changes there, look here too.
878 : */
879 7600 : if (node && node->operation == CMD_MERGE)
880 : {
881 44 : List *firstMergeActionList = linitial(node->mergeActionLists);
882 : ListCell *lc;
883 44 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
884 :
885 44 : if (part_attmap == NULL)
886 : part_attmap =
887 38 : build_attrmap_by_name(RelationGetDescr(partrel),
888 : RelationGetDescr(firstResultRel),
889 : false);
890 :
891 44 : if (unlikely(!leaf_part_rri->ri_projectNewInfoValid))
892 44 : ExecInitMergeTupleSlots(mtstate, leaf_part_rri);
893 :
894 138 : foreach(lc, firstMergeActionList)
895 : {
896 : /* Make a copy for this relation to be safe. */
897 94 : MergeAction *action = copyObject(lfirst(lc));
898 : MergeActionState *action_state;
899 : List **list;
900 :
901 : /* Generate the action's state for this relation */
902 94 : action_state = makeNode(MergeActionState);
903 94 : action_state->mas_action = action;
904 :
905 : /* And put the action in the appropriate list */
906 94 : if (action->matched)
907 56 : list = &leaf_part_rri->ri_matchedMergeAction;
908 : else
909 38 : list = &leaf_part_rri->ri_notMatchedMergeAction;
910 94 : *list = lappend(*list, action_state);
911 :
912 94 : switch (action->commandType)
913 : {
914 38 : case CMD_INSERT:
915 :
916 : /*
917 : * ExecCheckPlanOutput() was already done on the targetlist
918 : * when the "first" result relation was initialized, and it
919 : * is the same for all result relations.
920 : */
921 38 : action_state->mas_proj =
922 38 : ExecBuildProjectionInfo(action->targetList, econtext,
923 : leaf_part_rri->ri_newTupleSlot,
924 : &mtstate->ps,
925 : RelationGetDescr(partrel));
926 38 : break;
927 44 : case CMD_UPDATE:
928 :
929 : /*
930 : * Convert updateColnos from "first" result relation
931 : * attribute numbers to this result rel's.
932 : */
933 44 : if (part_attmap)
934 44 : action->updateColnos =
935 44 : adjust_partition_colnos_using_map(action->updateColnos,
936 : part_attmap);
937 44 : action_state->mas_proj =
938 44 : ExecBuildUpdateProjection(action->targetList,
939 : true,
940 : action->updateColnos,
941 44 : RelationGetDescr(leaf_part_rri->ri_RelationDesc),
942 : econtext,
943 : leaf_part_rri->ri_newTupleSlot,
944 : NULL);
945 44 : break;
946 12 : case CMD_DELETE:
947 12 : break;
948 :
949 0 : default:
950 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
951 : }
952 :
953 : /* found_whole_row intentionally ignored. */
954 94 : action->qual =
955 94 : map_variable_attnos(action->qual,
956 : firstVarno, 0,
957 : part_attmap,
958 94 : RelationGetForm(partrel)->reltype,
959 : &found_whole_row);
960 94 : action_state->mas_whenqual =
961 94 : ExecInitQual((List *) action->qual, &mtstate->ps);
962 : }
963 : }
964 7600 : MemoryContextSwitchTo(oldcxt);
965 :
966 7600 : return leaf_part_rri;
967 : }
968 :
969 : /*
970 : * ExecInitRoutingInfo
971 : * Set up information needed for translating tuples between root
972 : * partitioned table format and partition format, and keep track of it
973 : * in PartitionTupleRouting.
974 : */
975 : static void
976 7986 : ExecInitRoutingInfo(ModifyTableState *mtstate,
977 : EState *estate,
978 : PartitionTupleRouting *proute,
979 : PartitionDispatch dispatch,
980 : ResultRelInfo *partRelInfo,
981 : int partidx,
982 : bool is_borrowed_rel)
983 : {
984 : MemoryContext oldcxt;
985 : int rri_index;
986 :
987 7986 : oldcxt = MemoryContextSwitchTo(proute->memcxt);
988 :
989 : /*
990 : * Set up tuple conversion between root parent and the partition if the
991 : * two have different rowtypes. If conversion is indeed required, also
992 : * initialize a slot dedicated to storing this partition's converted
993 : * tuples. Various operations that are applied to tuples after routing,
994 : * such as checking constraints, will refer to this slot.
995 : */
996 7986 : if (ExecGetRootToChildMap(partRelInfo, estate) != NULL)
997 : {
998 1200 : Relation partrel = partRelInfo->ri_RelationDesc;
999 :
1000 : /*
1001 : * This pins the partition's TupleDesc, which will be released at the
1002 : * end of the command.
1003 : */
1004 1200 : partRelInfo->ri_PartitionTupleSlot =
1005 1200 : table_slot_create(partrel, &estate->es_tupleTable);
1006 : }
1007 : else
1008 6786 : partRelInfo->ri_PartitionTupleSlot = NULL;
1009 :
1010 : /*
1011 : * If the partition is a foreign table, let the FDW init itself for
1012 : * routing tuples to the partition.
1013 : */
1014 7986 : if (partRelInfo->ri_FdwRoutine != NULL &&
1015 84 : partRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL)
1016 84 : partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);
1017 :
1018 : /*
1019 : * Determine if the FDW supports batch insert and determine the batch size
1020 : * (an FDW may support batching, but it may be disabled for the
1021 : * server/table or for this particular query).
1022 : *
1023 : * If the FDW does not support batching, we set the batch size to 1.
1024 : */
1025 7974 : if (partRelInfo->ri_FdwRoutine != NULL &&
1026 72 : partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
1027 72 : partRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
1028 72 : partRelInfo->ri_BatchSize =
1029 72 : partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(partRelInfo);
1030 : else
1031 7902 : partRelInfo->ri_BatchSize = 1;
1032 :
1033 : Assert(partRelInfo->ri_BatchSize >= 1);
1034 :
1035 7974 : partRelInfo->ri_CopyMultiInsertBuffer = NULL;
1036 :
1037 : /*
1038 : * Keep track of it in the PartitionTupleRouting->partitions array.
1039 : */
1040 : Assert(dispatch->indexes[partidx] == -1);
1041 :
1042 7974 : rri_index = proute->num_partitions++;
1043 :
1044 : /* Allocate or enlarge the array, as needed */
1045 7974 : if (proute->num_partitions >= proute->max_partitions)
1046 : {
1047 6080 : if (proute->max_partitions == 0)
1048 : {
1049 6074 : proute->max_partitions = 8;
1050 6074 : proute->partitions = (ResultRelInfo **)
1051 6074 : palloc(sizeof(ResultRelInfo *) * proute->max_partitions);
1052 6074 : proute->is_borrowed_rel = (bool *)
1053 6074 : palloc(sizeof(bool) * proute->max_partitions);
1054 : }
1055 : else
1056 : {
1057 6 : proute->max_partitions *= 2;
1058 6 : proute->partitions = (ResultRelInfo **)
1059 6 : repalloc(proute->partitions, sizeof(ResultRelInfo *) *
1060 6 : proute->max_partitions);
1061 6 : proute->is_borrowed_rel = (bool *)
1062 6 : repalloc(proute->is_borrowed_rel, sizeof(bool) *
1063 6 : proute->max_partitions);
1064 : }
1065 : }
1066 :
1067 7974 : proute->partitions[rri_index] = partRelInfo;
1068 7974 : proute->is_borrowed_rel[rri_index] = is_borrowed_rel;
1069 7974 : dispatch->indexes[partidx] = rri_index;
1070 :
1071 7974 : MemoryContextSwitchTo(oldcxt);
1072 7974 : }
1073 :
1074 : /*
1075 : * ExecInitPartitionDispatchInfo
1076 : * Lock the partitioned table (if not locked already) and initialize
1077 : * PartitionDispatch for a partitioned table and store it in the next
1078 : * available slot in the proute->partition_dispatch_info array. Also,
1079 : * record that index in the parent_pd->indexes[] element for partidx so
1080 : * that we can properly retrieve the newly created
1081 : * PartitionDispatch later.
1082 : */
1083 : static PartitionDispatch
1084 7476 : ExecInitPartitionDispatchInfo(EState *estate,
1085 : PartitionTupleRouting *proute, Oid partoid,
1086 : PartitionDispatch parent_pd, int partidx,
1087 : ResultRelInfo *rootResultRelInfo)
1088 : {
1089 : Relation rel;
1090 : PartitionDesc partdesc;
1091 : PartitionDispatch pd;
1092 : int dispatchidx;
1093 : MemoryContext oldcxt;
1094 :
1095 : /*
1096 : * For data modification, it is better that the executor does not include
1097 : * partitions being detached, except when running in snapshot-isolation
1098 : * mode. This means that a read-committed transaction immediately gets a
1099 : * "no partition for tuple" error when a tuple is inserted into a
1100 : * partition that's being detached concurrently, but a transaction in
1101 : * repeatable-read mode can still use such a partition.
1102 : */
1103 7476 : if (estate->es_partition_directory == NULL)
1104 6324 : estate->es_partition_directory =
1105 6324 : CreatePartitionDirectory(estate->es_query_cxt,
1106 : !IsolationUsesXactSnapshot());
1107 :
1108 7476 : oldcxt = MemoryContextSwitchTo(proute->memcxt);
1109 :
1110 : /*
1111 : * Only sub-partitioned tables need to be locked here. The root
1112 : * partitioned table will already have been locked as it's referenced in
1113 : * the query's rtable.
1114 : */
1115 7476 : if (partoid != RelationGetRelid(proute->partition_root))
1116 1140 : rel = table_open(partoid, RowExclusiveLock);
1117 : else
1118 6336 : rel = proute->partition_root;
1119 7476 : partdesc = PartitionDirectoryLookup(estate->es_partition_directory, rel);
1120 :
1121 7476 : pd = (PartitionDispatch) palloc(offsetof(PartitionDispatchData, indexes) +
1122 7476 : partdesc->nparts * sizeof(int));
1123 7476 : pd->reldesc = rel;
1124 7476 : pd->key = RelationGetPartitionKey(rel);
1125 7476 : pd->keystate = NIL;
1126 7476 : pd->partdesc = partdesc;
1127 7476 : if (parent_pd != NULL)
1128 : {
1129 1140 : TupleDesc tupdesc = RelationGetDescr(rel);
1130 :
1131 : /*
1132 : * For a sub-partitioned table whose column order differs from its
1133 : * direct parent partitioned table, we must store a tuple table slot
1134 : * initialized with its tuple descriptor and a tuple conversion map to
1135 : * convert a tuple from its parent's rowtype to its own. This is to
1136 : * make sure that we are looking at the correct row using the correct
1137 : * tuple descriptor when computing its partition key for tuple
1138 : * routing.
1139 : */
1140 1140 : pd->tupmap = build_attrmap_by_name_if_req(RelationGetDescr(parent_pd->reldesc),
1141 : tupdesc,
1142 : false);
1143 1140 : pd->tupslot = pd->tupmap ?
1144 1140 : MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual) : NULL;
1145 : }
1146 : else
1147 : {
1148 : /* Not required for the root partitioned table */
1149 6336 : pd->tupmap = NULL;
1150 6336 : pd->tupslot = NULL;
1151 : }
1152 :
1153 : /*
1154 : * Initialize with -1 to signify that the corresponding partition's
1155 : * ResultRelInfo or PartitionDispatch has not been created yet.
1156 : */
1157 7476 : memset(pd->indexes, -1, sizeof(int) * partdesc->nparts);
1158 :
1159 : /* Track in PartitionTupleRouting for later use */
1160 7476 : dispatchidx = proute->num_dispatch++;
1161 :
1162 : /* Allocate or enlarge the array, as needed */
1163 7476 : if (proute->num_dispatch >= proute->max_dispatch)
1164 : {
1165 6336 : if (proute->max_dispatch == 0)
1166 : {
1167 6336 : proute->max_dispatch = 4;
1168 6336 : proute->partition_dispatch_info = (PartitionDispatch *)
1169 6336 : palloc(sizeof(PartitionDispatch) * proute->max_dispatch);
1170 6336 : proute->nonleaf_partitions = (ResultRelInfo **)
1171 6336 : palloc(sizeof(ResultRelInfo *) * proute->max_dispatch);
1172 : }
1173 : else
1174 : {
1175 0 : proute->max_dispatch *= 2;
1176 0 : proute->partition_dispatch_info = (PartitionDispatch *)
1177 0 : repalloc(proute->partition_dispatch_info,
1178 0 : sizeof(PartitionDispatch) * proute->max_dispatch);
1179 0 : proute->nonleaf_partitions = (ResultRelInfo **)
1180 0 : repalloc(proute->nonleaf_partitions,
1181 0 : sizeof(ResultRelInfo *) * proute->max_dispatch);
1182 : }
1183 : }
1184 7476 : proute->partition_dispatch_info[dispatchidx] = pd;
1185 :
1186 : /*
1187 : * If setting up a PartitionDispatch for a sub-partitioned table, we may
1188 : * also need a minimally valid ResultRelInfo for checking the partition
1189 : * constraint later; set that up now.
1190 : */
1191 7476 : if (parent_pd)
1192 : {
1193 1140 : ResultRelInfo *rri = makeNode(ResultRelInfo);
1194 :
1195 1140 : InitResultRelInfo(rri, rel, 0, rootResultRelInfo, 0);
1196 1140 : proute->nonleaf_partitions[dispatchidx] = rri;
1197 : }
1198 : else
1199 6336 : proute->nonleaf_partitions[dispatchidx] = NULL;
1200 :
1201 : /*
1202 : * Finally, if setting up a PartitionDispatch for a sub-partitioned table,
1203 : * install a downlink in the parent to allow quick descent.
1204 : */
1205 7476 : if (parent_pd)
1206 : {
1207 : Assert(parent_pd->indexes[partidx] == -1);
1208 1140 : parent_pd->indexes[partidx] = dispatchidx;
1209 : }
1210 :
1211 7476 : MemoryContextSwitchTo(oldcxt);
1212 :
1213 7476 : return pd;
1214 : }
1215 :
1216 : /*
1217 : * ExecCleanupTupleRouting -- Clean up objects allocated for partition tuple
1218 : * routing.
1219 : *
1220 : * Close all the partitioned tables, leaf partitions, and their indices.
1221 : */
1222 : void
1223 5658 : ExecCleanupTupleRouting(ModifyTableState *mtstate,
1224 : PartitionTupleRouting *proute)
1225 : {
1226 : int i;
1227 :
1228 : /*
1229 : * Remember, proute->partition_dispatch_info[0] corresponds to the root
1230 : * partitioned table, which we must not try to close, because it is the
1231 : * main target table of the query that will be closed by callers such as
1232 : * ExecEndPlan() or DoCopy(). Also, tupslot is NULL for the root
1233 : * partitioned table.
1234 : */
1235 6586 : for (i = 1; i < proute->num_dispatch; i++)
1236 : {
1237 928 : PartitionDispatch pd = proute->partition_dispatch_info[i];
1238 :
1239 928 : table_close(pd->reldesc, NoLock);
1240 :
1241 928 : if (pd->tupslot)
1242 442 : ExecDropSingleTupleTableSlot(pd->tupslot);
1243 : }
1244 :
1245 13160 : for (i = 0; i < proute->num_partitions; i++)
1246 : {
1247 7502 : ResultRelInfo *resultRelInfo = proute->partitions[i];
1248 :
1249 : /* Allow any FDWs to shut down */
1250 7502 : if (resultRelInfo->ri_FdwRoutine != NULL &&
1251 68 : resultRelInfo->ri_FdwRoutine->EndForeignInsert != NULL)
1252 68 : resultRelInfo->ri_FdwRoutine->EndForeignInsert(mtstate->ps.state,
1253 : resultRelInfo);
1254 :
1255 : /*
1256 : * Close it if it's not one of the result relations borrowed from the
1257 : * owning ModifyTableState; those will be closed by ExecEndPlan().
1258 : */
1259 7502 : if (proute->is_borrowed_rel[i])
1260 338 : continue;
1261 :
1262 7164 : ExecCloseIndices(resultRelInfo);
1263 7164 : table_close(resultRelInfo->ri_RelationDesc, NoLock);
1264 : }
1265 5658 : }
1266 :
1267 : /* ----------------
1268 : * FormPartitionKeyDatum
1269 : * Construct values[] and isnull[] arrays for the partition key
1270 : * of a tuple.
1271 : *
1272 : * pd Partition dispatch object of the partitioned table
1273 : * slot Heap tuple from which to extract partition key
1274 : * estate executor state for evaluating any partition key
1275 : * expressions (must be non-NULL)
1276 : * values Array of partition key Datums (output area)
1277 : * isnull Array of is-null indicators (output area)
1278 : *
1279 : * the ecxt_scantuple slot of estate's per-tuple expr context must point to
1280 : * the heap tuple passed in.
1281 : * ----------------
1282 : */
1283 : static void
1284 1048008 : FormPartitionKeyDatum(PartitionDispatch pd,
1285 : TupleTableSlot *slot,
1286 : EState *estate,
1287 : Datum *values,
1288 : bool *isnull)
1289 : {
1290 : ListCell *partexpr_item;
1291 : int i;
1292 :
1293 1048008 : if (pd->key->partexprs != NIL && pd->keystate == NIL)
1294 : {
1295 : /* Check caller has set up context correctly */
1296 : Assert(estate != NULL &&
1297 : GetPerTupleExprContext(estate)->ecxt_scantuple == slot);
1298 :
1299 : /* First time through, set up expression evaluation state */
1300 522 : pd->keystate = ExecPrepareExprList(pd->key->partexprs, estate);
1301 : }
1302 :
1303 1048008 : partexpr_item = list_head(pd->keystate);
1304 2118516 : for (i = 0; i < pd->key->partnatts; i++)
1305 : {
1306 1070508 : AttrNumber keycol = pd->key->partattrs[i];
1307 : Datum datum;
1308 : bool isNull;
1309 :
1310 1070508 : if (keycol != 0)
1311 : {
1312 : /* Plain column; get the value directly from the heap tuple */
1313 1042890 : datum = slot_getattr(slot, keycol, &isNull);
1314 : }
1315 : else
1316 : {
1317 : /* Expression; need to evaluate it */
1318 27618 : if (partexpr_item == NULL)
1319 0 : elog(ERROR, "wrong number of partition key expressions");
1320 27618 : datum = ExecEvalExprSwitchContext((ExprState *) lfirst(partexpr_item),
1321 27618 : GetPerTupleExprContext(estate),
1322 : &isNull);
1323 27618 : partexpr_item = lnext(pd->keystate, partexpr_item);
1324 : }
1325 1070508 : values[i] = datum;
1326 1070508 : isnull[i] = isNull;
1327 : }
1328 :
1329 1048008 : if (partexpr_item != NULL)
1330 0 : elog(ERROR, "wrong number of partition key expressions");
1331 1048008 : }
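
The caller contract above (ecxt_scantuple must reference the tuple being routed) mirrors what ExecFindPartition() does before each call; a minimal sketch:

    Datum  values[PARTITION_MAX_KEYS];
    bool   isnull[PARTITION_MAX_KEYS];

    /* partition key expressions are evaluated against ecxt_scantuple */
    GetPerTupleExprContext(estate)->ecxt_scantuple = slot;
    FormPartitionKeyDatum(pd, slot, estate, values, isnull);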
1332 :
1333 : /*
1334 : * The number of times the same partition must be found in a row before we
1335 : * switch from a binary search for the given values to just checking if the
1336 : * values belong to the last found partition. This must be above 0.
1337 : */
1338 : #define PARTITION_CACHED_FIND_THRESHOLD 16
1339 :
1340 : /*
1341 : * get_partition_for_tuple
1342 : * Finds partition of relation which accepts the partition key specified
1343 : * in values and isnull.
1344 : *
1345 : * Calling this function can be quite expensive when LIST and RANGE
1346 : * partitioned tables have many partitions. This is due to the binary search
1347 : * that's done to find the correct partition. Many of the use cases for LIST
1348 : * and RANGE partitioned tables make it likely that the same partition is
1349 : * found in subsequent ExecFindPartition() calls. This is especially true for
1350 : * cases such as RANGE partitioned tables on a TIMESTAMP column where the
1351 : * partition key is the current time. When asked to find a partition for a
1352 : * RANGE or LIST partitioned table, we record the partition index and datum
1353 : * offset we've found for the given 'values' in the PartitionDesc (which is
1354 : * stored in relcache), and if we keep finding the same partition
1355 : * PARTITION_CACHED_FIND_THRESHOLD times in a row, then we'll enable caching
1356 : * logic and instead of performing a binary search to find the correct
1357 : * partition, we'll just double-check that 'values' still belong to the last
1358 : * found partition, and if so, we'll return that partition index, thus
1359 : * skipping the need for the binary search. If we fail to match the last
1360 : * partition when double checking, then we fall back on doing a binary search.
1361 : * In this case, unless we find 'values' belong to the DEFAULT partition,
1362 : * we'll reset the number of times we've hit the same partition so that we
1363 : * don't attempt to use the cache again until we've found that partition at
1364 : * least PARTITION_CACHED_FIND_THRESHOLD times in a row.
1365 : *
1366 : * For cases where the partition changes on each lookup, the amount of
1367 : * additional work required just amounts to recording the last found partition
1368 : * and bound offset then resetting the found counter. This is cheap and does
1369 : * not appear to cause any meaningful slowdowns for such cases.
1370 : *
1371 : * No caching of partitions is done when the last found partition is the
1372 : * DEFAULT or NULL partition. For the case of the DEFAULT partition, there
1373 : * is no bound offset storing the matching datum, so we cannot confirm the
1374 : * indexes match. For the NULL partition, the lookup is so cheap that
1375 : * there's no sense in caching.
1376 : *
1377 : * Return value is the index of the partition (>= 0 and < partdesc->nparts)
1378 : * if one is found, or -1 if none is found.
1379 : */
1380 : static int
1381 1047966 : get_partition_for_tuple(PartitionDispatch pd, Datum *values, bool *isnull)
1382 : {
1383 1047966 : int bound_offset = -1;
1384 1047966 : int part_index = -1;
1385 1047966 : PartitionKey key = pd->key;
1386 1047966 : PartitionDesc partdesc = pd->partdesc;
1387 1047966 : PartitionBoundInfo boundinfo = partdesc->boundinfo;
1388 :
1389 : /*
1390 : * In the switch statement below, when we perform a cached lookup for
1391 : * RANGE and LIST partitioned tables, if we find that the last found
1392 : * partition matches the 'values', we return the partition index right
1393 : * away. We do this instead of breaking out of the switch as we don't
1394 : * want to execute the code handling the DEFAULT partition or update any
1395 : * of the cache-related fields. That would be a waste of effort
1396 : * as we already know it's not the DEFAULT partition and have no need to
1397 : * increment the number of times we found the same partition any higher
1398 : * than PARTITION_CACHED_FIND_THRESHOLD.
1399 : */
1400 :
1401 : /* Route as appropriate based on partitioning strategy. */
1402 1047966 : switch (key->strategy)
1403 : {
1404 212626 : case PARTITION_STRATEGY_HASH:
1405 : {
1406 : uint64 rowHash;
1407 :
1408 : /* hash partitioning is too cheap to bother caching */
1409 212626 : rowHash = compute_partition_hash_value(key->partnatts,
1410 : key->partsupfunc,
1411 : key->partcollation,
1412 : values, isnull);
1413 :
1414 : /*
1415 : * HASH partitions can't have a DEFAULT partition and we don't
1416 : * do any caching work for them, so just return the part index
1417 : */
1418 212626 : return boundinfo->indexes[rowHash % boundinfo->nindexes];
1419 : }
1420 :
1421 109984 : case PARTITION_STRATEGY_LIST:
1422 109984 : if (isnull[0])
1423 : {
1424 : /* this is far too cheap to bother doing any caching */
1425 120 : if (partition_bound_accepts_nulls(boundinfo))
1426 : {
1427 : /*
1428 : * When there is a NULL partition we just return that
1429 : * directly. We don't have a bound_offset so it's not
1430 : * valid to drop into the code after the switch which
1431 : * checks and updates the cache fields. We perhaps should
1432 : * be invalidating the details of the last cached
1433 : * partition but there's no real need to. Keeping those
1434 : * fields set gives a chance at matching to the cached
1435 : * partition on the next lookup.
1436 : */
1437 90 : return boundinfo->null_index;
1438 : }
1439 : }
1440 : else
1441 : {
1442 : bool equal;
1443 :
1444 109864 : if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
1445 : {
1446 23892 : int last_datum_offset = partdesc->last_found_datum_index;
1447 23892 : Datum lastDatum = boundinfo->datums[last_datum_offset][0];
1448 : int32 cmpval;
1449 :
1450 : /* does the last found datum index match this datum? */
1451 23892 : cmpval = DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[0],
1452 23892 : key->partcollation[0],
1453 : lastDatum,
1454 : values[0]));
1455 :
1456 23892 : if (cmpval == 0)
1457 23538 : return boundinfo->indexes[last_datum_offset];
1458 :
1459 : /* fall-through and do a manual lookup */
1460 : }
1461 :
1462 86326 : bound_offset = partition_list_bsearch(key->partsupfunc,
1463 : key->partcollation,
1464 : boundinfo,
1465 : values[0], &equal);
1466 86326 : if (bound_offset >= 0 && equal)
1467 85988 : part_index = boundinfo->indexes[bound_offset];
1468 : }
1469 86356 : break;
1470 :
1471 725356 : case PARTITION_STRATEGY_RANGE:
1472 : {
1473 725356 : bool equal = false,
1474 725356 : range_partkey_has_null = false;
1475 : int i;
1476 :
1477 : /*
1478 : * No range includes NULL, so this will be accepted by the
1479 : * default partition if there is one, and otherwise rejected.
1480 : */
1481 1473080 : for (i = 0; i < key->partnatts; i++)
1482 : {
1483 747778 : if (isnull[i])
1484 : {
1485 54 : range_partkey_has_null = true;
1486 54 : break;
1487 : }
1488 : }
1489 :
1490 : /* NULLs belong in the DEFAULT partition */
1491 725356 : if (range_partkey_has_null)
1492 54 : break;
1493 :
1494 725302 : if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
1495 : {
1496 238962 : int last_datum_offset = partdesc->last_found_datum_index;
1497 238962 : Datum *lastDatums = boundinfo->datums[last_datum_offset];
1498 238962 : PartitionRangeDatumKind *kind = boundinfo->kind[last_datum_offset];
1499 : int32 cmpval;
1500 :
1501 : /* check if the value is >= to the lower bound */
1502 238962 : cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1503 : key->partcollation,
1504 : lastDatums,
1505 : kind,
1506 : values,
1507 238962 : key->partnatts);
1508 :
1509 : /*
1510 : * If it's equal to the lower bound then no need to check
1511 : * the upper bound.
1512 : */
1513 238962 : if (cmpval == 0)
1514 238772 : return boundinfo->indexes[last_datum_offset + 1];
1515 :
1516 233064 : if (cmpval < 0 && last_datum_offset + 1 < boundinfo->ndatums)
1517 : {
1518 : /* check if the value is below the upper bound */
1519 233064 : lastDatums = boundinfo->datums[last_datum_offset + 1];
1520 233064 : kind = boundinfo->kind[last_datum_offset + 1];
1521 233064 : cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1522 : key->partcollation,
1523 : lastDatums,
1524 : kind,
1525 : values,
1526 233064 : key->partnatts);
1527 :
1528 233064 : if (cmpval > 0)
1529 232874 : return boundinfo->indexes[last_datum_offset + 1];
1530 : }
1531 : /* fall-through and do a manual lookup */
1532 : }
1533 :
1534 486530 : bound_offset = partition_range_datum_bsearch(key->partsupfunc,
1535 : key->partcollation,
1536 : boundinfo,
1537 486530 : key->partnatts,
1538 : values,
1539 : &equal);
1540 :
1541 : /*
1542 : * The bound at bound_offset is less than or equal to the
1543 : * tuple value, so the bound at offset+1 is the upper bound of
1544 : * the partition we're looking for, if there actually exists
1545 : * one.
1546 : */
1547 486530 : part_index = boundinfo->indexes[bound_offset + 1];
1548 : }
1549 486530 : break;
1550 :
1551 0 : default:
1552 0 : elog(ERROR, "unexpected partition strategy: %d",
1553 : (int) key->strategy);
1554 : }
1555 :
1556 : /*
1557 : * part_index < 0 means we failed to find a partition of this parent. Use
1558 : * the default partition, if there is one.
1559 : */
1560 572940 : if (part_index < 0)
1561 : {
1562 : /*
1563 : * No need to reset the cache fields here. The next set of values
1564 : * might end up belonging to the cached partition, so leaving the
1565 : * cache alone improves the chances of a cache hit on the next lookup.
1566 : */
1567 628 : return boundinfo->default_index;
1568 : }
1569 :
1570 : /* we should only make it here when the code above set bound_offset */
1571 : Assert(bound_offset >= 0);
1572 :
1573 : /*
1574 : * Attend to the cache fields. If the bound_offset matches the last
1575 : * cached bound offset then we've found the same partition as last time,
1576 : * so bump the count by one. If all goes well, we'll eventually reach
1577 : * PARTITION_CACHED_FIND_THRESHOLD and try the cache path next time
1578 : * around. Otherwise, we'll reset the cache count back to 1 to mark that
1579 : * we've found this partition for the first time.
1580 : */
1581 572312 : if (bound_offset == partdesc->last_found_datum_index)
1582 436348 : partdesc->last_found_count++;
1583 : else
1584 : {
1585 135964 : partdesc->last_found_count = 1;
1586 135964 : partdesc->last_found_part_index = part_index;
1587 135964 : partdesc->last_found_datum_index = bound_offset;
1588 : }
1589 :
1590 572312 : return part_index;
1591 : }
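
/*
 * A minimal standalone sketch of the cache bookkeeping performed at the
 * bottom of the function above.  The struct and function names here are
 * illustrative assumptions, not part of execPartition.c; the policy is the
 * same: consecutive finds of the same bound offset bump a counter, and once
 * it reaches PARTITION_CACHED_FIND_THRESHOLD the fast re-check path is
 * attempted before the full binary search.
 */
typedef struct LastFoundCache
{
	int			last_found_datum_index; /* bound offset found last time */
	int			last_found_part_index;	/* partition index found last time */
	int			last_found_count;	/* consecutive finds of that offset */
} LastFoundCache;

static inline void
cache_record_find(LastFoundCache *cache, int bound_offset, int part_index)
{
	if (bound_offset == cache->last_found_datum_index)
		cache->last_found_count++;	/* same partition as last time */
	else
	{
		/* found a different partition; restart the counter at 1 */
		cache->last_found_count = 1;
		cache->last_found_part_index = part_index;
		cache->last_found_datum_index = bound_offset;
	}
}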
1592 :
1593 : /*
1594 : * ExecBuildSlotPartitionKeyDescription
1595 : *
1596 : * This works very much like BuildIndexValueDescription() and is currently
1597 : * used for building error messages when ExecFindPartition() fails to find
 1598 : * a partition for a row.
1599 : */
1600 : static char *
1601 148 : ExecBuildSlotPartitionKeyDescription(Relation rel,
1602 : Datum *values,
1603 : bool *isnull,
1604 : int maxfieldlen)
1605 : {
1606 : StringInfoData buf;
1607 148 : PartitionKey key = RelationGetPartitionKey(rel);
1608 148 : int partnatts = get_partition_natts(key);
1609 : int i;
1610 148 : Oid relid = RelationGetRelid(rel);
1611 : AclResult aclresult;
1612 :
1613 148 : if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED)
1614 0 : return NULL;
1615 :
1616 : /* If the user has table-level access, just go build the description. */
1617 148 : aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
1618 148 : if (aclresult != ACLCHECK_OK)
1619 : {
1620 : /*
1621 : * Step through the columns of the partition key and make sure the
1622 : * user has SELECT rights on all of them.
1623 : */
1624 24 : for (i = 0; i < partnatts; i++)
1625 : {
1626 18 : AttrNumber attnum = get_partition_col_attnum(key, i);
1627 :
1628 : /*
1629 : * If this partition key column is an expression, we return no
1630 : * detail rather than try to figure out what column(s) the
 1631 : * expression includes and whether the user has SELECT rights on them.
1632 : */
1633 30 : if (attnum == InvalidAttrNumber ||
1634 12 : pg_attribute_aclcheck(relid, attnum, GetUserId(),
1635 : ACL_SELECT) != ACLCHECK_OK)
1636 12 : return NULL;
1637 : }
1638 : }
1639 :
1640 136 : initStringInfo(&buf);
1641 136 : appendStringInfo(&buf, "(%s) = (",
1642 : pg_get_partkeydef_columns(relid, true));
1643 :
1644 326 : for (i = 0; i < partnatts; i++)
1645 : {
1646 : char *val;
1647 : int vallen;
1648 :
1649 190 : if (isnull[i])
1650 30 : val = "null";
1651 : else
1652 : {
1653 : Oid foutoid;
1654 : bool typisvarlena;
1655 :
1656 160 : getTypeOutputInfo(get_partition_col_typid(key, i),
1657 : &foutoid, &typisvarlena);
1658 160 : val = OidOutputFunctionCall(foutoid, values[i]);
1659 : }
1660 :
1661 190 : if (i > 0)
1662 54 : appendStringInfoString(&buf, ", ");
1663 :
1664 : /* truncate if needed */
1665 190 : vallen = strlen(val);
1666 190 : if (vallen <= maxfieldlen)
1667 190 : appendBinaryStringInfo(&buf, val, vallen);
1668 : else
1669 : {
1670 0 : vallen = pg_mbcliplen(val, vallen, maxfieldlen);
1671 0 : appendBinaryStringInfo(&buf, val, vallen);
1672 0 : appendStringInfoString(&buf, "...");
1673 : }
1674 : }
1675 :
1676 136 : appendStringInfoChar(&buf, ')');
1677 :
1678 136 : return buf.data;
1679 : }
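
/*
 * For illustration: for a table partitioned by RANGE (a, b), a failed
 * routing of the tuple (10, NULL) yields the description "(a, b) = (10,
 * null)".  The multibyte-safe truncation idiom used above can be read in
 * isolation as the following sketch, assuming a ready StringInfo "buf" and
 * a NUL-terminated value string "val" (hypothetical names):
 */
static void
append_clipped_value(StringInfo buf, const char *val, int maxfieldlen)
{
	int			vallen = strlen(val);

	if (vallen <= maxfieldlen)
		appendBinaryStringInfo(buf, val, vallen);
	else
	{
		/* clip at a character boundary, then mark the truncation */
		vallen = pg_mbcliplen(val, vallen, maxfieldlen);
		appendBinaryStringInfo(buf, val, vallen);
		appendStringInfoString(buf, "...");
	}
}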
1680 :
1681 : /*
1682 : * adjust_partition_colnos
1683 : * Adjust the list of UPDATE target column numbers to account for
1684 : * attribute differences between the parent and the partition.
1685 : *
1686 : * Note: mustn't be called if no adjustment is required.
1687 : */
1688 : static List *
1689 76 : adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri)
1690 : {
1691 76 : TupleConversionMap *map = ExecGetChildToRootMap(leaf_part_rri);
1692 :
1693 : Assert(map != NULL);
1694 :
1695 76 : return adjust_partition_colnos_using_map(colnos, map->attrMap);
1696 : }
1697 :
1698 : /*
1699 : * adjust_partition_colnos_using_map
1700 : * Like adjust_partition_colnos, but uses a caller-supplied map instead
 1701 : * of assuming that the mapping is from the "root" result relation.
1702 : *
1703 : * Note: mustn't be called if no adjustment is required.
1704 : */
1705 : static List *
1706 120 : adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
1707 : {
1708 120 : List *new_colnos = NIL;
1709 : ListCell *lc;
1710 :
1711 : Assert(attrMap != NULL); /* else we shouldn't be here */
1712 :
1713 304 : foreach(lc, colnos)
1714 : {
1715 184 : AttrNumber parentattrno = lfirst_int(lc);
1716 :
1717 184 : if (parentattrno <= 0 ||
1718 184 : parentattrno > attrMap->maplen ||
1719 184 : attrMap->attnums[parentattrno - 1] == 0)
1720 0 : elog(ERROR, "unexpected attno %d in target column list",
1721 : parentattrno);
1722 184 : new_colnos = lappend_int(new_colnos,
1723 184 : attrMap->attnums[parentattrno - 1]);
1724 : }
1725 :
1726 120 : return new_colnos;
1727 : }
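
/*
 * A worked example (illustrative, not from the source): suppose the parent
 * has columns (a, b, c) at attnums 1..3, while in the partition the same
 * columns sit at attnums 2..4 because of a dropped column.  Then
 * attrMap->attnums[] is {2, 3, 4}, indexed by parent attno, and an UPDATE
 * target list naming parent columns {2, 3} is translated to partition
 * columns {3, 4}.
 */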
1728 :
1729 : /*-------------------------------------------------------------------------
1730 : * Run-Time Partition Pruning Support.
1731 : *
1732 : * The following series of functions exist to support the removal of unneeded
1733 : * subplans for queries against partitioned tables. The supporting functions
1734 : * here are designed to work with any plan type which supports an arbitrary
1735 : * number of subplans, e.g. Append, MergeAppend.
1736 : *
1737 : * When pruning involves comparison of a partition key to a constant, it's
1738 : * done by the planner. However, if we have a comparison to a non-constant
1739 : * but not volatile expression, that presents an opportunity for run-time
1740 : * pruning by the executor, allowing irrelevant partitions to be skipped
1741 : * dynamically.
1742 : *
1743 : * We must distinguish expressions containing PARAM_EXEC Params from
1744 : * expressions that don't contain those. Even though a PARAM_EXEC Param is
1745 : * considered to be a stable expression, it can change value from one plan
1746 : * node scan to the next during query execution. Stable comparison
1747 : * expressions that don't involve such Params allow partition pruning to be
1748 : * done once during executor startup. Expressions that do involve such Params
1749 : * require us to prune separately for each scan of the parent plan node.
1750 : *
1751 : * Note that pruning away unneeded subplans during executor startup has the
1752 : * added benefit of not having to initialize the unneeded subplans at all.
1753 : *
1754 : *
1755 : * Functions:
1756 : *
1757 : * ExecInitPartitionPruning:
1758 : * Creates the PartitionPruneState required by ExecFindMatchingSubPlans.
1759 : * Details stored include how to map the partition index returned by the
1760 : * partition pruning code into subplan indexes. Also determines the set
 1761 : * of subplans to initialize, taking into account the result of any
 1762 : * initial pruning steps. Maps in PartitionPruneState are updated to
1763 : * account for initial pruning possibly having eliminated some of the
1764 : * subplans.
1765 : *
1766 : * ExecFindMatchingSubPlans:
1767 : * Returns indexes of matching subplans after evaluating the expressions
1768 : * that are safe to evaluate at a given point. This function is first
1769 : * called during ExecInitPartitionPruning() to find the initially
1770 : * matching subplans based on performing the initial pruning steps and
 1771 : * matching subplans by performing the initial pruning steps, and
1772 : * PartitionPruneState's 'execparamids' changes.
1773 : *-------------------------------------------------------------------------
1774 : */
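
/*
 * A minimal sketch (under assumed names, not code from this file) of how an
 * Append-like caller drives the two functions above: prune once at node
 * initialization, then re-prune whenever a PARAM_EXEC Param listed in the
 * state's 'execparamids' has changed value.
 */
static Bitmapset *
prune_sketch(PlanState *parent, int nplans, PartitionPruneInfo *pruneinfo)
{
	Bitmapset  *validsubplans;
	PartitionPruneState *prunestate;

	/* startup: build pruning state and perform any initial pruning */
	prunestate = ExecInitPartitionPruning(parent, nplans,
										  pruneinfo, &validsubplans);

	/* ... the caller initializes only the subplans in validsubplans ... */

	/* per (re)scan, once changed Params can be evaluated: */
	if (prunestate->do_exec_prune)
		validsubplans = ExecFindMatchingSubPlans(prunestate, false);

	return validsubplans;
}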
1775 :
1776 : /*
1777 : * ExecInitPartitionPruning
1778 : * Initialize data structure needed for run-time partition pruning and
1779 : * do initial pruning if needed
1780 : *
1781 : * On return, *initially_valid_subplans is assigned the set of indexes of
1782 : * child subplans that must be initialized along with the parent plan node.
1783 : * Initial pruning is performed here if needed and in that case only the
1784 : * surviving subplans' indexes are added.
1785 : *
1786 : * If subplans are indeed pruned, subplan_map arrays contained in the returned
1787 : * PartitionPruneState are re-sequenced to not count those, though only if the
1788 : * maps will be needed for subsequent execution pruning passes.
1789 : */
1790 : PartitionPruneState *
1791 606 : ExecInitPartitionPruning(PlanState *planstate,
1792 : int n_total_subplans,
1793 : PartitionPruneInfo *pruneinfo,
1794 : Bitmapset **initially_valid_subplans)
1795 : {
1796 : PartitionPruneState *prunestate;
1797 606 : EState *estate = planstate->state;
1798 :
1799 : /* We may need an expression context to evaluate partition exprs */
1800 606 : ExecAssignExprContext(estate, planstate);
1801 :
1802 : /* Create the working data structure for pruning */
1803 606 : prunestate = CreatePartitionPruneState(planstate, pruneinfo);
1804 :
1805 : /*
1806 : * Perform an initial partition prune pass, if required.
1807 : */
1808 606 : if (prunestate->do_initial_prune)
1809 260 : *initially_valid_subplans = ExecFindMatchingSubPlans(prunestate, true);
1810 : else
1811 : {
1812 : /* No pruning, so we'll need to initialize all subplans */
1813 : Assert(n_total_subplans > 0);
1814 346 : *initially_valid_subplans = bms_add_range(NULL, 0,
1815 : n_total_subplans - 1);
1816 : }
1817 :
1818 : /*
1819 : * Re-sequence subplan indexes contained in prunestate to account for any
1820 : * that were removed above due to initial pruning. No need to do this if
 1821 : * no subplans were removed.
1822 : */
1823 606 : if (bms_num_members(*initially_valid_subplans) < n_total_subplans)
1824 : {
1825 : /*
1826 : * We can safely skip this when !do_exec_prune, even though that
1827 : * leaves invalid data in prunestate, because that data won't be
1828 : * consulted again (cf initial Assert in ExecFindMatchingSubPlans).
1829 : */
1830 260 : if (prunestate->do_exec_prune)
1831 48 : PartitionPruneFixSubPlanMap(prunestate,
1832 : *initially_valid_subplans,
1833 : n_total_subplans);
1834 : }
1835 :
1836 606 : return prunestate;
1837 : }
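
/*
 * For example (illustrative): with n_total_subplans = 3 and no initial
 * pruning steps, *initially_valid_subplans becomes {0, 1, 2} via
 * bms_add_range() and no re-sequencing happens.  If initial pruning instead
 * keeps only {2}, and exec-time pruning is also needed, the maps are
 * re-sequenced so that later passes see that subplan as index 0.
 */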
1838 :
1839 : /*
1840 : * CreatePartitionPruneState
1841 : * Build the data structure required for calling ExecFindMatchingSubPlans
1842 : *
1843 : * 'planstate' is the parent plan node's execution state.
1844 : *
1845 : * 'pruneinfo' is a PartitionPruneInfo as generated by
1846 : * make_partition_pruneinfo. Here we build a PartitionPruneState containing a
1847 : * PartitionPruningData for each partitioning hierarchy (i.e., each sublist of
1848 : * pruneinfo->prune_infos), each of which contains a PartitionedRelPruningData
1849 : * for each PartitionedRelPruneInfo appearing in that sublist. This two-level
1850 : * system is needed to keep from confusing the different hierarchies when a
1851 : * UNION ALL contains multiple partitioned tables as children. The data
1852 : * stored in each PartitionedRelPruningData can be re-used each time we
1853 : * re-evaluate which partitions match the pruning steps provided in each
1854 : * PartitionedRelPruneInfo.
1855 : */
1856 : static PartitionPruneState *
1857 606 : CreatePartitionPruneState(PlanState *planstate, PartitionPruneInfo *pruneinfo)
1858 : {
1859 606 : EState *estate = planstate->state;
1860 : PartitionPruneState *prunestate;
1861 : int n_part_hierarchies;
1862 : ListCell *lc;
1863 : int i;
1864 606 : ExprContext *econtext = planstate->ps_ExprContext;
1865 :
1866 : /* For data reading, executor always omits detached partitions */
 1867 606 : /* For data reading, executor always includes detached partitions */
1868 588 : estate->es_partition_directory =
1869 588 : CreatePartitionDirectory(estate->es_query_cxt, false);
1870 :
1871 606 : n_part_hierarchies = list_length(pruneinfo->prune_infos);
1872 : Assert(n_part_hierarchies > 0);
1873 :
1874 : /*
1875 : * Allocate the data structure
1876 : */
1877 : prunestate = (PartitionPruneState *)
1878 606 : palloc(offsetof(PartitionPruneState, partprunedata) +
1879 : sizeof(PartitionPruningData *) * n_part_hierarchies);
1880 :
1881 606 : prunestate->execparamids = NULL;
1882 : /* other_subplans can change at runtime, so we need our own copy */
1883 606 : prunestate->other_subplans = bms_copy(pruneinfo->other_subplans);
1884 606 : prunestate->do_initial_prune = false; /* may be set below */
1885 606 : prunestate->do_exec_prune = false; /* may be set below */
1886 606 : prunestate->num_partprunedata = n_part_hierarchies;
1887 :
1888 : /*
1889 : * Create a short-term memory context which we'll use when making calls to
1890 : * the partition pruning functions. This avoids possible memory leaks,
1891 : * since the pruning functions call comparison functions that aren't under
1892 : * our control.
1893 : */
1894 606 : prunestate->prune_context =
1895 606 : AllocSetContextCreate(CurrentMemoryContext,
1896 : "Partition Prune",
1897 : ALLOCSET_DEFAULT_SIZES);
1898 :
1899 606 : i = 0;
1900 1236 : foreach(lc, pruneinfo->prune_infos)
1901 : {
1902 630 : List *partrelpruneinfos = lfirst_node(List, lc);
1903 630 : int npartrelpruneinfos = list_length(partrelpruneinfos);
1904 : PartitionPruningData *prunedata;
1905 : ListCell *lc2;
1906 : int j;
1907 :
1908 : prunedata = (PartitionPruningData *)
1909 630 : palloc(offsetof(PartitionPruningData, partrelprunedata) +
1910 630 : npartrelpruneinfos * sizeof(PartitionedRelPruningData));
1911 630 : prunestate->partprunedata[i] = prunedata;
1912 630 : prunedata->num_partrelprunedata = npartrelpruneinfos;
1913 :
1914 630 : j = 0;
1915 2070 : foreach(lc2, partrelpruneinfos)
1916 : {
1917 1440 : PartitionedRelPruneInfo *pinfo = lfirst_node(PartitionedRelPruneInfo, lc2);
1918 1440 : PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
1919 : Relation partrel;
1920 : PartitionDesc partdesc;
1921 : PartitionKey partkey;
1922 :
1923 : /*
1924 : * We can rely on the copies of the partitioned table's partition
1925 : * key and partition descriptor appearing in its relcache entry,
1926 : * because that entry will be held open and locked for the
1927 : * duration of this executor run.
1928 : */
1929 1440 : partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex);
1930 1440 : partkey = RelationGetPartitionKey(partrel);
1931 1440 : partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
1932 : partrel);
1933 :
1934 : /*
1935 : * Initialize the subplan_map and subpart_map.
1936 : *
1937 : * Because we request detached partitions to be included, and
1938 : * detaching waits for old transactions, it is safe to assume that
1939 : * no partitions have disappeared since this query was planned.
1940 : *
1941 : * However, new partitions may have been added.
1942 : */
1943 : Assert(partdesc->nparts >= pinfo->nparts);
1944 1440 : pprune->nparts = partdesc->nparts;
1945 1440 : pprune->subplan_map = palloc(sizeof(int) * partdesc->nparts);
1946 1440 : if (partdesc->nparts == pinfo->nparts)
1947 : {
1948 : /*
1949 : * There are no new partitions, so this is simple. We can
1950 : * simply point to the subpart_map from the plan, but we must
1951 : * copy the subplan_map since we may change it later.
1952 : */
1953 1438 : pprune->subpart_map = pinfo->subpart_map;
1954 1438 : memcpy(pprune->subplan_map, pinfo->subplan_map,
1955 1438 : sizeof(int) * pinfo->nparts);
1956 :
1957 : /*
1958 : * Double-check that the list of unpruned relations has not
1959 : * changed. (Pruned partitions are not in relid_map[].)
1960 : */
1961 : #ifdef USE_ASSERT_CHECKING
1962 : for (int k = 0; k < pinfo->nparts; k++)
1963 : {
1964 : Assert(partdesc->oids[k] == pinfo->relid_map[k] ||
1965 : pinfo->subplan_map[k] == -1);
1966 : }
1967 : #endif
1968 : }
1969 : else
1970 : {
1971 2 : int pd_idx = 0;
1972 : int pp_idx;
1973 :
1974 : /*
1975 : * Some new partitions have appeared since plan time, and
1976 : * those are reflected in our PartitionDesc but were not
1977 : * present in the one used to construct subplan_map and
1978 : * subpart_map. So we must construct new and longer arrays
1979 : * where the partitions that were originally present map to
1980 : * the same sub-structures, and any added partitions map to
1981 : * -1, as if the new partitions had been pruned.
1982 : *
1983 : * Note: pinfo->relid_map[] may contain InvalidOid entries for
1984 : * partitions pruned by the planner. We cannot tell exactly
1985 : * which of the partdesc entries these correspond to, but we
1986 : * don't have to; just skip over them. The non-pruned
1987 : * relid_map entries, however, had better be a subset of the
1988 : * partdesc entries and in the same order.
1989 : */
1990 2 : pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);
1991 10 : for (pp_idx = 0; pp_idx < partdesc->nparts; pp_idx++)
1992 : {
1993 : /* Skip any InvalidOid relid_map entries */
1994 10 : while (pd_idx < pinfo->nparts &&
1995 10 : !OidIsValid(pinfo->relid_map[pd_idx]))
1996 2 : pd_idx++;
1997 :
1998 8 : if (pd_idx < pinfo->nparts &&
1999 8 : pinfo->relid_map[pd_idx] == partdesc->oids[pp_idx])
2000 : {
2001 : /* match... */
2002 4 : pprune->subplan_map[pp_idx] =
2003 4 : pinfo->subplan_map[pd_idx];
2004 4 : pprune->subpart_map[pp_idx] =
2005 4 : pinfo->subpart_map[pd_idx];
2006 4 : pd_idx++;
2007 : }
2008 : else
2009 : {
2010 : /* this partdesc entry is not in the plan */
2011 4 : pprune->subplan_map[pp_idx] = -1;
2012 4 : pprune->subpart_map[pp_idx] = -1;
2013 : }
2014 : }
2015 :
2016 : /*
2017 : * It might seem that we need to skip any trailing InvalidOid
2018 : * entries in pinfo->relid_map before checking that we scanned
2019 : * all of the relid_map. But we will have skipped them above,
2020 : * because they must correspond to some partdesc->oids
2021 : * entries; we just couldn't tell which.
2022 : */
2023 2 : if (pd_idx != pinfo->nparts)
2024 0 : elog(ERROR, "could not match partition child tables to plan elements");
2025 : }
2026 :
2027 : /* present_parts is also subject to later modification */
2028 1440 : pprune->present_parts = bms_copy(pinfo->present_parts);
2029 :
2030 : /*
2031 : * Initialize pruning contexts as needed. Note that we must skip
2032 : * execution-time partition pruning in EXPLAIN (GENERIC_PLAN),
2033 : * since parameter values may be missing.
2034 : */
2035 1440 : pprune->initial_pruning_steps = pinfo->initial_pruning_steps;
2036 1440 : if (pinfo->initial_pruning_steps &&
2037 368 : !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
2038 : {
2039 362 : InitPartitionPruneContext(&pprune->initial_context,
2040 : pinfo->initial_pruning_steps,
2041 : partdesc, partkey, planstate,
2042 : econtext);
2043 : /* Record whether initial pruning is needed at any level */
2044 362 : prunestate->do_initial_prune = true;
2045 : }
2046 1440 : pprune->exec_pruning_steps = pinfo->exec_pruning_steps;
2047 1440 : if (pinfo->exec_pruning_steps &&
2048 502 : !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
2049 : {
2050 502 : InitPartitionPruneContext(&pprune->exec_context,
2051 : pinfo->exec_pruning_steps,
2052 : partdesc, partkey, planstate,
2053 : econtext);
2054 : /* Record whether exec pruning is needed at any level */
2055 502 : prunestate->do_exec_prune = true;
2056 : }
2057 :
2058 : /*
2059 : * Accumulate the IDs of all PARAM_EXEC Params affecting the
2060 : * partitioning decisions at this plan node.
2061 : */
2062 2880 : prunestate->execparamids = bms_add_members(prunestate->execparamids,
2063 1440 : pinfo->execparamids);
2064 :
2065 1440 : j++;
2066 : }
2067 630 : i++;
2068 : }
2069 :
2070 606 : return prunestate;
2071 : }
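
/*
 * A worked example of the two-level structure built above (illustrative
 * relation names): for "SELECT ... FROM p1 UNION ALL SELECT ... FROM p2",
 * where p1 and p2 are partitioned and p1 has a sub-partitioned child
 * p1_sub, we end up with
 *
 *		prunestate->partprunedata[0]		hierarchy rooted at p1
 *			->partrelprunedata[0]			p1 itself (root comes first)
 *			->partrelprunedata[1]			p1_sub
 *		prunestate->partprunedata[1]		hierarchy rooted at p2
 *			->partrelprunedata[0]			p2 itself
 */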
2072 :
2073 : /*
2074 : * Initialize a PartitionPruneContext for the given list of pruning steps.
2075 : */
2076 : static void
2077 864 : InitPartitionPruneContext(PartitionPruneContext *context,
2078 : List *pruning_steps,
2079 : PartitionDesc partdesc,
2080 : PartitionKey partkey,
2081 : PlanState *planstate,
2082 : ExprContext *econtext)
2083 : {
2084 : int n_steps;
2085 : int partnatts;
2086 : ListCell *lc;
2087 :
2088 864 : n_steps = list_length(pruning_steps);
2089 :
2090 864 : context->strategy = partkey->strategy;
2091 864 : context->partnatts = partnatts = partkey->partnatts;
2092 864 : context->nparts = partdesc->nparts;
2093 864 : context->boundinfo = partdesc->boundinfo;
2094 864 : context->partcollation = partkey->partcollation;
2095 864 : context->partsupfunc = partkey->partsupfunc;
2096 :
2097 : /* We'll look up type-specific support functions as needed */
2098 864 : context->stepcmpfuncs = (FmgrInfo *)
2099 864 : palloc0(sizeof(FmgrInfo) * n_steps * partnatts);
2100 :
2101 864 : context->ppccontext = CurrentMemoryContext;
2102 864 : context->planstate = planstate;
2103 864 : context->exprcontext = econtext;
2104 :
2105 : /* Initialize expression state for each expression we need */
2106 864 : context->exprstates = (ExprState **)
2107 864 : palloc0(sizeof(ExprState *) * n_steps * partnatts);
2108 2390 : foreach(lc, pruning_steps)
2109 : {
2110 1526 : PartitionPruneStepOp *step = (PartitionPruneStepOp *) lfirst(lc);
2111 : ListCell *lc2;
2112 : int keyno;
2113 :
2114 : /* not needed for other step kinds */
2115 1526 : if (!IsA(step, PartitionPruneStepOp))
2116 286 : continue;
2117 :
2118 : Assert(list_length(step->exprs) <= partnatts);
2119 :
2120 1240 : keyno = 0;
2121 2528 : foreach(lc2, step->exprs)
2122 : {
2123 1288 : Expr *expr = (Expr *) lfirst(lc2);
2124 :
2125 : /* not needed for Consts */
2126 1288 : if (!IsA(expr, Const))
2127 : {
2128 1194 : int stateidx = PruneCxtStateIdx(partnatts,
2129 : step->step.step_id,
2130 : keyno);
2131 :
2132 : /*
2133 : * When planstate is NULL, pruning_steps is known not to
2134 : * contain any expressions that depend on the parent plan.
 2135 : * Information about any available EXTERN parameters must be
 2136 : * passed explicitly in that case; the caller must have
 2137 : * made it available via econtext.
2138 : */
2139 1194 : if (planstate == NULL)
2140 0 : context->exprstates[stateidx] =
2141 0 : ExecInitExprWithParams(expr,
2142 : econtext->ecxt_param_list_info);
2143 : else
2144 1194 : context->exprstates[stateidx] =
2145 1194 : ExecInitExpr(expr, context->planstate);
2146 : }
2147 1288 : keyno++;
2148 : }
2149 : }
2150 864 : }
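
/*
 * The exprstates array above is a flattened two-dimensional array with one
 * slot per (pruning step, partition key column) pair.  Assuming
 * PruneCxtStateIdx() is the usual row-major indexing,
 * partnatts * step_id + keyno, then with partnatts = 2 the ExprState for
 * the second key column (keyno = 1) of the step with step_id = 3 lands at
 * index 2 * 3 + 1 = 7 of the n_steps * partnatts slots allocated above.
 */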
2151 :
2152 : /*
2153 : * PartitionPruneFixSubPlanMap
2154 : * Fix mapping of partition indexes to subplan indexes contained in
2155 : * prunestate by considering the new list of subplans that survived
2156 : * initial pruning
2157 : *
2158 : * Current values of the indexes present in PartitionPruneState count all the
2159 : * subplans that would be present before initial pruning was done. If initial
2160 : * pruning got rid of some of the subplans, any subsequent pruning passes will
2161 : * be looking at a different set of target subplans to choose from than those
2162 : * in the pre-initial-pruning set, so the maps in PartitionPruneState
2163 : * containing those indexes must be updated to reflect the new indexes of
2164 : * subplans in the post-initial-pruning set.
2165 : */
2166 : static void
2167 48 : PartitionPruneFixSubPlanMap(PartitionPruneState *prunestate,
2168 : Bitmapset *initially_valid_subplans,
2169 : int n_total_subplans)
2170 : {
2171 : int *new_subplan_indexes;
2172 : Bitmapset *new_other_subplans;
2173 : int i;
2174 : int newidx;
2175 :
2176 : /*
2177 : * First we must build a temporary array which maps old subplan indexes to
2178 : * new ones. For convenience of initialization, we use 1-based indexes in
2179 : * this array and leave pruned items as 0.
2180 : */
2181 48 : new_subplan_indexes = (int *) palloc0(sizeof(int) * n_total_subplans);
2182 48 : newidx = 1;
2183 48 : i = -1;
2184 186 : while ((i = bms_next_member(initially_valid_subplans, i)) >= 0)
2185 : {
2186 : Assert(i < n_total_subplans);
2187 138 : new_subplan_indexes[i] = newidx++;
2188 : }
2189 :
2190 : /*
2191 : * Now we can update each PartitionedRelPruneInfo's subplan_map with new
2192 : * subplan indexes. We must also recompute its present_parts bitmap.
2193 : */
2194 120 : for (i = 0; i < prunestate->num_partprunedata; i++)
2195 : {
2196 72 : PartitionPruningData *prunedata = prunestate->partprunedata[i];
2197 : int j;
2198 :
2199 : /*
2200 : * Within each hierarchy, we perform this loop in back-to-front order
2201 : * so that we determine present_parts for the lowest-level partitioned
2202 : * tables first. This way we can tell whether a sub-partitioned
2203 : * table's partitions were entirely pruned so we can exclude it from
2204 : * the current level's present_parts.
2205 : */
2206 264 : for (j = prunedata->num_partrelprunedata - 1; j >= 0; j--)
2207 : {
2208 192 : PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
2209 192 : int nparts = pprune->nparts;
2210 : int k;
2211 :
2212 : /* We just rebuild present_parts from scratch */
2213 192 : bms_free(pprune->present_parts);
2214 192 : pprune->present_parts = NULL;
2215 :
2216 708 : for (k = 0; k < nparts; k++)
2217 : {
2218 516 : int oldidx = pprune->subplan_map[k];
2219 : int subidx;
2220 :
2221 : /*
2222 : * If this partition existed as a subplan then change the old
2223 : * subplan index to the new subplan index. The new index may
2224 : * become -1 if the partition was pruned above, or it may just
2225 : * come earlier in the subplan list due to some subplans being
2226 : * removed earlier in the list. If it's a subpartition, add
2227 : * it to present_parts unless it's entirely pruned.
2228 : */
2229 516 : if (oldidx >= 0)
2230 : {
2231 : Assert(oldidx < n_total_subplans);
2232 396 : pprune->subplan_map[k] = new_subplan_indexes[oldidx] - 1;
2233 :
2234 396 : if (new_subplan_indexes[oldidx] > 0)
2235 114 : pprune->present_parts =
2236 114 : bms_add_member(pprune->present_parts, k);
2237 : }
2238 120 : else if ((subidx = pprune->subpart_map[k]) >= 0)
2239 : {
2240 : PartitionedRelPruningData *subprune;
2241 :
2242 120 : subprune = &prunedata->partrelprunedata[subidx];
2243 :
2244 120 : if (!bms_is_empty(subprune->present_parts))
2245 48 : pprune->present_parts =
2246 48 : bms_add_member(pprune->present_parts, k);
2247 : }
2248 : }
2249 : }
2250 : }
2251 :
2252 : /*
2253 : * We must also recompute the other_subplans set, since indexes in it may
2254 : * change.
2255 : */
2256 48 : new_other_subplans = NULL;
2257 48 : i = -1;
2258 72 : while ((i = bms_next_member(prunestate->other_subplans, i)) >= 0)
2259 24 : new_other_subplans = bms_add_member(new_other_subplans,
2260 24 : new_subplan_indexes[i] - 1);
2261 :
2262 48 : bms_free(prunestate->other_subplans);
2263 48 : prunestate->other_subplans = new_other_subplans;
2264 :
2265 48 : pfree(new_subplan_indexes);
2266 48 : }
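
/*
 * A worked example of the re-sequencing above: with n_total_subplans = 4
 * and initially_valid_subplans = {1, 3}, the temporary 1-based array is
 *
 *		new_subplan_indexes[] = {0, 1, 0, 2}
 *
 * so a subplan_map entry of 1 is rewritten to 0, an entry of 3 to 1, and
 * the pruned entries 0 and 2 become -1 (new_subplan_indexes[i] - 1, with 0
 * acting as the "pruned" sentinel).
 */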
2267 :
2268 : /*
2269 : * ExecFindMatchingSubPlans
2270 : * Determine which subplans match the pruning steps detailed in
2271 : * 'prunestate' for the current comparison expression values.
2272 : *
2273 : * Pass initial_prune if PARAM_EXEC Params cannot yet be evaluated. This
2274 : * differentiates the initial executor-time pruning step from later
2275 : * runtime pruning.
2276 : */
2277 : Bitmapset *
2278 3730 : ExecFindMatchingSubPlans(PartitionPruneState *prunestate,
2279 : bool initial_prune)
2280 : {
2281 3730 : Bitmapset *result = NULL;
2282 : MemoryContext oldcontext;
2283 : int i;
2284 :
2285 : /*
2286 : * Either we're here on the initial prune done during pruning
2287 : * initialization, or we're at a point where PARAM_EXEC Params can be
2288 : * evaluated *and* there are steps in which to do so.
2289 : */
2290 : Assert(initial_prune || prunestate->do_exec_prune);
2291 :
2292 : /*
2293 : * Switch to a temp context to avoid leaking memory in the executor's
2294 : * query-lifespan memory context.
2295 : */
2296 3730 : oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
2297 :
2298 : /*
2299 : * For each hierarchy, do the pruning tests, and add nondeletable
2300 : * subplans' indexes to "result".
2301 : */
2302 7502 : for (i = 0; i < prunestate->num_partprunedata; i++)
2303 : {
2304 3772 : PartitionPruningData *prunedata = prunestate->partprunedata[i];
2305 : PartitionedRelPruningData *pprune;
2306 :
2307 : /*
2308 : * We pass the zeroth item, belonging to the root table of the
2309 : * hierarchy, and find_matching_subplans_recurse() takes care of
2310 : * recursing to other (lower-level) parents as needed.
2311 : */
2312 3772 : pprune = &prunedata->partrelprunedata[0];
2313 3772 : find_matching_subplans_recurse(prunedata, pprune, initial_prune,
2314 : &result);
2315 :
2316 : /* Expression eval may have used space in ExprContext too */
2317 3772 : if (pprune->exec_pruning_steps)
2318 3434 : ResetExprContext(pprune->exec_context.exprcontext);
2319 : }
2320 :
2321 : /* Add in any subplans that partition pruning didn't account for */
2322 3730 : result = bms_add_members(result, prunestate->other_subplans);
2323 :
2324 3730 : MemoryContextSwitchTo(oldcontext);
2325 :
2326 : /* Copy result out of the temp context before we reset it */
2327 3730 : result = bms_copy(result);
2328 :
2329 3730 : MemoryContextReset(prunestate->prune_context);
2330 :
2331 3730 : return result;
2332 : }
2333 :
2334 : /*
2335 : * find_matching_subplans_recurse
2336 : * Recursive worker function for ExecFindMatchingSubPlans
2337 : *
2338 : * Adds valid (non-prunable) subplan IDs to *validsubplans
2339 : */
2340 : static void
2341 4186 : find_matching_subplans_recurse(PartitionPruningData *prunedata,
2342 : PartitionedRelPruningData *pprune,
2343 : bool initial_prune,
2344 : Bitmapset **validsubplans)
2345 : {
2346 : Bitmapset *partset;
2347 : int i;
2348 :
2349 : /* Guard against stack overflow due to overly deep partition hierarchy. */
2350 4186 : check_stack_depth();
2351 :
2352 : /*
2353 : * Prune as appropriate, if we have pruning steps matching the current
2354 : * execution context. Otherwise just include all partitions at this
2355 : * level.
2356 : */
2357 4186 : if (initial_prune && pprune->initial_pruning_steps)
2358 344 : partset = get_matching_partitions(&pprune->initial_context,
2359 : pprune->initial_pruning_steps);
2360 3842 : else if (!initial_prune && pprune->exec_pruning_steps)
2361 3500 : partset = get_matching_partitions(&pprune->exec_context,
2362 : pprune->exec_pruning_steps);
2363 : else
2364 342 : partset = pprune->present_parts;
2365 :
2366 : /* Translate partset into subplan indexes */
2367 4186 : i = -1;
2368 5852 : while ((i = bms_next_member(partset, i)) >= 0)
2369 : {
2370 1666 : if (pprune->subplan_map[i] >= 0)
2371 1250 : *validsubplans = bms_add_member(*validsubplans,
2372 1250 : pprune->subplan_map[i]);
2373 : else
2374 : {
2375 416 : int partidx = pprune->subpart_map[i];
2376 :
2377 416 : if (partidx >= 0)
2378 414 : find_matching_subplans_recurse(prunedata,
2379 : &prunedata->partrelprunedata[partidx],
2380 : initial_prune, validsubplans);
2381 : else
2382 : {
2383 : /*
2384 : * We get here if the planner already pruned all the sub-
2385 : * partitions for this partition. Silently ignore this
2386 : * partition in this case. The end result is the same: we
2387 : * would have pruned all partitions just the same, but we
2388 : * don't have any pruning steps to execute to verify this.
2389 : */
2390 : }
2391 : }
2392 : }
2393 4186 : }
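
/*
 * A worked example of the map lookups above (illustrative): for a parent
 * whose partitions are (leaf1, subpart1, leaf2), where the planner pruned
 * everything under index 2, the per-partition-index arrays could look like
 *
 *		subplan_map[] = { 0, -1, -1 }
 *		subpart_map[] = {-1,  1, -1 }
 *
 * Index 0 maps directly to subplan 0; index 1 has no subplan of its own and
 * recurses into partrelprunedata[1]; index 2 maps to neither, which is the
 * silently-ignored case in the final else branch.
 */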
|