Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * execPartition.c
4 : * Support routines for partitioning.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : * IDENTIFICATION
10 : * src/backend/executor/execPartition.c
11 : *
12 : *-------------------------------------------------------------------------
13 : */
14 : #include "postgres.h"
15 :
16 : #include "access/table.h"
17 : #include "access/tableam.h"
18 : #include "catalog/partition.h"
19 : #include "executor/execPartition.h"
20 : #include "executor/executor.h"
21 : #include "executor/nodeModifyTable.h"
22 : #include "foreign/fdwapi.h"
23 : #include "mb/pg_wchar.h"
24 : #include "miscadmin.h"
25 : #include "partitioning/partbounds.h"
26 : #include "partitioning/partdesc.h"
27 : #include "partitioning/partprune.h"
28 : #include "rewrite/rewriteManip.h"
29 : #include "utils/acl.h"
30 : #include "utils/lsyscache.h"
31 : #include "utils/partcache.h"
32 : #include "utils/rls.h"
33 : #include "utils/ruleutils.h"
34 :
35 :
36 : /*-----------------------
37 : * PartitionTupleRouting - Encapsulates all information required to
38 : * route a tuple inserted into a partitioned table to one of its leaf
39 : * partitions.
40 : *
41 : * partition_root
42 : * The partitioned table that's the target of the command.
43 : *
44 : * partition_dispatch_info
45 : * Array of 'max_dispatch' elements containing a pointer to a
46 : * PartitionDispatch object for every partitioned table touched by tuple
47 : * routing. The entry for the target partitioned table is *always*
48 : * present in the 0th element of this array. See comment for
49 : * PartitionDispatchData->indexes for details on how this array is
50 : * indexed.
51 : *
52 : * nonleaf_partitions
53 : * Array of 'max_dispatch' elements containing pointers to fake
54 : * ResultRelInfo objects for nonleaf partitions, useful for checking
55 : * the partition constraint.
56 : *
57 : * num_dispatch
58 : * The current number of items stored in the 'partition_dispatch_info'
59 : * array. Also serves as the index of the next free array element for
60 : * new PartitionDispatch objects that need to be stored.
61 : *
62 : * max_dispatch
63 : * The current allocated size of the 'partition_dispatch_info' array.
64 : *
65 : * partitions
66 : * Array of 'max_partitions' elements containing a pointer to a
67 : * ResultRelInfo for every leaf partition touched by tuple routing.
68 : * Some of these are pointers to ResultRelInfos which are borrowed out of
69 : * the owning ModifyTableState node. The remainder have been built
70 : * especially for tuple routing. See comment for
71 : * PartitionDispatchData->indexes for details on how this array is
72 : * indexed.
73 : *
74 : * is_borrowed_rel
75 : * Array of 'max_partitions' booleans recording whether a given entry
76 : * in 'partitions' is a ResultRelInfo pointer borrowed from the owning
77 : * ModifyTableState node, rather than being built here.
78 : *
79 : * num_partitions
80 : * The current number of items stored in the 'partitions' array. Also
81 : * serves as the index of the next free array element for new
82 : * ResultRelInfo objects that need to be stored.
83 : *
84 : * max_partitions
85 : * The current allocated size of the 'partitions' array.
86 : *
87 : * memcxt
88 : * Memory context used to allocate subsidiary structs.
89 : *-----------------------
90 : */
91 : struct PartitionTupleRouting
92 : {
93 : Relation partition_root;
94 : PartitionDispatch *partition_dispatch_info;
95 : ResultRelInfo **nonleaf_partitions;
96 : int num_dispatch;
97 : int max_dispatch;
98 : ResultRelInfo **partitions;
99 : bool *is_borrowed_rel;
100 : int num_partitions;
101 : int max_partitions;
102 : MemoryContext memcxt;
103 : };
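 : /*
 :  * A minimal usage sketch of this API (an editor's illustration, not taken
 :  * verbatim from any caller; real callers such as nodeModifyTable.c and
 :  * COPY FROM add considerably more bookkeeping):
 :  *
 :  *	proute = ExecSetupPartitionTupleRouting(estate, rootRel);
 :  *	for each tuple to be routed:
 :  *		rri = ExecFindPartition(mtstate, rootResultRelInfo, proute,
 :  *								slot, estate);
 :  *		... insert the tuple using the returned ResultRelInfo ...
 :  *	ExecCleanupTupleRouting(mtstate, proute);
 :  */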
104 :
105 : /*-----------------------
106 : * PartitionDispatch - information about one partitioned table in a partition
107 : * hierarchy required to route a tuple to any of its partitions. A
108 : * PartitionDispatch is always encapsulated inside a PartitionTupleRouting
109 : * struct and stored inside its 'partition_dispatch_info' array.
110 : *
111 : * reldesc
112 : * Relation descriptor of the table
113 : *
114 : * key
115 : * Partition key information of the table
116 : *
117 : * keystate
118 : * Execution state required for expressions in the partition key
119 : *
120 : * partdesc
121 : * Partition descriptor of the table
122 : *
123 : * tupslot
124 : * A standalone TupleTableSlot initialized with this table's tuple
125 : * descriptor, or NULL if no tuple conversion from the parent's rowtype
126 : * is required.
127 : *
128 : * tupmap
129 : * TupleConversionMap to convert from the parent's rowtype to this table's
130 : * rowtype (when extracting the partition key of a tuple just before
131 : * routing it through this table). A NULL value is stored if no tuple
132 : * conversion is required.
133 : *
134 : * indexes
135 : * Array of partdesc->nparts elements. For leaf partitions the index
136 : * corresponds to the partition's ResultRelInfo in the encapsulating
137 : * PartitionTupleRouting's partitions array. For sub-partitioned
138 : * partitions, the index corresponds to the PartitionDispatch for it in the
139 : * partition_dispatch_info array. -1 indicates we've not yet allocated
140 : * anything in PartitionTupleRouting for the partition.
141 : *-----------------------
142 : */
143 : typedef struct PartitionDispatchData
144 : {
145 : Relation reldesc;
146 : PartitionKey key;
147 : List *keystate; /* list of ExprState */
148 : PartitionDesc partdesc;
149 : TupleTableSlot *tupslot;
150 : AttrMap *tupmap;
151 : int indexes[FLEXIBLE_ARRAY_MEMBER];
152 : } PartitionDispatchData;
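 : /*
 :  * A worked example of the 'indexes' mapping above, under a hypothetical
 :  * hierarchy: if the root table has partitions p0 (a leaf) and p1 (itself
 :  * partitioned), then once tuples have been routed to both, the root's
 :  * PartitionDispatch might hold
 :  *
 :  *	indexes[0] == 2		p0's ResultRelInfo is proute->partitions[2]
 :  *	indexes[1] == 1		p1's PartitionDispatch is
 :  *						proute->partition_dispatch_info[1]
 :  *
 :  * while the indexes[] entries of partitions never routed to remain -1.
 :  */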
153 :
154 :
155 : static ResultRelInfo *ExecInitPartitionInfo(ModifyTableState *mtstate,
156 : EState *estate, PartitionTupleRouting *proute,
157 : PartitionDispatch dispatch,
158 : ResultRelInfo *rootResultRelInfo,
159 : int partidx);
160 : static void ExecInitRoutingInfo(ModifyTableState *mtstate,
161 : EState *estate,
162 : PartitionTupleRouting *proute,
163 : PartitionDispatch dispatch,
164 : ResultRelInfo *partRelInfo,
165 : int partidx,
166 : bool is_borrowed_rel);
167 : static PartitionDispatch ExecInitPartitionDispatchInfo(EState *estate,
168 : PartitionTupleRouting *proute,
169 : Oid partoid, PartitionDispatch parent_pd,
170 : int partidx, ResultRelInfo *rootResultRelInfo);
171 : static void FormPartitionKeyDatum(PartitionDispatch pd,
172 : TupleTableSlot *slot,
173 : EState *estate,
174 : Datum *values,
175 : bool *isnull);
176 : static int get_partition_for_tuple(PartitionDispatch pd, Datum *values,
177 : bool *isnull);
178 : static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
179 : Datum *values,
180 : bool *isnull,
181 : int maxfieldlen);
182 : static List *adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri);
183 : static List *adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap);
184 : static PartitionPruneState *CreatePartitionPruneState(EState *estate,
185 : PartitionPruneInfo *pruneinfo,
186 : Bitmapset **all_leafpart_rtis);
187 : static void InitPartitionPruneContext(PartitionPruneContext *context,
188 : List *pruning_steps,
189 : PartitionDesc partdesc,
190 : PartitionKey partkey,
191 : PlanState *planstate,
192 : ExprContext *econtext);
193 : static void InitExecPartitionPruneContexts(PartitionPruneState *prunestate,
194 : PlanState *parent_plan,
195 : Bitmapset *initially_valid_subplans,
196 : int n_total_subplans);
197 : static void find_matching_subplans_recurse(PartitionPruningData *prunedata,
198 : PartitionedRelPruningData *pprune,
199 : bool initial_prune,
200 : Bitmapset **validsubplans,
201 : Bitmapset **validsubplan_rtis);
202 :
203 :
204 : /*
205 : * ExecSetupPartitionTupleRouting - sets up information needed during
206 : * tuple routing for partitioned tables, encapsulates it in
207 : * PartitionTupleRouting, and returns it.
208 : *
209 : * Callers must use the returned PartitionTupleRouting during calls to
210 : * ExecFindPartition(). The actual ResultRelInfo for a partition is only
211 : * allocated when the partition is found for the first time.
212 : *
213 : * The current memory context is used to allocate this struct and all
214 : * subsidiary structs that will be allocated from it later on. Typically
215 : * it should be estate->es_query_cxt.
216 : */
217 : PartitionTupleRouting *
218 4966 : ExecSetupPartitionTupleRouting(EState *estate, Relation rel)
219 : {
220 : PartitionTupleRouting *proute;
221 :
222 : /*
223 : * Here we attempt to expend as little effort as possible in setting up
224 : * the PartitionTupleRouting. Each partition's ResultRelInfo is built on
225 : * demand, only when we actually need to route a tuple to that partition.
226 : * The reason for this is that a common case is for INSERT to insert a
227 : * single tuple into a partitioned table and this must be fast.
228 : */
229 4966 : proute = (PartitionTupleRouting *) palloc0(sizeof(PartitionTupleRouting));
230 4966 : proute->partition_root = rel;
231 4966 : proute->memcxt = CurrentMemoryContext;
232 : /* Rest of members initialized by zeroing */
233 :
234 : /*
235 : * Initialize this table's PartitionDispatch object. Here we pass in the
236 : * parent as NULL as we don't need to care about any parent of the target
237 : * partitioned table.
238 : */
239 4966 : ExecInitPartitionDispatchInfo(estate, proute, RelationGetRelid(rel),
240 : NULL, 0, NULL);
241 :
242 4966 : return proute;
243 : }
244 :
245 : /*
246 : * ExecFindPartition -- Return the ResultRelInfo for the leaf partition that
247 : * the tuple contained in *slot should belong to.
248 : *
249 : * If the partition's ResultRelInfo does not yet exist in 'proute' then we set
250 : * one up or reuse one from mtstate's resultRelInfo array. When reusing a
251 : * ResultRelInfo from the mtstate we verify that the relation is a valid
252 : * target for INSERTs and initialize tuple routing information.
253 : *
254 : * rootResultRelInfo is the relation named in the query.
255 : *
256 : * estate must be non-NULL; we'll need it to compute any expressions in the
257 : * partition keys. Also, its per-tuple contexts are used as evaluation
258 : * scratch space.
259 : *
260 : * If no leaf partition is found, this routine errors out with the appropriate
261 : * error message. An error may also be raised if the found target partition
262 : * is not a valid target for an INSERT.
263 : */
264 : ResultRelInfo *
265 1000874 : ExecFindPartition(ModifyTableState *mtstate,
266 : ResultRelInfo *rootResultRelInfo,
267 : PartitionTupleRouting *proute,
268 : TupleTableSlot *slot, EState *estate)
269 : {
270 1000874 : PartitionDispatch *pd = proute->partition_dispatch_info;
271 : Datum values[PARTITION_MAX_KEYS];
272 : bool isnull[PARTITION_MAX_KEYS];
273 : Relation rel;
274 : PartitionDispatch dispatch;
275 : PartitionDesc partdesc;
276 1000874 : ExprContext *ecxt = GetPerTupleExprContext(estate);
277 1000874 : TupleTableSlot *ecxt_scantuple_saved = ecxt->ecxt_scantuple;
278 1000874 : TupleTableSlot *rootslot = slot;
279 1000874 : TupleTableSlot *myslot = NULL;
280 : MemoryContext oldcxt;
281 1000874 : ResultRelInfo *rri = NULL;
282 :
283 : /* use per-tuple context here to avoid leaking memory */
284 1000874 : oldcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
285 :
286 : /*
287 : * First check the root table's partition constraint, if any. No point in
288 : * routing the tuple if it doesn't belong in the root table itself.
289 : */
290 1000874 : if (rootResultRelInfo->ri_RelationDesc->rd_rel->relispartition)
291 4496 : ExecPartitionCheck(rootResultRelInfo, slot, estate, true);
292 :
293 : /* start with the root partitioned table */
294 1000842 : dispatch = pd[0];
295 2114004 : while (dispatch != NULL)
296 : {
297 1113336 : int partidx = -1;
298 : bool is_leaf;
299 :
300 1113336 : CHECK_FOR_INTERRUPTS();
301 :
302 1113336 : rel = dispatch->reldesc;
303 1113336 : partdesc = dispatch->partdesc;
304 :
305 : /*
306 : * Extract partition key from tuple. Expression evaluation machinery
307 : * that FormPartitionKeyDatum() invokes expects ecxt_scantuple to
308 : * point to the correct tuple slot. The slot might have changed from
309 : * what was used for the parent table if the table of the current
310 : * partitioning level has a different tuple descriptor from the parent.
311 : * So update ecxt_scantuple accordingly.
312 : */
313 1113336 : ecxt->ecxt_scantuple = slot;
314 1113336 : FormPartitionKeyDatum(dispatch, slot, estate, values, isnull);
315 :
316 : /*
317 : * If this partitioned table has no partitions or no partition for
318 : * these values, error out.
319 : */
320 2226630 : if (partdesc->nparts == 0 ||
321 1113294 : (partidx = get_partition_for_tuple(dispatch, values, isnull)) < 0)
322 : {
323 : char *val_desc;
324 :
325 154 : val_desc = ExecBuildSlotPartitionKeyDescription(rel,
326 : values, isnull, 64);
327 : Assert(OidIsValid(RelationGetRelid(rel)));
328 154 : ereport(ERROR,
329 : (errcode(ERRCODE_CHECK_VIOLATION),
330 : errmsg("no partition of relation \"%s\" found for row",
331 : RelationGetRelationName(rel)),
332 : val_desc ?
333 : errdetail("Partition key of the failing row contains %s.",
334 : val_desc) : 0,
335 : errtable(rel)));
336 : }
337 :
338 1113182 : is_leaf = partdesc->is_leaf[partidx];
339 1113182 : if (is_leaf)
340 : {
341 : /*
342 : * We've reached the leaf -- hurray, we're done. Look to see if
343 : * we've already got a ResultRelInfo for this partition.
344 : */
345 1000686 : if (likely(dispatch->indexes[partidx] >= 0))
346 : {
347 : /* ResultRelInfo already built */
348 : Assert(dispatch->indexes[partidx] < proute->num_partitions);
349 993912 : rri = proute->partitions[dispatch->indexes[partidx]];
350 : }
351 : else
352 : {
353 : /*
354 : * If the partition is known in the owning ModifyTableState
355 : * node, we can re-use that ResultRelInfo instead of creating
356 : * a new one with ExecInitPartitionInfo().
357 : */
358 6774 : rri = ExecLookupResultRelByOid(mtstate,
359 6774 : partdesc->oids[partidx],
360 : true, false);
361 6774 : if (rri)
362 : {
363 : /* Verify this ResultRelInfo allows INSERTs */
364 500 : CheckValidResultRel(rri, CMD_INSERT, NIL);
365 :
366 : /*
367 : * Initialize information needed to insert this and
368 : * subsequent tuples routed to this partition.
369 : */
370 500 : ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
371 : rri, partidx, true);
372 : }
373 : else
374 : {
375 : /* We need to create a new one. */
376 6274 : rri = ExecInitPartitionInfo(mtstate, estate, proute,
377 : dispatch,
378 : rootResultRelInfo, partidx);
379 : }
380 : }
381 : Assert(rri != NULL);
382 :
383 : /* Signal to terminate the loop */
384 1000668 : dispatch = NULL;
385 : }
386 : else
387 : {
388 : /*
389 : * Partition is a sub-partitioned table; get the PartitionDispatch
390 : */
391 112496 : if (likely(dispatch->indexes[partidx] >= 0))
392 : {
393 : /* Already built. */
394 : Assert(dispatch->indexes[partidx] < proute->num_dispatch);
395 :
396 111332 : rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
397 :
398 : /*
399 : * Move down to the next partition level and search again
400 : * until we find a leaf partition that matches this tuple
401 : */
402 111332 : dispatch = pd[dispatch->indexes[partidx]];
403 : }
404 : else
405 : {
406 : /* Not yet built. Do that now. */
407 : PartitionDispatch subdispatch;
408 :
409 : /*
410 : * Create the new PartitionDispatch. We pass the current one
411 : * in as the parent PartitionDispatch
412 : */
413 1164 : subdispatch = ExecInitPartitionDispatchInfo(estate,
414 : proute,
415 1164 : partdesc->oids[partidx],
416 : dispatch, partidx,
417 : mtstate->rootResultRelInfo);
418 : Assert(dispatch->indexes[partidx] >= 0 &&
419 : dispatch->indexes[partidx] < proute->num_dispatch);
420 :
421 1164 : rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
422 1164 : dispatch = subdispatch;
423 : }
424 :
425 : /*
426 : * Convert the tuple to the new parent's layout, if different from
427 : * the previous parent.
428 : */
429 112496 : if (dispatch->tupslot)
430 : {
431 61692 : AttrMap *map = dispatch->tupmap;
432 61692 : TupleTableSlot *tempslot = myslot;
433 :
434 61692 : myslot = dispatch->tupslot;
435 61692 : slot = execute_attr_map_slot(map, slot, myslot);
436 :
437 61692 : if (tempslot != NULL)
438 294 : ExecClearTuple(tempslot);
439 : }
440 : }
441 :
442 : /*
443 : * If this partition is the default one, we must check its partition
444 : * constraint now, which may have changed concurrently due to
445 : * partitions being added to the parent.
446 : *
447 : * (We do this here, and do not rely on ExecInsert doing it, because
448 : * we don't want to miss doing it for non-leaf partitions.)
449 : */
450 1113164 : if (partidx == partdesc->boundinfo->default_index)
451 : {
452 : /*
453 : * The tuple must match the partition's layout for the constraint
454 : * expression to be evaluated successfully. If the partition is
455 : * sub-partitioned, that would already be the case due to the code
456 : * above, but for a leaf partition the tuple still matches the
457 : * parent's layout.
458 : *
459 : * Note that we have a map to convert from root to current
460 : * partition, but not from immediate parent to current partition.
461 : * So if we have to convert, do it from the root slot; if not, use
462 : * the root slot as-is.
463 : */
464 594 : if (is_leaf)
465 : {
466 550 : TupleConversionMap *map = ExecGetRootToChildMap(rri, estate);
467 :
468 550 : if (map)
469 162 : slot = execute_attr_map_slot(map->attrMap, rootslot,
470 : rri->ri_PartitionTupleSlot);
471 : else
472 388 : slot = rootslot;
473 : }
474 :
475 594 : ExecPartitionCheck(rri, slot, estate, true);
476 : }
477 : }
478 :
479 : /* Release the tuple in the lowest parent's dedicated slot. */
480 1000668 : if (myslot != NULL)
481 61360 : ExecClearTuple(myslot);
482 : /* and restore ecxt's scantuple */
483 1000668 : ecxt->ecxt_scantuple = ecxt_scantuple_saved;
484 1000668 : MemoryContextSwitchTo(oldcxt);
485 :
486 1000668 : return rri;
487 : }
488 :
489 : /*
490 : * ExecInitPartitionInfo
491 : * Lock the partition and initialize ResultRelInfo. Also set up other
492 : * information for the partition and store it in the next empty slot in
493 : * the proute->partitions array.
494 : *
495 : * Returns the ResultRelInfo
496 : */
497 : static ResultRelInfo *
498 6274 : ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
499 : PartitionTupleRouting *proute,
500 : PartitionDispatch dispatch,
501 : ResultRelInfo *rootResultRelInfo,
502 : int partidx)
503 : {
504 6274 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
505 6274 : Oid partOid = dispatch->partdesc->oids[partidx];
506 : Relation partrel;
507 6274 : int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
508 6274 : Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
509 : ResultRelInfo *leaf_part_rri;
510 : MemoryContext oldcxt;
511 6274 : AttrMap *part_attmap = NULL;
512 : bool found_whole_row;
513 :
514 6274 : oldcxt = MemoryContextSwitchTo(proute->memcxt);
515 :
516 6274 : partrel = table_open(partOid, RowExclusiveLock);
517 :
518 6274 : leaf_part_rri = makeNode(ResultRelInfo);
519 6274 : InitResultRelInfo(leaf_part_rri,
520 : partrel,
521 : 0,
522 : rootResultRelInfo,
523 : estate->es_instrument);
524 :
525 : /*
526 : * Verify that the result relation is a valid target for an INSERT. An
527 : * UPDATE of the partition key becomes a DELETE+INSERT operation, so this
528 : * check is still required when the operation is CMD_UPDATE.
529 : */
530 6274 : CheckValidResultRel(leaf_part_rri, CMD_INSERT, NIL);
531 :
532 : /*
533 : * Open partition indices. The user may have asked to check for conflicts
534 : * within this leaf partition and do "nothing" instead of throwing an
535 : * error. Be prepared in that case by initializing the index information
536 : * needed by ExecInsert() to perform speculative insertions.
537 : */
538 6268 : if (partrel->rd_rel->relhasindex &&
539 1714 : leaf_part_rri->ri_IndexRelationDescs == NULL)
540 1714 : ExecOpenIndices(leaf_part_rri,
541 3254 : (node != NULL &&
542 1540 : node->onConflictAction != ONCONFLICT_NONE));
543 :
544 : /*
545 : * Build WITH CHECK OPTION constraints for the partition. Note that we
546 : * didn't build the withCheckOptionList for partitions within the planner,
547 : * but simple translation of varattnos will suffice. This only occurs for
548 : * the INSERT case or in the case of UPDATE/MERGE tuple routing where we
549 : * didn't find a result rel to reuse.
550 : */
551 6268 : if (node && node->withCheckOptionLists != NIL)
552 : {
553 : List *wcoList;
554 96 : List *wcoExprs = NIL;
555 : ListCell *ll;
556 :
557 : /*
558 : * In the case of INSERT on a partitioned table, there is only one
559 : * plan. Likewise, there is only one WCO list, not one per partition.
560 : * For UPDATE/MERGE, there are as many WCO lists as there are plans.
561 : */
562 : Assert((node->operation == CMD_INSERT &&
563 : list_length(node->withCheckOptionLists) == 1 &&
564 : list_length(node->resultRelations) == 1) ||
565 : (node->operation == CMD_UPDATE &&
566 : list_length(node->withCheckOptionLists) ==
567 : list_length(node->resultRelations)) ||
568 : (node->operation == CMD_MERGE &&
569 : list_length(node->withCheckOptionLists) ==
570 : list_length(node->resultRelations)));
571 :
572 : /*
573 : * Use the WCO list of the first plan as a reference to calculate
574 : * attno's for the WCO list of this partition. In the INSERT case,
575 : * that refers to the root partitioned table, whereas in the UPDATE
576 : * tuple routing case, that refers to the first partition in the
577 : * mtstate->resultRelInfo array. In any case, both that relation and
578 : * this partition should have the same columns, so we should be able
579 : * to map attributes successfully.
580 : */
581 96 : wcoList = linitial(node->withCheckOptionLists);
582 :
583 : /*
584 : * Convert Vars in it to contain this partition's attribute numbers.
585 : */
586 : part_attmap =
587 96 : build_attrmap_by_name(RelationGetDescr(partrel),
588 : RelationGetDescr(firstResultRel),
589 : false);
590 : wcoList = (List *)
591 96 : map_variable_attnos((Node *) wcoList,
592 : firstVarno, 0,
593 : part_attmap,
594 96 : RelationGetForm(partrel)->reltype,
595 : &found_whole_row);
596 : /* We ignore the value of found_whole_row. */
597 :
598 270 : foreach(ll, wcoList)
599 : {
600 174 : WithCheckOption *wco = lfirst_node(WithCheckOption, ll);
601 174 : ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
602 : &mtstate->ps);
603 :
604 174 : wcoExprs = lappend(wcoExprs, wcoExpr);
605 : }
606 :
607 96 : leaf_part_rri->ri_WithCheckOptions = wcoList;
608 96 : leaf_part_rri->ri_WithCheckOptionExprs = wcoExprs;
609 : }
610 :
611 : /*
612 : * Build the RETURNING projection for the partition. Note that we didn't
613 : * build the returningList for partitions within the planner, but simple
614 : * translation of varattnos will suffice. This only occurs for the INSERT
615 : * case or in the case of UPDATE/MERGE tuple routing where we didn't find
616 : * a result rel to reuse.
617 : */
618 6268 : if (node && node->returningLists != NIL)
619 : {
620 : TupleTableSlot *slot;
621 : ExprContext *econtext;
622 : List *returningList;
623 :
624 : /* See the comment above for WCO lists. */
625 : Assert((node->operation == CMD_INSERT &&
626 : list_length(node->returningLists) == 1 &&
627 : list_length(node->resultRelations) == 1) ||
628 : (node->operation == CMD_UPDATE &&
629 : list_length(node->returningLists) ==
630 : list_length(node->resultRelations)) ||
631 : (node->operation == CMD_MERGE &&
632 : list_length(node->returningLists) ==
633 : list_length(node->resultRelations)));
634 :
635 : /*
636 : * Use the RETURNING list of the first plan as a reference to
637 : * calculate attno's for the RETURNING list of this partition. See
638 : * the comment above for WCO lists for more details on why this is
639 : * okay.
640 : */
641 212 : returningList = linitial(node->returningLists);
642 :
643 : /*
644 : * Convert Vars in it to contain this partition's attribute numbers.
645 : */
646 212 : if (part_attmap == NULL)
647 : part_attmap =
648 212 : build_attrmap_by_name(RelationGetDescr(partrel),
649 : RelationGetDescr(firstResultRel),
650 : false);
651 : returningList = (List *)
652 212 : map_variable_attnos((Node *) returningList,
653 : firstVarno, 0,
654 : part_attmap,
655 212 : RelationGetForm(partrel)->reltype,
656 : &found_whole_row);
657 : /* We ignore the value of found_whole_row. */
658 :
659 212 : leaf_part_rri->ri_returningList = returningList;
660 :
661 : /*
662 : * Initialize the projection itself.
663 : *
664 : * Use the slot and the expression context that would have been set up
665 : * in ExecInitModifyTable() for projection's output.
666 : */
667 : Assert(mtstate->ps.ps_ResultTupleSlot != NULL);
668 212 : slot = mtstate->ps.ps_ResultTupleSlot;
669 : Assert(mtstate->ps.ps_ExprContext != NULL);
670 212 : econtext = mtstate->ps.ps_ExprContext;
671 212 : leaf_part_rri->ri_projectReturning =
672 212 : ExecBuildProjectionInfo(returningList, econtext, slot,
673 : &mtstate->ps, RelationGetDescr(partrel));
674 : }
675 :
676 : /* Set up information needed for routing tuples to the partition. */
677 6268 : ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
678 : leaf_part_rri, partidx, false);
679 :
680 : /*
681 : * If there is an ON CONFLICT clause, initialize state for it.
682 : */
683 6268 : if (node && node->onConflictAction != ONCONFLICT_NONE)
684 : {
685 222 : TupleDesc partrelDesc = RelationGetDescr(partrel);
686 222 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
687 : ListCell *lc;
688 222 : List *arbiterIndexes = NIL;
689 :
690 : /*
691 : * If there is a list of arbiter indexes, map it to a list of indexes
692 : * in the partition. We do that by scanning the partition's index
693 : * list and searching for ancestry relationships to each index in the
694 : * ancestor table.
695 : */
696 222 : if (rootResultRelInfo->ri_onConflictArbiterIndexes != NIL)
697 : {
698 : List *childIdxs;
699 :
700 172 : childIdxs = RelationGetIndexList(leaf_part_rri->ri_RelationDesc);
701 :
702 356 : foreach(lc, childIdxs)
703 : {
704 184 : Oid childIdx = lfirst_oid(lc);
705 : List *ancestors;
706 : ListCell *lc2;
707 :
708 184 : ancestors = get_partition_ancestors(childIdx);
709 368 : foreach(lc2, rootResultRelInfo->ri_onConflictArbiterIndexes)
710 : {
711 184 : if (list_member_oid(ancestors, lfirst_oid(lc2)))
712 172 : arbiterIndexes = lappend_oid(arbiterIndexes, childIdx);
713 : }
714 184 : list_free(ancestors);
715 : }
716 : }
717 :
718 : /*
719 : * If the resulting lists are of unequal length, something is wrong.
720 : * (This shouldn't happen, since arbiter index selection should not
721 : * pick up an invalid index.)
722 : */
723 444 : if (list_length(rootResultRelInfo->ri_onConflictArbiterIndexes) !=
724 222 : list_length(arbiterIndexes))
725 0 : elog(ERROR, "invalid arbiter index list");
726 222 : leaf_part_rri->ri_onConflictArbiterIndexes = arbiterIndexes;
727 :
728 : /*
729 : * In the DO UPDATE case, we have some more state to initialize.
730 : */
731 222 : if (node->onConflictAction == ONCONFLICT_UPDATE)
732 : {
733 166 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
734 : TupleConversionMap *map;
735 :
736 166 : map = ExecGetRootToChildMap(leaf_part_rri, estate);
737 :
738 : Assert(node->onConflictSet != NIL);
739 : Assert(rootResultRelInfo->ri_onConflict != NULL);
740 :
741 166 : leaf_part_rri->ri_onConflict = onconfl;
742 :
743 : /*
744 : * Need a separate existing slot for each partition, as the
745 : * partition could be of a different AM, even if the tuple
746 : * descriptors match.
747 : */
748 166 : onconfl->oc_Existing =
749 166 : table_slot_create(leaf_part_rri->ri_RelationDesc,
750 166 : &mtstate->ps.state->es_tupleTable);
751 :
752 : /*
753 : * If the partition's tuple descriptor matches exactly the root
754 : * parent (the common case), we can re-use most of the parent's ON
755 : * CONFLICT SET state, skipping a bunch of work. Otherwise, we
756 : * need to create state specific to this partition.
757 : */
758 166 : if (map == NULL)
759 : {
760 : /*
761 : * It's safe to reuse these from the partition root, as we
762 : * only process one tuple at a time (therefore we won't
763 : * overwrite needed data in slots), and the results of
764 : * projections are independent of the underlying storage.
765 : * Projections and where clauses themselves don't store state
766 : * / are independent of the underlying storage.
767 : */
768 90 : onconfl->oc_ProjSlot =
769 90 : rootResultRelInfo->ri_onConflict->oc_ProjSlot;
770 90 : onconfl->oc_ProjInfo =
771 90 : rootResultRelInfo->ri_onConflict->oc_ProjInfo;
772 90 : onconfl->oc_WhereClause =
773 90 : rootResultRelInfo->ri_onConflict->oc_WhereClause;
774 : }
775 : else
776 : {
777 : List *onconflset;
778 : List *onconflcols;
779 :
780 : /*
781 : * Translate expressions in onConflictSet to account for
782 : * different attribute numbers. For that, map partition
783 : * varattnos twice: first to catch the EXCLUDED
784 : * pseudo-relation (INNER_VAR), and second to handle the main
785 : * target relation (firstVarno).
786 : */
787 76 : onconflset = copyObject(node->onConflictSet);
788 76 : if (part_attmap == NULL)
789 : part_attmap =
790 70 : build_attrmap_by_name(RelationGetDescr(partrel),
791 : RelationGetDescr(firstResultRel),
792 : false);
793 : onconflset = (List *)
794 76 : map_variable_attnos((Node *) onconflset,
795 : INNER_VAR, 0,
796 : part_attmap,
797 76 : RelationGetForm(partrel)->reltype,
798 : &found_whole_row);
799 : /* We ignore the value of found_whole_row. */
800 : onconflset = (List *)
801 76 : map_variable_attnos((Node *) onconflset,
802 : firstVarno, 0,
803 : part_attmap,
804 76 : RelationGetForm(partrel)->reltype,
805 : &found_whole_row);
806 : /* We ignore the value of found_whole_row. */
807 :
808 : /* Finally, adjust the target colnos to match the partition. */
809 76 : onconflcols = adjust_partition_colnos(node->onConflictCols,
810 : leaf_part_rri);
811 :
812 : /* create the tuple slot for the UPDATE SET projection */
813 76 : onconfl->oc_ProjSlot =
814 76 : table_slot_create(partrel,
815 76 : &mtstate->ps.state->es_tupleTable);
816 :
817 : /* build UPDATE SET projection state */
818 76 : onconfl->oc_ProjInfo =
819 76 : ExecBuildUpdateProjection(onconflset,
820 : true,
821 : onconflcols,
822 : partrelDesc,
823 : econtext,
824 : onconfl->oc_ProjSlot,
825 : &mtstate->ps);
826 :
827 : /*
828 : * If there is a WHERE clause, initialize state where it will
829 : * be evaluated, mapping the attribute numbers appropriately.
830 : * As with onConflictSet, we need to map partition varattnos
831 : * to the partition's tupdesc.
832 : */
833 76 : if (node->onConflictWhere)
834 : {
835 : List *clause;
836 :
837 30 : clause = copyObject((List *) node->onConflictWhere);
838 : clause = (List *)
839 30 : map_variable_attnos((Node *) clause,
840 : INNER_VAR, 0,
841 : part_attmap,
842 30 : RelationGetForm(partrel)->reltype,
843 : &found_whole_row);
844 : /* We ignore the value of found_whole_row. */
845 : clause = (List *)
846 30 : map_variable_attnos((Node *) clause,
847 : firstVarno, 0,
848 : part_attmap,
849 30 : RelationGetForm(partrel)->reltype,
850 : &found_whole_row);
851 : /* We ignore the value of found_whole_row. */
852 30 : onconfl->oc_WhereClause =
853 30 : ExecInitQual((List *) clause, &mtstate->ps);
854 : }
855 : }
856 : }
857 : }
858 :
859 : /*
860 : * Since we've just initialized this ResultRelInfo, it's not in any list
861 : * attached to the estate as yet. Add it, so that it can be found later.
862 : *
863 : * Note that the entries in this list appear in no predetermined order,
864 : * because partition result rels are initialized as and when they're
865 : * needed.
866 : */
867 6268 : MemoryContextSwitchTo(estate->es_query_cxt);
868 6268 : estate->es_tuple_routing_result_relations =
869 6268 : lappend(estate->es_tuple_routing_result_relations,
870 : leaf_part_rri);
871 :
872 : /*
873 : * Initialize information about this partition that's needed to handle
874 : * MERGE. We take the "first" result relation's mergeActionList as
875 : * reference and make a copy for this relation, converting stuff that
876 : * references attribute numbers to match this relation's.
877 : *
878 : * This duplicates much of the logic in ExecInitMerge(), so if something
879 : * changes there, look here too.
880 : */
881 6268 : if (node && node->operation == CMD_MERGE)
882 : {
883 24 : List *firstMergeActionList = linitial(node->mergeActionLists);
884 : ListCell *lc;
885 24 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
886 : Node *joinCondition;
887 :
888 24 : if (part_attmap == NULL)
889 : part_attmap =
890 12 : build_attrmap_by_name(RelationGetDescr(partrel),
891 : RelationGetDescr(firstResultRel),
892 : false);
893 :
894 24 : if (unlikely(!leaf_part_rri->ri_projectNewInfoValid))
895 24 : ExecInitMergeTupleSlots(mtstate, leaf_part_rri);
896 :
897 : /* Initialize state for join condition checking. */
898 : joinCondition =
899 24 : map_variable_attnos(linitial(node->mergeJoinConditions),
900 : firstVarno, 0,
901 : part_attmap,
902 24 : RelationGetForm(partrel)->reltype,
903 : &found_whole_row);
904 : /* We ignore the value of found_whole_row. */
905 24 : leaf_part_rri->ri_MergeJoinCondition =
906 24 : ExecInitQual((List *) joinCondition, &mtstate->ps);
907 :
908 60 : foreach(lc, firstMergeActionList)
909 : {
910 : /* Make a copy for this relation to be safe. */
911 36 : MergeAction *action = copyObject(lfirst(lc));
912 : MergeActionState *action_state;
913 :
914 : /* Generate the action's state for this relation */
915 36 : action_state = makeNode(MergeActionState);
916 36 : action_state->mas_action = action;
917 :
918 : /* And put the action in the appropriate list */
919 72 : leaf_part_rri->ri_MergeActions[action->matchKind] =
920 36 : lappend(leaf_part_rri->ri_MergeActions[action->matchKind],
921 : action_state);
922 :
923 36 : switch (action->commandType)
924 : {
925 12 : case CMD_INSERT:
926 :
927 : /*
928 : * ExecCheckPlanOutput() already done on the targetlist
929 : * when "first" result relation initialized and it is same
930 : * for all result relations.
931 : */
932 12 : action_state->mas_proj =
933 12 : ExecBuildProjectionInfo(action->targetList, econtext,
934 : leaf_part_rri->ri_newTupleSlot,
935 : &mtstate->ps,
936 : RelationGetDescr(partrel));
937 12 : break;
938 18 : case CMD_UPDATE:
939 :
940 : /*
941 : * Convert updateColnos from "first" result relation
942 : * attribute numbers to this result rel's.
943 : */
944 18 : if (part_attmap)
945 18 : action->updateColnos =
946 18 : adjust_partition_colnos_using_map(action->updateColnos,
947 : part_attmap);
948 18 : action_state->mas_proj =
949 18 : ExecBuildUpdateProjection(action->targetList,
950 : true,
951 : action->updateColnos,
952 18 : RelationGetDescr(leaf_part_rri->ri_RelationDesc),
953 : econtext,
954 : leaf_part_rri->ri_newTupleSlot,
955 : NULL);
956 18 : break;
957 6 : case CMD_DELETE:
958 : case CMD_NOTHING:
959 : /* Nothing to do */
960 6 : break;
961 :
962 0 : default:
963 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
964 : }
965 :
966 : /* found_whole_row intentionally ignored. */
967 36 : action->qual =
968 36 : map_variable_attnos(action->qual,
969 : firstVarno, 0,
970 : part_attmap,
971 36 : RelationGetForm(partrel)->reltype,
972 : &found_whole_row);
973 36 : action_state->mas_whenqual =
974 36 : ExecInitQual((List *) action->qual, &mtstate->ps);
975 : }
976 : }
977 6268 : MemoryContextSwitchTo(oldcxt);
978 :
979 6268 : return leaf_part_rri;
980 : }
981 :
982 : /*
983 : * ExecInitRoutingInfo
984 : * Set up information needed for translating tuples between root
985 : * partitioned table format and partition format, and keep track of it
986 : * in PartitionTupleRouting.
987 : */
988 : static void
989 6768 : ExecInitRoutingInfo(ModifyTableState *mtstate,
990 : EState *estate,
991 : PartitionTupleRouting *proute,
992 : PartitionDispatch dispatch,
993 : ResultRelInfo *partRelInfo,
994 : int partidx,
995 : bool is_borrowed_rel)
996 : {
997 : MemoryContext oldcxt;
998 : int rri_index;
999 :
1000 6768 : oldcxt = MemoryContextSwitchTo(proute->memcxt);
1001 :
1002 : /*
1003 : * Set up tuple conversion between the root parent and the partition if the
1004 : * two have different rowtypes. If conversion is indeed required, also
1005 : * initialize a slot dedicated to storing this partition's converted
1006 : * tuples. Various operations that are applied to tuples after routing,
1007 : * such as checking constraints, will refer to this slot.
1008 : */
1009 6768 : if (ExecGetRootToChildMap(partRelInfo, estate) != NULL)
1010 : {
1011 1298 : Relation partrel = partRelInfo->ri_RelationDesc;
1012 :
1013 : /*
1014 : * This pins the partition's TupleDesc, which will be released at the
1015 : * end of the command.
1016 : */
1017 1298 : partRelInfo->ri_PartitionTupleSlot =
1018 1298 : table_slot_create(partrel, &estate->es_tupleTable);
1019 : }
1020 : else
1021 5470 : partRelInfo->ri_PartitionTupleSlot = NULL;
1022 :
1023 : /*
1024 : * If the partition is a foreign table, let the FDW init itself for
1025 : * routing tuples to the partition.
1026 : */
1027 6768 : if (partRelInfo->ri_FdwRoutine != NULL &&
1028 84 : partRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL)
1029 84 : partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);
1030 :
1031 : /*
1032 : * Determine if the FDW supports batch insert and determine the batch size
1033 : * (an FDW may support batching, but it may be disabled for the
1034 : * server/table or for this particular query).
1035 : *
1036 : * If the FDW does not support batching, we set the batch size to 1.
1037 : */
1038 6756 : if (partRelInfo->ri_FdwRoutine != NULL &&
1039 72 : partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
1040 72 : partRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
1041 72 : partRelInfo->ri_BatchSize =
1042 72 : partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(partRelInfo);
1043 : else
1044 6684 : partRelInfo->ri_BatchSize = 1;
1045 :
1046 : Assert(partRelInfo->ri_BatchSize >= 1);
1047 :
1048 6756 : partRelInfo->ri_CopyMultiInsertBuffer = NULL;
1049 :
1050 : /*
1051 : * Keep track of it in the PartitionTupleRouting->partitions array.
1052 : */
1053 : Assert(dispatch->indexes[partidx] == -1);
1054 :
1055 6756 : rri_index = proute->num_partitions++;
1056 :
1057 : /* Allocate or enlarge the array, as needed */
1058 6756 : if (proute->num_partitions >= proute->max_partitions)
1059 : {
1060 4680 : if (proute->max_partitions == 0)
1061 : {
1062 4668 : proute->max_partitions = 8;
1063 4668 : proute->partitions = (ResultRelInfo **)
1064 4668 : palloc(sizeof(ResultRelInfo *) * proute->max_partitions);
1065 4668 : proute->is_borrowed_rel = (bool *)
1066 4668 : palloc(sizeof(bool) * proute->max_partitions);
1067 : }
1068 : else
1069 : {
1070 12 : proute->max_partitions *= 2;
1071 12 : proute->partitions = (ResultRelInfo **)
1072 12 : repalloc(proute->partitions, sizeof(ResultRelInfo *) *
1073 12 : proute->max_partitions);
1074 12 : proute->is_borrowed_rel = (bool *)
1075 12 : repalloc(proute->is_borrowed_rel, sizeof(bool) *
1076 12 : proute->max_partitions);
1077 : }
1078 : }
1079 :
1080 6756 : proute->partitions[rri_index] = partRelInfo;
1081 6756 : proute->is_borrowed_rel[rri_index] = is_borrowed_rel;
1082 6756 : dispatch->indexes[partidx] = rri_index;
1083 :
1084 6756 : MemoryContextSwitchTo(oldcxt);
1085 6756 : }
1086 :
1087 : /*
1088 : * ExecInitPartitionDispatchInfo
1089 : * Lock the partitioned table (if not locked already) and initialize
1090 : * PartitionDispatch for a partitioned table and store it in the next
1091 : * available slot in the proute->partition_dispatch_info array. Also,
1092 : * record that index in element partidx of the parent_pd->indexes[]
1093 : * array so that we can properly retrieve the newly created
1094 : * PartitionDispatch later.
1095 : */
1096 : static PartitionDispatch
1097 6130 : ExecInitPartitionDispatchInfo(EState *estate,
1098 : PartitionTupleRouting *proute, Oid partoid,
1099 : PartitionDispatch parent_pd, int partidx,
1100 : ResultRelInfo *rootResultRelInfo)
1101 : {
1102 : Relation rel;
1103 : PartitionDesc partdesc;
1104 : PartitionDispatch pd;
1105 : int dispatchidx;
1106 : MemoryContext oldcxt;
1107 :
1108 : /*
1109 : * For data modification, it is better that the executor does not include
1110 : * partitions being detached, except when running in snapshot-isolation
1111 : * mode. This means that a read-committed transaction immediately gets a
1112 : * "no partition for tuple" error when a tuple is inserted into a
1113 : * partition that's being detached concurrently, but a transaction in
1114 : * repeatable-read mode can still use such a partition.
1115 : */
1116 6130 : if (estate->es_partition_directory == NULL)
1117 4930 : estate->es_partition_directory =
1118 4930 : CreatePartitionDirectory(estate->es_query_cxt,
1119 : !IsolationUsesXactSnapshot());
1120 :
1121 6130 : oldcxt = MemoryContextSwitchTo(proute->memcxt);
1122 :
1123 : /*
1124 : * Only sub-partitioned tables need to be locked here. The root
1125 : * partitioned table will already have been locked as it's referenced in
1126 : * the query's rtable.
1127 : */
1128 6130 : if (partoid != RelationGetRelid(proute->partition_root))
1129 1164 : rel = table_open(partoid, RowExclusiveLock);
1130 : else
1131 4966 : rel = proute->partition_root;
1132 6130 : partdesc = PartitionDirectoryLookup(estate->es_partition_directory, rel);
1133 :
1134 6130 : pd = (PartitionDispatch) palloc(offsetof(PartitionDispatchData, indexes) +
1135 6130 : partdesc->nparts * sizeof(int));
1136 6130 : pd->reldesc = rel;
1137 6130 : pd->key = RelationGetPartitionKey(rel);
1138 6130 : pd->keystate = NIL;
1139 6130 : pd->partdesc = partdesc;
1140 6130 : if (parent_pd != NULL)
1141 : {
1142 1164 : TupleDesc tupdesc = RelationGetDescr(rel);
1143 :
1144 : /*
1145 : * For sub-partitioned tables where the column order differs from its
1146 : * direct parent partitioned table, we must store a tuple table slot
1147 : * initialized with its tuple descriptor and a tuple conversion map to
1148 : * convert a tuple from its parent's rowtype to its own. This is to
1149 : * make sure that we are looking at the correct row using the correct
1150 : * tuple descriptor when computing its partition key for tuple
1151 : * routing.
1152 : */
1153 1164 : pd->tupmap = build_attrmap_by_name_if_req(RelationGetDescr(parent_pd->reldesc),
1154 : tupdesc,
1155 : false);
1156 1164 : pd->tupslot = pd->tupmap ?
1157 1164 : MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual) : NULL;
1158 : }
1159 : else
1160 : {
1161 : /* Not required for the root partitioned table */
1162 4966 : pd->tupmap = NULL;
1163 4966 : pd->tupslot = NULL;
1164 : }
1165 :
1166 : /*
1167 : * Initialize with -1 to signify that the corresponding partition's
1168 : * ResultRelInfo or PartitionDispatch has not been created yet.
1169 : */
1170 6130 : memset(pd->indexes, -1, sizeof(int) * partdesc->nparts);
1171 :
1172 : /* Track in PartitionTupleRouting for later use */
1173 6130 : dispatchidx = proute->num_dispatch++;
1174 :
1175 : /* Allocate or enlarge the array, as needed */
1176 6130 : if (proute->num_dispatch >= proute->max_dispatch)
1177 : {
1178 4966 : if (proute->max_dispatch == 0)
1179 : {
1180 4966 : proute->max_dispatch = 4;
1181 4966 : proute->partition_dispatch_info = (PartitionDispatch *)
1182 4966 : palloc(sizeof(PartitionDispatch) * proute->max_dispatch);
1183 4966 : proute->nonleaf_partitions = (ResultRelInfo **)
1184 4966 : palloc(sizeof(ResultRelInfo *) * proute->max_dispatch);
1185 : }
1186 : else
1187 : {
1188 0 : proute->max_dispatch *= 2;
1189 0 : proute->partition_dispatch_info = (PartitionDispatch *)
1190 0 : repalloc(proute->partition_dispatch_info,
1191 0 : sizeof(PartitionDispatch) * proute->max_dispatch);
1192 0 : proute->nonleaf_partitions = (ResultRelInfo **)
1193 0 : repalloc(proute->nonleaf_partitions,
1194 0 : sizeof(ResultRelInfo *) * proute->max_dispatch);
1195 : }
1196 : }
1197 6130 : proute->partition_dispatch_info[dispatchidx] = pd;
1198 :
1199 : /*
1200 : * If setting up a PartitionDispatch for a sub-partitioned table, we may
1201 : * also need a minimally valid ResultRelInfo for checking the partition
1202 : * constraint later; set that up now.
1203 : */
1204 6130 : if (parent_pd)
1205 : {
1206 1164 : ResultRelInfo *rri = makeNode(ResultRelInfo);
1207 :
1208 1164 : InitResultRelInfo(rri, rel, 0, rootResultRelInfo, 0);
1209 1164 : proute->nonleaf_partitions[dispatchidx] = rri;
1210 : }
1211 : else
1212 4966 : proute->nonleaf_partitions[dispatchidx] = NULL;
1213 :
1214 : /*
1215 : * Finally, if setting up a PartitionDispatch for a sub-partitioned table,
1216 : * install a downlink in the parent to allow quick descent.
1217 : */
1218 6130 : if (parent_pd)
1219 : {
1220 : Assert(parent_pd->indexes[partidx] == -1);
1221 1164 : parent_pd->indexes[partidx] = dispatchidx;
1222 : }
1223 :
1224 6130 : MemoryContextSwitchTo(oldcxt);
1225 :
1226 6130 : return pd;
1227 : }
1228 :
1229 : /*
1230 : * ExecCleanupTupleRouting -- Clean up objects allocated for partition tuple
1231 : * routing.
1232 : *
1233 : * Close all the partitioned tables, leaf partitions, and their indices.
1234 : */
1235 : void
1236 4212 : ExecCleanupTupleRouting(ModifyTableState *mtstate,
1237 : PartitionTupleRouting *proute)
1238 : {
1239 : int i;
1240 :
1241 : /*
1242 : * Remember, proute->partition_dispatch_info[0] corresponds to the root
1243 : * partitioned table, which we must not try to close, because it is the
1244 : * main target table of the query that will be closed by callers such as
1245 : * ExecEndPlan() or DoCopy(). Also, tupslot is NULL for the root
1246 : * partitioned table.
1247 : */
1248 5152 : for (i = 1; i < proute->num_dispatch; i++)
1249 : {
1250 940 : PartitionDispatch pd = proute->partition_dispatch_info[i];
1251 :
1252 940 : table_close(pd->reldesc, NoLock);
1253 :
1254 940 : if (pd->tupslot)
1255 448 : ExecDropSingleTupleTableSlot(pd->tupslot);
1256 : }
1257 :
1258 10432 : for (i = 0; i < proute->num_partitions; i++)
1259 : {
1260 6220 : ResultRelInfo *resultRelInfo = proute->partitions[i];
1261 :
1262 : /* Allow any FDWs to shut down */
1263 6220 : if (resultRelInfo->ri_FdwRoutine != NULL &&
1264 68 : resultRelInfo->ri_FdwRoutine->EndForeignInsert != NULL)
1265 68 : resultRelInfo->ri_FdwRoutine->EndForeignInsert(mtstate->ps.state,
1266 : resultRelInfo);
1267 :
1268 : /*
1269 : * Close it if it's not one of the result relations borrowed from the
1270 : * owning ModifyTableState; those will be closed by ExecEndPlan().
1271 : */
1272 6220 : if (proute->is_borrowed_rel[i])
1273 452 : continue;
1274 :
1275 5768 : ExecCloseIndices(resultRelInfo);
1276 5768 : table_close(resultRelInfo->ri_RelationDesc, NoLock);
1277 : }
1278 4212 : }
1279 :
1280 : /* ----------------
1281 : * FormPartitionKeyDatum
1282 : * Construct values[] and isnull[] arrays for the partition key
1283 : * of a tuple.
1284 : *
1285 : * pd Partition dispatch object of the partitioned table
1286 : * slot Heap tuple from which to extract partition key
1287 : * estate executor state for evaluating any partition key
1288 : * expressions (must be non-NULL)
1289 : * values Array of partition key Datums (output area)
1290 : * isnull Array of is-null indicators (output area)
1291 : *
1292 : * the ecxt_scantuple slot of estate's per-tuple expr context must point to
1293 : * the heap tuple passed in.
1294 : * ----------------
1295 : */
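 : /*
 :  * Example with a hypothetical key: for PARTITION BY RANGE (a, (b + 1)),
 :  * partattrs[0] holds the attribute number of "a", so values[0] is fetched
 :  * directly with slot_getattr(), while partattrs[1] is 0, so values[1] is
 :  * computed by evaluating the stored expression "b + 1" via pd->keystate.
 :  */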
1296 : static void
1297 1113336 : FormPartitionKeyDatum(PartitionDispatch pd,
1298 : TupleTableSlot *slot,
1299 : EState *estate,
1300 : Datum *values,
1301 : bool *isnull)
1302 : {
1303 : ListCell *partexpr_item;
1304 : int i;
1305 :
1306 1113336 : if (pd->key->partexprs != NIL && pd->keystate == NIL)
1307 : {
1308 : /* Check caller has set up context correctly */
1309 : Assert(estate != NULL &&
1310 : GetPerTupleExprContext(estate)->ecxt_scantuple == slot);
1311 :
1312 : /* First time through, set up expression evaluation state */
1313 534 : pd->keystate = ExecPrepareExprList(pd->key->partexprs, estate);
1314 : }
1315 :
1316 1113336 : partexpr_item = list_head(pd->keystate);
1317 2249484 : for (i = 0; i < pd->key->partnatts; i++)
1318 : {
1319 1136148 : AttrNumber keycol = pd->key->partattrs[i];
1320 : Datum datum;
1321 : bool isNull;
1322 :
1323 1136148 : if (keycol != 0)
1324 : {
1325 : /* Plain column; get the value directly from the heap tuple */
1326 1048524 : datum = slot_getattr(slot, keycol, &isNull);
1327 : }
1328 : else
1329 : {
1330 : /* Expression; need to evaluate it */
1331 87624 : if (partexpr_item == NULL)
1332 0 : elog(ERROR, "wrong number of partition key expressions");
1333 87624 : datum = ExecEvalExprSwitchContext((ExprState *) lfirst(partexpr_item),
1334 87624 : GetPerTupleExprContext(estate),
1335 : &isNull);
1336 87624 : partexpr_item = lnext(pd->keystate, partexpr_item);
1337 : }
1338 1136148 : values[i] = datum;
1339 1136148 : isnull[i] = isNull;
1340 : }
1341 :
1342 1113336 : if (partexpr_item != NULL)
1343 0 : elog(ERROR, "wrong number of partition key expressions");
1344 1113336 : }
1345 :
1346 : /*
1347 : * The number of times the same partition must be found in a row before we
1348 : * switch from a binary search for the given values to just checking if the
1349 : * values belong to the last found partition. This must be above 0.
1350 : */
1351 : #define PARTITION_CACHED_FIND_THRESHOLD 16
1352 :
1353 : /*
1354 : * get_partition_for_tuple
1355 : * Finds the partition of the relation that accepts the partition key
1356 : * specified in values and isnull.
1357 : *
1358 : * Calling this function can be quite expensive when LIST and RANGE
1359 : * partitioned tables have many partitions. This is due to the binary search
1360 : * that's done to find the correct partition. Many of the use cases for LIST
1361 : * and RANGE partitioned tables make it likely that the same partition is
1362 : * found in subsequent ExecFindPartition() calls. This is especially true for
1363 : * cases such as RANGE partitioned tables on a TIMESTAMP column where the
1364 : * partition key is the current time. When asked to find a partition for a
1365 : * RANGE or LIST partitioned table, we record the partition index and datum
1366 : * offset we've found for the given 'values' in the PartitionDesc (which is
1367 : * stored in relcache), and if we keep finding the same partition
1368 : * PARTITION_CACHED_FIND_THRESHOLD times in a row, then we'll enable caching
1369 : * logic and instead of performing a binary search to find the correct
1370 : * partition, we'll just double-check that 'values' still belong to the last
1371 : * found partition, and if so, we'll return that partition index, thus
1372 : * skipping the need for the binary search. If we fail to match the last
1373 : * partition when double checking, then we fall back on doing a binary search.
1374 : * In this case, unless we find 'values' belong to the DEFAULT partition,
1375 : * we'll reset the number of times we've hit the same partition so that we
1376 : * don't attempt to use the cache again until we've found that partition at
1377 : * least PARTITION_CACHED_FIND_THRESHOLD times in a row.
1378 : *
1379 : * For cases where the partition changes on each lookup, the amount of
1380 : * additional work required just amounts to recording the last found partition
1381 : * and bound offset then resetting the found counter. This is cheap and does
1382 : * not appear to cause any meaningful slowdowns for such cases.
1383 : *
1384 : * No caching of partitions is done when the last found partition is the
1385 : * DEFAULT or NULL partition. For the case of the DEFAULT partition, there
1386 : * is no bound offset storing the matching datum, so we cannot confirm the
1387 : * indexes match. For the NULL partition, this is just so cheap, there's no
1388 : * sense in caching.
1389 : *
1390 : * Return value is index of the partition (>= 0 and < partdesc->nparts) if one
1391 : * found or -1 if none found.
1392 : */
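 : /*
 :  * An illustrative scenario (editor's example): bulk-loading one day's rows
 :  * into a table range-partitioned on a timestamp column by month.  The first
 :  * PARTITION_CACHED_FIND_THRESHOLD consecutive lookups that land on the same
 :  * month's partition each pay for a binary search; every later lookup merely
 :  * re-checks the cached bounds, until some row falls outside them, at which
 :  * point we binary-search again and the counter resets.
 :  */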
1393 : static int
1394 1113294 : get_partition_for_tuple(PartitionDispatch pd, Datum *values, bool *isnull)
1395 : {
1396 1113294 : int bound_offset = -1;
1397 1113294 : int part_index = -1;
1398 1113294 : PartitionKey key = pd->key;
1399 1113294 : PartitionDesc partdesc = pd->partdesc;
1400 1113294 : PartitionBoundInfo boundinfo = partdesc->boundinfo;
1401 :
1402 : /*
1403 : * In the switch statement below, when we perform a cached lookup for
1404 : * RANGE and LIST partitioned tables, if we find that the last found
1405 : * partition matches the 'values', we return the partition index right
1406 : * away. We do this instead of breaking out of the switch as we don't
1407 : * want to execute the code about the DEFAULT partition or do any updates
1408 : * for any of the cache-related fields. That would be a waste of effort
1409 : * as we already know it's not the DEFAULT partition and have no need to
1410 : * increment the number of times we found the same partition any higher
1411 : * than PARTITION_CACHED_FIND_THRESHOLD.
1412 : */
1413 :
1414 : /* Route as appropriate based on partitioning strategy. */
1415 1113294 : switch (key->strategy)
1416 : {
1417 210726 : case PARTITION_STRATEGY_HASH:
1418 : {
1419 : uint64 rowHash;
1420 :
1421 : /* hash partitioning is too cheap to bother caching */
1422 210726 : rowHash = compute_partition_hash_value(key->partnatts,
1423 : key->partsupfunc,
1424 210726 : key->partcollation,
1425 : values, isnull);
1426 :
1427 : /*
1428 : * HASH partitions can't have a DEFAULT partition and we don't
1429 : * do any caching work for them, so just return the part index
1430 : */
1431 210726 : return boundinfo->indexes[rowHash % boundinfo->nindexes];
1432 : }
1433 :
1434 170956 : case PARTITION_STRATEGY_LIST:
1435 170956 : if (isnull[0])
1436 : {
1437 : /* this is far too cheap to bother doing any caching */
1438 132 : if (partition_bound_accepts_nulls(boundinfo))
1439 : {
1440 : /*
1441 : * When there is a NULL partition we just return that
1442 : * directly. We don't have a bound_offset so it's not
1443 : * valid to drop into the code after the switch which
1444 : * checks and updates the cache fields. We perhaps should
1445 : * be invalidating the details of the last cached
1446 : * partition but there's no real need to. Keeping those
1447 : * fields set gives a chance at matching to the cached
1448 : * partition on the next lookup.
1449 : */
1450 102 : return boundinfo->null_index;
1451 : }
1452 : }
1453 : else
1454 : {
1455 : bool equal;
1456 :
1457 170824 : if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
1458 : {
1459 23892 : int last_datum_offset = partdesc->last_found_datum_index;
1460 23892 : Datum lastDatum = boundinfo->datums[last_datum_offset][0];
1461 : int32 cmpval;
1462 :
1463 : /* does the last found datum index match this datum? */
1464 23892 : cmpval = DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[0],
1465 23892 : key->partcollation[0],
1466 : lastDatum,
1467 : values[0]));
1468 :
1469 23892 : if (cmpval == 0)
1470 23538 : return boundinfo->indexes[last_datum_offset];
1471 :
1472 : /* fall through and do a manual lookup */
1473 : }
1474 :
1475 147286 : bound_offset = partition_list_bsearch(key->partsupfunc,
1476 : key->partcollation,
1477 : boundinfo,
1478 : values[0], &equal);
1479 147286 : if (bound_offset >= 0 && equal)
1480 146888 : part_index = boundinfo->indexes[bound_offset];
1481 : }
1482 147316 : break;
1483 :
1484 731612 : case PARTITION_STRATEGY_RANGE:
1485 : {
1486 731612 : bool equal = false,
1487 731612 : range_partkey_has_null = false;
1488 : int i;
1489 :
1490 : /*
1491 : * No range includes NULL, so this will be accepted by the
1492 : * default partition if there is one, and otherwise rejected.
1493 : */
1494 1485616 : for (i = 0; i < key->partnatts; i++)
1495 : {
1496 754058 : if (isnull[i])
1497 : {
1498 54 : range_partkey_has_null = true;
1499 54 : break;
1500 : }
1501 : }
1502 :
1503 : /* NULLs belong in the DEFAULT partition */
1504 731612 : if (range_partkey_has_null)
1505 54 : break;
1506 :
1507 731558 : if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
1508 : {
1509 244056 : int last_datum_offset = partdesc->last_found_datum_index;
1510 244056 : Datum *lastDatums = boundinfo->datums[last_datum_offset];
1511 244056 : PartitionRangeDatumKind *kind = boundinfo->kind[last_datum_offset];
1512 : int32 cmpval;
1513 :
1514 : /* check if the value is >= the lower bound */
1515 244056 : cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1516 : key->partcollation,
1517 : lastDatums,
1518 : kind,
1519 : values,
1520 244056 : key->partnatts);
1521 :
1522 : /*
1523 : * If it's equal to the lower bound then there's no need to
1524 : * check the upper bound.
1525 : */
1526 244056 : if (cmpval == 0)
1527 243806 : return boundinfo->indexes[last_datum_offset + 1];
1528 :
1529 238158 : if (cmpval < 0 && last_datum_offset + 1 < boundinfo->ndatums)
1530 : {
1531 : /* check if the value is below the upper bound */
1532 238128 : lastDatums = boundinfo->datums[last_datum_offset + 1];
1533 238128 : kind = boundinfo->kind[last_datum_offset + 1];
1534 238128 : cmpval = partition_rbound_datum_cmp(key->partsupfunc,
1535 : key->partcollation,
1536 : lastDatums,
1537 : kind,
1538 : values,
1539 238128 : key->partnatts);
1540 :
1541 238128 : if (cmpval > 0)
1542 237908 : return boundinfo->indexes[last_datum_offset + 1];
1543 : }
1544 : /* fall through and do a manual lookup */
1545 : }
1546 :
1547 487752 : bound_offset = partition_range_datum_bsearch(key->partsupfunc,
1548 : key->partcollation,
1549 : boundinfo,
1550 487752 : key->partnatts,
1551 : values,
1552 : &equal);
1553 :
1554 : /*
1555 : * The bound at bound_offset is less than or equal to the
1556 : * tuple value, so the bound at offset+1 is the upper bound of
1557 : * the partition we're looking for, if one actually
1558 : * exists.
1559 : */
1560 487752 : part_index = boundinfo->indexes[bound_offset + 1];
1561 : }
1562 487752 : break;
1563 :
1564 0 : default:
1565 0 : elog(ERROR, "unexpected partition strategy: %d",
1566 : (int) key->strategy);
1567 : }
1568 :
1569 : /*
1570 : * part_index < 0 means we failed to find a partition of this parent. Use
1571 : * the default partition, if there is one.
1572 : */
1573 635122 : if (part_index < 0)
1574 : {
1575 : /*
1576 : * No need to reset the cache fields here. The next set of values
1577 : * might end up belonging to the cached partition, so leaving the
1578 : * cache alone improves the chances of a cache hit on the next lookup.
1579 : */
1580 706 : return boundinfo->default_index;
1581 : }
1582 :
1583 : /* we should only make it here when the code above set bound_offset */
1584 : Assert(bound_offset >= 0);
1585 :
1586 : /*
1587 : * Attend to the cache fields. If the bound_offset matches the last
1588 : * cached bound offset then we've found the same partition as last time,
1589 : * so bump the count by one. If all goes well, we'll eventually reach
1590 : * PARTITION_CACHED_FIND_THRESHOLD and try the cache path next time
1591 : * around. Otherwise, we'll reset the cache count back to 1 to mark that
1592 : * we've found this partition for the first time.
1593 : */
1594 634416 : if (bound_offset == partdesc->last_found_datum_index)
1595 437508 : partdesc->last_found_count++;
1596 : else
1597 : {
1598 196908 : partdesc->last_found_count = 1;
1599 196908 : partdesc->last_found_part_index = part_index;
1600 196908 : partdesc->last_found_datum_index = bound_offset;
1601 : }
1602 :
1603 634416 : return part_index;
1604 : }
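/*
 * A minimal standalone sketch of the caching heuristic above, assuming a
 * single int partition key and a LIST-style sorted bound array; the names
 * (CACHE_THRESHOLD, SimplePartDesc, simple_find_partition) are illustrative
 * stand-ins, not part of this file.
 */
#ifdef EXEC_PARTITION_SKETCHES
#define CACHE_THRESHOLD 16		/* stand-in for PARTITION_CACHED_FIND_THRESHOLD */

typedef struct SimplePartDesc
{
	int		   *bounds;			/* sorted bound values */
	int		   *indexes;		/* partition index for each bound */
	int			nbounds;
	int			last_found_datum_index;
	int			last_found_part_index;
	int			last_found_count;
} SimplePartDesc;

static int
simple_find_partition(SimplePartDesc *pd, int value)
{
	int			lo = 0,
				hi = pd->nbounds - 1,
				off = -1;

	/* Try the cached bound first once it has proven itself. */
	if (pd->last_found_count >= CACHE_THRESHOLD &&
		pd->bounds[pd->last_found_datum_index] == value)
		return pd->indexes[pd->last_found_datum_index];

	/* Otherwise fall back to a binary search over the bounds. */
	while (lo <= hi)
	{
		int			mid = lo + (hi - lo) / 2;

		if (pd->bounds[mid] == value)
		{
			off = mid;
			break;
		}
		else if (pd->bounds[mid] < value)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	if (off < 0)
		return -1;				/* caller would try a DEFAULT partition */

	/* Bump or reset the cache counter, as the code above does. */
	if (off == pd->last_found_datum_index)
		pd->last_found_count++;
	else
	{
		pd->last_found_count = 1;
		pd->last_found_datum_index = off;
		pd->last_found_part_index = pd->indexes[off];
	}
	return pd->indexes[off];
}
#endif							/* EXEC_PARTITION_SKETCHES */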
1605 :
1606 : /*
1607 : * ExecBuildSlotPartitionKeyDescription
1608 : *
1609 : * This works very much like BuildIndexValueDescription() and is currently
1610 : * used for building error messages when ExecFindPartition() fails to find
1611 : * a partition for a row.
1612 : */
1613 : static char *
1614 154 : ExecBuildSlotPartitionKeyDescription(Relation rel,
1615 : Datum *values,
1616 : bool *isnull,
1617 : int maxfieldlen)
1618 : {
1619 : StringInfoData buf;
1620 154 : PartitionKey key = RelationGetPartitionKey(rel);
1621 154 : int partnatts = get_partition_natts(key);
1622 : int i;
1623 154 : Oid relid = RelationGetRelid(rel);
1624 : AclResult aclresult;
1625 :
1626 154 : if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED)
1627 0 : return NULL;
1628 :
1629 : /* If the user has table-level access, just go build the description. */
1630 154 : aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
1631 154 : if (aclresult != ACLCHECK_OK)
1632 : {
1633 : /*
1634 : * Step through the columns of the partition key and make sure the
1635 : * user has SELECT rights on all of them.
1636 : */
1637 24 : for (i = 0; i < partnatts; i++)
1638 : {
1639 18 : AttrNumber attnum = get_partition_col_attnum(key, i);
1640 :
1641 : /*
1642 : * If this partition key column is an expression, we return no
1643 : * detail rather than trying to figure out what column(s) the
1644 : * expression includes and whether the user has SELECT rights on them.
1645 : */
1646 30 : if (attnum == InvalidAttrNumber ||
1647 12 : pg_attribute_aclcheck(relid, attnum, GetUserId(),
1648 : ACL_SELECT) != ACLCHECK_OK)
1649 12 : return NULL;
1650 : }
1651 : }
1652 :
1653 142 : initStringInfo(&buf);
1654 142 : appendStringInfo(&buf, "(%s) = (",
1655 : pg_get_partkeydef_columns(relid, true));
1656 :
1657 338 : for (i = 0; i < partnatts; i++)
1658 : {
1659 : char *val;
1660 : int vallen;
1661 :
1662 196 : if (isnull[i])
1663 30 : val = "null";
1664 : else
1665 : {
1666 : Oid foutoid;
1667 : bool typisvarlena;
1668 :
1669 166 : getTypeOutputInfo(get_partition_col_typid(key, i),
1670 : &foutoid, &typisvarlena);
1671 166 : val = OidOutputFunctionCall(foutoid, values[i]);
1672 : }
1673 :
1674 196 : if (i > 0)
1675 54 : appendStringInfoString(&buf, ", ");
1676 :
1677 : /* truncate if needed */
1678 196 : vallen = strlen(val);
1679 196 : if (vallen <= maxfieldlen)
1680 196 : appendBinaryStringInfo(&buf, val, vallen);
1681 : else
1682 : {
1683 0 : vallen = pg_mbcliplen(val, vallen, maxfieldlen);
1684 0 : appendBinaryStringInfo(&buf, val, vallen);
1685 0 : appendStringInfoString(&buf, "...");
1686 : }
1687 : }
1688 :
1689 142 : appendStringInfoChar(&buf, ')');
1690 :
1691 142 : return buf.data;
1692 : }
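/*
 * A rough standalone sketch of the description format produced above, using
 * plain C strings instead of Datums and output functions; the names here are
 * illustrative only. For a two-column key it yields e.g. "(a, b) = (42, null)".
 */
#ifdef EXEC_PARTITION_SKETCHES
#include <stdio.h>
#include <string.h>

static void
sketch_partkey_description(char *buf, size_t buflen, const char *keydef,
						   const char *const *vals, int nvals, int maxfieldlen)
{
	snprintf(buf, buflen, "(%s) = (", keydef);
	for (int i = 0; i < nvals; i++)
	{
		/* a NULL pointer stands in for an isnull[] column */
		const char *val = vals[i] ? vals[i] : "null";

		if (i > 0)
			snprintf(buf + strlen(buf), buflen - strlen(buf), ", ");
		if ((int) strlen(val) <= maxfieldlen)
			snprintf(buf + strlen(buf), buflen - strlen(buf), "%s", val);
		else
			/* the real code clips at a character boundary via pg_mbcliplen */
			snprintf(buf + strlen(buf), buflen - strlen(buf), "%.*s...",
					 maxfieldlen, val);
	}
	snprintf(buf + strlen(buf), buflen - strlen(buf), ")");
}
#endif							/* EXEC_PARTITION_SKETCHES */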
1693 :
1694 : /*
1695 : * adjust_partition_colnos
1696 : * Adjust the list of UPDATE target column numbers to account for
1697 : * attribute differences between the parent and the partition.
1698 : *
1699 : * Note: mustn't be called if no adjustment is required.
1700 : */
1701 : static List *
1702 76 : adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri)
1703 : {
1704 76 : TupleConversionMap *map = ExecGetChildToRootMap(leaf_part_rri);
1705 :
1706 : Assert(map != NULL);
1707 :
1708 76 : return adjust_partition_colnos_using_map(colnos, map->attrMap);
1709 : }
1710 :
1711 : /*
1712 : * adjust_partition_colnos_using_map
1713 : * Like adjust_partition_colnos, but uses a caller-supplied map instead
1714 : * of assuming that the map is from the "root" result relation.
1715 : *
1716 : * Note: mustn't be called if no adjustment is required.
1717 : */
1718 : static List *
1719 94 : adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
1720 : {
1721 94 : List *new_colnos = NIL;
1722 : ListCell *lc;
1723 :
1724 : Assert(attrMap != NULL); /* else we shouldn't be here */
1725 :
1726 232 : foreach(lc, colnos)
1727 : {
1728 138 : AttrNumber parentattrno = lfirst_int(lc);
1729 :
1730 138 : if (parentattrno <= 0 ||
1731 138 : parentattrno > attrMap->maplen ||
1732 138 : attrMap->attnums[parentattrno - 1] == 0)
1733 0 : elog(ERROR, "unexpected attno %d in target column list",
1734 : parentattrno);
1735 138 : new_colnos = lappend_int(new_colnos,
1736 138 : attrMap->attnums[parentattrno - 1]);
1737 : }
1738 :
1739 94 : return new_colnos;
1740 : }
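/*
 * A minimal standalone sketch of the attno translation above, assuming the
 * map is a plain int array indexed by (parent attno - 1) with 0 meaning "no
 * mapping", as in AttrMap.attnums; it returns -1 where the real code elogs.
 * E.g. with attnums = {2, 0, 1}, parent attno 1 maps to 2, attno 3 maps to
 * 1, and attno 2 (dropped in the partition) reports -1.
 */
#ifdef EXEC_PARTITION_SKETCHES
static int
sketch_map_attno(const int *attnums, int maplen, int parentattrno)
{
	if (parentattrno <= 0 || parentattrno > maplen ||
		attnums[parentattrno - 1] == 0)
		return -1;				/* unexpected attno in target column list */
	return attnums[parentattrno - 1];
}
#endif							/* EXEC_PARTITION_SKETCHES */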
1741 :
1742 : /*-------------------------------------------------------------------------
1743 : * Run-Time Partition Pruning Support.
1744 : *
1745 : * The following series of functions exist to support the removal of unneeded
1746 : * subplans for queries against partitioned tables. The supporting functions
1747 : * here are designed to work with any plan type that supports an arbitrary
1748 : * number of subplans, e.g. Append, MergeAppend.
1749 : *
1750 : * When pruning involves comparison of a partition key to a constant, it's
1751 : * done by the planner. However, if we have a comparison to a non-constant
1752 : * but non-volatile expression, that presents an opportunity for run-time
1753 : * pruning by the executor, allowing irrelevant partitions to be skipped
1754 : * dynamically.
1755 : *
1756 : * We must distinguish expressions containing PARAM_EXEC Params from
1757 : * expressions that don't contain those. Even though a PARAM_EXEC Param is
1758 : * considered to be a stable expression, it can change value from one plan
1759 : * node scan to the next during query execution. Stable comparison
1760 : * expressions that don't involve such Params allow partition pruning to be
1761 : * done once during executor startup. Expressions that do involve such Params
1762 : * require us to prune separately for each scan of the parent plan node.
1763 : *
1764 : * Note that pruning away unneeded subplans during executor startup has the
1765 : * added benefit of not having to initialize the unneeded subplans at all.
1766 : *
1767 : *
1768 : * Functions:
1769 : *
1770 : * ExecDoInitialPruning:
1771 : * Perform runtime "initial" pruning, if necessary, to determine the set
1772 : * of child subnodes that need to be initialized during ExecInitNode() for
1773 : * all plan nodes that contain a PartitionPruneInfo.
1774 : *
1775 : * ExecInitPartitionExecPruning:
1776 : * Updates the PartitionPruneState found at the given part_prune_index in
1777 : * EState.es_part_prune_states for use during "exec" pruning if required.
1778 : * Also returns the set of subplans to initialize that would be stored at
1779 : * part_prune_index in EState.es_part_prune_results by
1780 : * ExecDoInitialPruning(). Maps in PartitionPruneState are updated to
1781 : * account for initial pruning possibly having eliminated some of the
1782 : * subplans.
1783 : *
1784 : * ExecFindMatchingSubPlans:
1785 : * Returns indexes of matching subplans after evaluating the expressions
1786 : * that are safe to evaluate at a given point. This function is first
1787 : * called during ExecDoInitialPruning() to find the initially matching
1788 : * subplans based on performing the initial pruning steps and then must be
1789 : * called again each time the value of a Param listed in
1790 : * PartitionPruneState's 'execparamids' changes.
1791 : *-------------------------------------------------------------------------
1792 : */
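/*
 * A condensed sketch of how the three entry points fit together for a plan
 * node such as Append, using only functions declared in this file; error
 * handling and the surrounding executor machinery are omitted, and the
 * wrapper name is illustrative only.
 */
#ifdef EXEC_PARTITION_SKETCHES
static void
sketch_runtime_pruning_flow(EState *estate, PlanState *planstate,
							int n_total_subplans, int part_prune_index,
							Bitmapset *relids)
{
	PartitionPruneState *prunestate;
	Bitmapset  *initially_valid_subplans = NULL;

	/* 1. During InitPlan(): run all "initial" pruning steps up front. */
	ExecDoInitialPruning(estate);

	/* 2. During ExecInitNode() of the parent: fetch state and results. */
	prunestate = ExecInitPartitionExecPruning(planstate, n_total_subplans,
											  part_prune_index, relids,
											  &initially_valid_subplans);

	/* 3. At run time, whenever a Param in 'execparamids' changes value. */
	if (prunestate->do_exec_prune)
	{
		Bitmapset  *valid = ExecFindMatchingSubPlans(prunestate, false, NULL);

		(void) valid;			/* the node would scan only these subplans */
	}
}
#endif							/* EXEC_PARTITION_SKETCHES */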
1793 :
1794 :
1795 : /*
1796 : * ExecDoInitialPruning
1797 : * Perform runtime "initial" pruning, if necessary, to determine the set
1798 : * of child subnodes that need to be initialized during ExecInitNode() for
1799 : * plan nodes that support partition pruning.
1800 : *
1801 : * This function iterates over each PartitionPruneInfo entry in
1802 : * estate->es_part_prune_infos. For each entry, it creates a PartitionPruneState
1803 : * and adds it to es_part_prune_states. ExecInitPartitionExecPruning() accesses
1804 : * these states through their corresponding indexes in es_part_prune_states and
1805 : * assigns each state to the parent node's PlanState, from where it will be used
1806 : * for "exec" pruning.
1807 : *
1808 : * If initial pruning steps exist for a PartitionPruneInfo entry, this function
1809 : * executes those pruning steps and stores the result as a bitmapset of valid
1810 : * child subplans, identifying which subplans should be initialized for
1811 : * execution. The results are saved in estate->es_part_prune_results.
1812 : *
1813 : * If no initial pruning is performed for a given PartitionPruneInfo, a NULL
1814 : * entry is still added to es_part_prune_results to maintain alignment with
1815 : * es_part_prune_infos. This ensures that ExecInitPartitionExecPruning() can
1816 : * use the same index to retrieve the pruning results.
1817 : */
1818 : void
1819 582630 : ExecDoInitialPruning(EState *estate)
1820 : {
1821 : ListCell *lc;
1822 :
1823 583428 : foreach(lc, estate->es_part_prune_infos)
1824 : {
1825 798 : PartitionPruneInfo *pruneinfo = lfirst_node(PartitionPruneInfo, lc);
1826 : PartitionPruneState *prunestate;
1827 798 : Bitmapset *validsubplans = NULL;
1828 798 : Bitmapset *all_leafpart_rtis = NULL;
1829 798 : Bitmapset *validsubplan_rtis = NULL;
1830 :
1831 : /* Create and save the PartitionPruneState. */
1832 798 : prunestate = CreatePartitionPruneState(estate, pruneinfo,
1833 : &all_leafpart_rtis);
1834 798 : estate->es_part_prune_states = lappend(estate->es_part_prune_states,
1835 : prunestate);
1836 :
1837 : /*
1838 : * Perform initial pruning steps, if any, and save the result
1839 : * bitmapset or NULL as described in the header comment.
1840 : */
1841 798 : if (prunestate->do_initial_prune)
1842 446 : validsubplans = ExecFindMatchingSubPlans(prunestate, true,
1843 : &validsubplan_rtis);
1844 : else
1845 352 : validsubplan_rtis = all_leafpart_rtis;
1846 :
1847 798 : estate->es_unpruned_relids = bms_add_members(estate->es_unpruned_relids,
1848 : validsubplan_rtis);
1849 798 : estate->es_part_prune_results = lappend(estate->es_part_prune_results,
1850 : validsubplans);
1851 : }
1852 582630 : }
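/*
 * A small sketch of the alignment invariant maintained above: after this
 * function runs, the k'th entries of es_part_prune_infos, _states and
 * _results all describe the same PartitionPruneInfo ('k' is illustrative).
 */
#ifdef EXEC_PARTITION_SKETCHES
static void
sketch_check_prune_list_alignment(EState *estate, int k)
{
	PartitionPruneState *state = list_nth(estate->es_part_prune_states, k);
	Bitmapset  *result = list_nth(estate->es_part_prune_results, k);

	/* A NULL result simply means no initial pruning was done for entry k. */
	Assert(state != NULL);
	Assert(result == NULL || state->do_initial_prune);
}
#endif							/* EXEC_PARTITION_SKETCHES */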
1853 :
1854 : /*
1855 : * ExecInitPartitionExecPruning
1856 : * Initialize the data structures needed for runtime "exec" partition
1857 : * pruning and return the result of initial pruning, if available.
1858 : *
1859 : * 'relids' identifies the relations to which both the parent plan and the
1860 : * PartitionPruneInfo given by 'part_prune_index' belong.
1861 : *
1862 : * On return, *initially_valid_subplans is assigned the set of indexes of
1863 : * child subplans that must be initialized along with the parent plan node.
1864 : * Initial pruning would have been performed by ExecDoInitialPruning(), if
1865 : * necessary, and the bitmapset of surviving subplans' indexes would have
1866 : * been stored as the part_prune_index'th element of
1867 : * EState.es_part_prune_results.
1868 : *
1869 : * If subplans were indeed pruned during initial pruning, the subplan_map
1870 : * arrays in the returned PartitionPruneState are re-sequenced to exclude those
1871 : * subplans, but only if the maps will be needed for subsequent execution
1872 : * pruning passes.
1873 : */
1874 : PartitionPruneState *
1875 798 : ExecInitPartitionExecPruning(PlanState *planstate,
1876 : int n_total_subplans,
1877 : int part_prune_index,
1878 : Bitmapset *relids,
1879 : Bitmapset **initially_valid_subplans)
1880 : {
1881 : PartitionPruneState *prunestate;
1882 798 : EState *estate = planstate->state;
1883 : PartitionPruneInfo *pruneinfo;
1884 :
1885 : /* Obtain the pruneinfo we need. */
1886 798 : pruneinfo = list_nth_node(PartitionPruneInfo, estate->es_part_prune_infos,
1887 : part_prune_index);
1888 :
1889 : /* Its relids had better match the plan node's, or the planner messed up. */
1890 798 : if (!bms_equal(relids, pruneinfo->relids))
1891 0 : elog(ERROR, "wrong pruneinfo with relids=%s found at part_prune_index=%d contained in plan node with relids=%s",
1892 : bmsToString(pruneinfo->relids), part_prune_index,
1893 : bmsToString(relids));
1894 :
1895 : /*
1896 : * The PartitionPruneState would have been created by
1897 : * ExecDoInitialPruning() and stored as the part_prune_index'th element of
1898 : * EState.es_part_prune_states.
1899 : */
1900 798 : prunestate = list_nth(estate->es_part_prune_states, part_prune_index);
1901 : Assert(prunestate != NULL);
1902 :
1903 : /* Use the result of initial pruning done by ExecDoInitialPruning(). */
1904 798 : if (prunestate->do_initial_prune)
1905 446 : *initially_valid_subplans = list_nth_node(Bitmapset,
1906 : estate->es_part_prune_results,
1907 : part_prune_index);
1908 : else
1909 : {
1910 : /* No pruning, so we'll need to initialize all subplans */
1911 : Assert(n_total_subplans > 0);
1912 352 : *initially_valid_subplans = bms_add_range(NULL, 0,
1913 : n_total_subplans - 1);
1914 : }
1915 :
1916 : /*
1917 : * The exec pruning state must also be initialized, if needed, before it
1918 : * can be used for pruning during execution.
1919 : *
1920 : * This also re-sequences subplan indexes contained in prunestate to
1921 : * account for any that were removed due to initial pruning; refer to the
1922 : * condition in InitExecPartitionPruneContexts() that is used to determine
1923 : * whether to do this. If no exec pruning needs to be done, we thus
1924 : * leave the maps in an invalid state, but that's OK since that data
1925 : * won't be consulted again (cf initial Assert in
1926 : * ExecFindMatchingSubPlans).
1927 : */
1928 798 : if (prunestate->do_exec_prune)
1929 394 : InitExecPartitionPruneContexts(prunestate, planstate,
1930 : *initially_valid_subplans,
1931 : n_total_subplans);
1932 :
1933 798 : return prunestate;
1934 : }
1935 :
1936 : /*
1937 : * CreatePartitionPruneState
1938 : * Build the data structure required for calling ExecFindMatchingSubPlans
1939 : *
1940 : * This includes PartitionPruneContexts (stored in each
1941 : * PartitionedRelPruningData corresponding to a PartitionedRelPruneInfo),
1942 : * which hold the ExprStates needed to evaluate pruning expressions, and
1943 : * mapping arrays to convert partition indexes from the pruning logic
1944 : * into subplan indexes in the parent plan node's list of child subplans.
1945 : *
1946 : * 'pruneinfo' is a PartitionPruneInfo as generated by
1947 : * make_partition_pruneinfo. Here we build a PartitionPruneState containing a
1948 : * PartitionPruningData for each partitioning hierarchy (i.e., each sublist of
1949 : * pruneinfo->prune_infos), each of which contains a PartitionedRelPruningData
1950 : * for each PartitionedRelPruneInfo appearing in that sublist. This two-level
1951 : * system is needed to keep from confusing the different hierarchies when a
1952 : * UNION ALL contains multiple partitioned tables as children. The data
1953 : * stored in each PartitionedRelPruningData can be re-used each time we
1954 : * re-evaluate which partitions match the pruning steps provided in each
1955 : * PartitionedRelPruneInfo.
1956 : *
1957 : * Note that only the PartitionPruneContexts for initial pruning are
1958 : * initialized here. Those required for exec pruning are initialized later in
1959 : * ExecInitPartitionExecPruning(), as they depend on the availability of the
1960 : * parent plan node's PlanState.
1961 : *
1962 : * If initial pruning steps are to be skipped (e.g., during EXPLAIN
1963 : * (GENERIC_PLAN)), *all_leafpart_rtis will be populated with the RT indexes of
1964 : * all leaf partitions whose scanning subnode is included in the parent plan
1965 : * node's list of child plans. The caller must add these RT indexes to
1966 : * estate->es_unpruned_relids.
1967 : */
1968 : static PartitionPruneState *
1969 798 : CreatePartitionPruneState(EState *estate, PartitionPruneInfo *pruneinfo,
1970 : Bitmapset **all_leafpart_rtis)
1971 : {
1972 : PartitionPruneState *prunestate;
1973 : int n_part_hierarchies;
1974 : ListCell *lc;
1975 : int i;
1976 :
1977 : /*
1978 : * Expression context that will be used by partkey_datum_from_expr() to
1979 : * evaluate expressions for comparison against partition bounds.
1980 : */
1981 798 : ExprContext *econtext = CreateExprContext(estate);
1982 :
1983 : /* For data reading, the executor always includes detached partitions */
1984 798 : if (estate->es_partition_directory == NULL)
1985 750 : estate->es_partition_directory =
1986 750 : CreatePartitionDirectory(estate->es_query_cxt, false);
1987 :
1988 798 : n_part_hierarchies = list_length(pruneinfo->prune_infos);
1989 : Assert(n_part_hierarchies > 0);
1990 :
1991 : /*
1992 : * Allocate the data structure
1993 : */
1994 : prunestate = (PartitionPruneState *)
1995 798 : palloc(offsetof(PartitionPruneState, partprunedata) +
1996 : sizeof(PartitionPruningData *) * n_part_hierarchies);
1997 :
1998 : /* Save ExprContext for use during InitExecPartitionPruneContexts(). */
1999 798 : prunestate->econtext = econtext;
2000 798 : prunestate->execparamids = NULL;
2001 : /* other_subplans can change at runtime, so we need our own copy */
2002 798 : prunestate->other_subplans = bms_copy(pruneinfo->other_subplans);
2003 798 : prunestate->do_initial_prune = false; /* may be set below */
2004 798 : prunestate->do_exec_prune = false; /* may be set below */
2005 798 : prunestate->num_partprunedata = n_part_hierarchies;
2006 :
2007 : /*
2008 : * Create a short-term memory context which we'll use when making calls to
2009 : * the partition pruning functions. This avoids possible memory leaks,
2010 : * since the pruning functions call comparison functions that aren't under
2011 : * our control.
2012 : */
2013 798 : prunestate->prune_context =
2014 798 : AllocSetContextCreate(CurrentMemoryContext,
2015 : "Partition Prune",
2016 : ALLOCSET_DEFAULT_SIZES);
2017 :
2018 798 : i = 0;
2019 1620 : foreach(lc, pruneinfo->prune_infos)
2020 : {
2021 822 : List *partrelpruneinfos = lfirst_node(List, lc);
2022 822 : int npartrelpruneinfos = list_length(partrelpruneinfos);
2023 : PartitionPruningData *prunedata;
2024 : ListCell *lc2;
2025 : int j;
2026 :
2027 : prunedata = (PartitionPruningData *)
2028 822 : palloc(offsetof(PartitionPruningData, partrelprunedata) +
2029 822 : npartrelpruneinfos * sizeof(PartitionedRelPruningData));
2030 822 : prunestate->partprunedata[i] = prunedata;
2031 822 : prunedata->num_partrelprunedata = npartrelpruneinfos;
2032 :
2033 822 : j = 0;
2034 2454 : foreach(lc2, partrelpruneinfos)
2035 : {
2036 1632 : PartitionedRelPruneInfo *pinfo = lfirst_node(PartitionedRelPruneInfo, lc2);
2037 1632 : PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
2038 : Relation partrel;
2039 : PartitionDesc partdesc;
2040 : PartitionKey partkey;
2041 :
2042 : /*
2043 : * We can rely on the copies of the partitioned table's partition
2044 : * key and partition descriptor appearing in its relcache entry,
2045 : * because that entry will be held open and locked for the
2046 : * duration of this executor run.
2047 : */
2048 1632 : partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex, false);
2049 :
2050 : /* Remember for InitExecPartitionPruneContexts(). */
2051 1632 : pprune->partrel = partrel;
2052 :
2053 1632 : partkey = RelationGetPartitionKey(partrel);
2054 1632 : partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
2055 : partrel);
2056 :
2057 : /*
2058 : * Initialize the subplan_map and subpart_map.
2059 : *
2060 : * The set of partitions that exist now might not be the same that
2061 : * existed when the plan was made. The normal case is that it is;
2062 : * optimize for that case with a quick comparison, and just copy
2063 : * the subplan_map and make subpart_map and leafpart_rti_map point
2064 : * to the ones in the PruneInfo.
2065 : *
2066 : * For the case where they aren't identical, we could have more
2067 : * partitions on either side; or even exactly the same number of
2068 : * them on both but the set of OIDs doesn't match fully. Handle
2069 : * this by creating new subplan_map and subpart_map arrays that
2070 : * correspond to the ones in the PruneInfo where the new
2071 : * partition descriptor's OIDs match. Any that don't match can be
2072 : * set to -1, as if they were pruned. By construction, both
2073 : * arrays are in partition bounds order.
2074 : */
2075 1632 : pprune->nparts = partdesc->nparts;
2076 1632 : pprune->subplan_map = palloc(sizeof(int) * partdesc->nparts);
2077 :
2078 1632 : if (partdesc->nparts == pinfo->nparts &&
2079 1630 : memcmp(partdesc->oids, pinfo->relid_map,
2080 1630 : sizeof(int) * partdesc->nparts) == 0)
2081 : {
2082 1508 : pprune->subpart_map = pinfo->subpart_map;
2083 1508 : pprune->leafpart_rti_map = pinfo->leafpart_rti_map;
2084 1508 : memcpy(pprune->subplan_map, pinfo->subplan_map,
2085 1508 : sizeof(int) * pinfo->nparts);
2086 : }
2087 : else
2088 : {
2089 124 : int pd_idx = 0;
2090 : int pp_idx;
2091 :
2092 : /*
2093 : * When the partition arrays are not identical, there could be
2094 : * some new ones but it's also possible that one was removed;
2095 : * we cope with both situations by walking the arrays and
2096 : * discarding those that don't match.
2097 : *
2098 : * If the number of partitions on both sides matches, it's still
2099 : * possible that one partition has been detached and another
2100 : * attached. Cope with that by creating a map that skips any
2101 : * mismatches.
2102 : */
2103 124 : pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);
2104 124 : pprune->leafpart_rti_map = palloc(sizeof(int) * partdesc->nparts);
2105 :
2106 528 : for (pp_idx = 0; pp_idx < partdesc->nparts; pp_idx++)
2107 : {
2108 : /* Skip any InvalidOid relid_map entries */
2109 624 : while (pd_idx < pinfo->nparts &&
2110 504 : !OidIsValid(pinfo->relid_map[pd_idx]))
2111 220 : pd_idx++;
2112 :
2113 404 : recheck:
2114 404 : if (pd_idx < pinfo->nparts &&
2115 284 : pinfo->relid_map[pd_idx] == partdesc->oids[pp_idx])
2116 : {
2117 : /* match... */
2118 182 : pprune->subplan_map[pp_idx] =
2119 182 : pinfo->subplan_map[pd_idx];
2120 182 : pprune->subpart_map[pp_idx] =
2121 182 : pinfo->subpart_map[pd_idx];
2122 182 : pprune->leafpart_rti_map[pp_idx] =
2123 182 : pinfo->leafpart_rti_map[pd_idx];
2124 182 : pd_idx++;
2125 182 : continue;
2126 : }
2127 :
2128 : /*
2129 : * There isn't an exact match in the corresponding
2130 : * positions of both arrays. Peek ahead in
2131 : * pinfo->relid_map to see if we have a match for the
2132 : * current partition in partdesc. Normally if a match
2133 : * exists it's just one element ahead, and it means the
2134 : * planner saw one extra partition that we no longer see
2135 : * now (its concurrent detach finished just in between);
2136 : * so we skip that one by updating pd_idx to the new
2137 : * location and jumping above. We can then continue to
2138 : * match the rest of the elements after skipping the OID
2139 : * with no match; no future matches are tried for the
2140 : * element that was skipped, because we know the arrays to
2141 : * be in the same order.
2142 : *
2143 : * If we don't see a match anywhere in the rest of the
2144 : * pinfo->relid_map array, that means we see an element
2145 : * now that the planner didn't see, so mark that one as
2146 : * pruned and move on.
2147 : */
2148 288 : for (int pd_idx2 = pd_idx + 1; pd_idx2 < pinfo->nparts; pd_idx2++)
2149 : {
2152 66 : if (pinfo->relid_map[pd_idx2] == partdesc->oids[pp_idx])
2153 : {
2154 0 : pd_idx = pd_idx2;
2155 0 : goto recheck;
2156 : }
2157 : }
2158 :
2159 222 : pprune->subpart_map[pp_idx] = -1;
2160 222 : pprune->subplan_map[pp_idx] = -1;
2161 222 : pprune->leafpart_rti_map[pp_idx] = 0;
2162 : }
2163 : }
2164 :
2165 : /* present_parts is also subject to later modification */
2166 1632 : pprune->present_parts = bms_copy(pinfo->present_parts);
2167 :
2168 : /*
2169 : * Only initial_context is initialized here. exec_context is
2170 : * initialized during ExecInitPartitionExecPruning() when the
2171 : * parent plan's PlanState is available.
2172 : *
2173 : * Note that we must skip execution-time (both "init" and "exec")
2174 : * partition pruning in EXPLAIN (GENERIC_PLAN), since parameter
2175 : * values may be missing.
2176 : */
2177 1632 : pprune->initial_pruning_steps = pinfo->initial_pruning_steps;
2178 1632 : if (pinfo->initial_pruning_steps &&
2179 554 : !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
2180 : {
2181 548 : InitPartitionPruneContext(&pprune->initial_context,
2182 : pprune->initial_pruning_steps,
2183 : partdesc, partkey, NULL,
2184 : econtext);
2185 : /* Record whether initial pruning is needed at any level */
2186 548 : prunestate->do_initial_prune = true;
2187 : }
2188 1632 : pprune->exec_pruning_steps = pinfo->exec_pruning_steps;
2189 1632 : if (pinfo->exec_pruning_steps &&
2190 508 : !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
2191 : {
2192 : /* Record whether exec pruning is needed at any level */
2193 508 : prunestate->do_exec_prune = true;
2194 : }
2195 :
2196 : /*
2197 : * Accumulate the IDs of all PARAM_EXEC Params affecting the
2198 : * partitioning decisions at this plan node.
2199 : */
2200 3264 : prunestate->execparamids = bms_add_members(prunestate->execparamids,
2201 1632 : pinfo->execparamids);
2202 :
2203 : /*
2204 : * Collect the RT indexes of all leaf partitions if we're skipping
2205 : * pruning in the EXPLAIN (GENERIC_PLAN) case.
2206 : */
2207 1632 : if (pinfo->initial_pruning_steps && !prunestate->do_initial_prune)
2208 : {
2209 6 : int part_index = -1;
2210 :
2211 18 : while ((part_index = bms_next_member(pprune->present_parts,
2212 18 : part_index)) >= 0)
2213 : {
2214 12 : Index rtindex = pprune->leafpart_rti_map[part_index];
2215 :
2216 12 : if (rtindex)
2217 12 : *all_leafpart_rtis = bms_add_member(*all_leafpart_rtis,
2218 : rtindex);
2219 : }
2220 : }
2221 :
2222 1632 : j++;
2223 : }
2224 822 : i++;
2225 : }
2226 :
2227 798 : return prunestate;
2228 : }
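/*
 * A standalone sketch of the OID-matching walk above, assuming plain int
 * arrays in matching (partition bounds) order, with 0 standing in for
 * InvalidOid; out_map[i] receives the planner-time index of the i'th
 * current partition, or -1 when the planner never saw it. Names are
 * illustrative only.
 */
#ifdef EXEC_PARTITION_SKETCHES
static void
sketch_match_partitions(const int *planner_oids, int nplanner,
						const int *current_oids, int ncurrent,
						int *out_map)
{
	int			pd_idx = 0;

	for (int pp_idx = 0; pp_idx < ncurrent; pp_idx++)
	{
		int			pd_idx2;

		/* Skip planner entries already known to be gone. */
		while (pd_idx < nplanner && planner_oids[pd_idx] == 0)
			pd_idx++;

		/* Exact positional match: the common case. */
		if (pd_idx < nplanner && planner_oids[pd_idx] == current_oids[pp_idx])
		{
			out_map[pp_idx] = pd_idx++;
			continue;
		}

		/* Peek ahead for a partition whose neighbor was detached. */
		for (pd_idx2 = pd_idx + 1; pd_idx2 < nplanner; pd_idx2++)
		{
			if (planner_oids[pd_idx2] == current_oids[pp_idx])
				break;
		}
		if (pd_idx2 < nplanner)
		{
			out_map[pp_idx] = pd_idx2;
			pd_idx = pd_idx2 + 1;
			continue;
		}

		/* The planner never saw this partition; treat it as pruned. */
		out_map[pp_idx] = -1;
	}
}
#endif							/* EXEC_PARTITION_SKETCHES */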
2229 :
2230 : /*
2231 : * Initialize a PartitionPruneContext for the given list of pruning steps.
2232 : */
2233 : static void
2234 1056 : InitPartitionPruneContext(PartitionPruneContext *context,
2235 : List *pruning_steps,
2236 : PartitionDesc partdesc,
2237 : PartitionKey partkey,
2238 : PlanState *planstate,
2239 : ExprContext *econtext)
2240 : {
2241 : int n_steps;
2242 : int partnatts;
2243 : ListCell *lc;
2244 :
2245 1056 : n_steps = list_length(pruning_steps);
2246 :
2247 1056 : context->strategy = partkey->strategy;
2248 1056 : context->partnatts = partnatts = partkey->partnatts;
2249 1056 : context->nparts = partdesc->nparts;
2250 1056 : context->boundinfo = partdesc->boundinfo;
2251 1056 : context->partcollation = partkey->partcollation;
2252 1056 : context->partsupfunc = partkey->partsupfunc;
2253 :
2254 : /* We'll look up type-specific support functions as needed */
2255 1056 : context->stepcmpfuncs = (FmgrInfo *)
2256 1056 : palloc0(sizeof(FmgrInfo) * n_steps * partnatts);
2257 :
2258 1056 : context->ppccontext = CurrentMemoryContext;
2259 1056 : context->planstate = planstate;
2260 1056 : context->exprcontext = econtext;
2261 :
2262 : /* Initialize expression state for each expression we need */
2263 1056 : context->exprstates = (ExprState **)
2264 1056 : palloc0(sizeof(ExprState *) * n_steps * partnatts);
2265 2774 : foreach(lc, pruning_steps)
2266 : {
2267 1718 : PartitionPruneStepOp *step = (PartitionPruneStepOp *) lfirst(lc);
2268 1718 : ListCell *lc2 = list_head(step->exprs);
2269 : int keyno;
2270 :
2271 : /* not needed for other step kinds */
2272 1718 : if (!IsA(step, PartitionPruneStepOp))
2273 286 : continue;
2274 :
2275 : Assert(list_length(step->exprs) <= partnatts);
2276 :
2277 3014 : for (keyno = 0; keyno < partnatts; keyno++)
2278 : {
2279 1582 : if (bms_is_member(keyno, step->nullkeys))
2280 6 : continue;
2281 :
2282 1576 : if (lc2 != NULL)
2283 : {
2284 1480 : Expr *expr = lfirst(lc2);
2285 :
2286 : /* not needed for Consts */
2287 1480 : if (!IsA(expr, Const))
2288 : {
2289 1386 : int stateidx = PruneCxtStateIdx(partnatts,
2290 : step->step.step_id,
2291 : keyno);
2292 :
2293 : /*
2294 : * When planstate is NULL, pruning_steps is known not to
2295 : * contain any expressions that depend on the parent plan.
2296 : * Information about any available EXTERN parameters must
2297 : * be passed explicitly in that case; the caller must have
2298 : * made it available via econtext.
2299 : */
2300 1386 : if (planstate == NULL)
2301 812 : context->exprstates[stateidx] =
2302 812 : ExecInitExprWithParams(expr,
2303 : econtext->ecxt_param_list_info);
2304 : else
2305 574 : context->exprstates[stateidx] =
2306 574 : ExecInitExpr(expr, context->planstate);
2307 : }
2308 1480 : lc2 = lnext(step->exprs, lc2);
2309 : }
2310 : }
2311 : }
2312 1056 : }
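/*
 * A sketch of the flat layout used by stepcmpfuncs and exprstates above,
 * assuming PruneCxtStateIdx has its usual row-major definition: one row of
 * 'partnatts' slots per pruning step, so a (step_id, keyno) pair maps to a
 * single array offset.
 */
#ifdef EXEC_PARTITION_SKETCHES
static int
sketch_prune_state_index(int partnatts, int step_id, int keyno)
{
	return partnatts * step_id + keyno;
}
#endif							/* EXEC_PARTITION_SKETCHES */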
2313 :
2314 : /*
2315 : * InitExecPartitionPruneContexts
2316 : * Initialize exec pruning contexts deferred by CreatePartitionPruneState()
2317 : *
2318 : * This function finalizes exec pruning setup for a PartitionPruneState by
2319 : * initializing contexts for pruning steps that require the parent plan's
2320 : * PlanState. It iterates over PartitionPruningData entries and sets up the
2321 : * necessary execution contexts for pruning during query execution.
2322 : *
2323 : * It also fixes the mapping of partition indexes to subplan indexes contained in
2324 : * prunestate by considering the new list of subplans that survived initial
2325 : * pruning.
2326 : *
2327 : * Current values of the indexes present in PartitionPruneState count all the
2328 : * subplans that would be present before initial pruning was done. If initial
2329 : * pruning got rid of some of the subplans, any subsequent pruning passes will
2330 : * be looking at a different set of target subplans to choose from than those
2331 : * in the pre-initial-pruning set, so the maps in PartitionPruneState
2332 : * containing those indexes must be updated to reflect the new indexes of
2333 : * subplans in the post-initial-pruning set.
2334 : */
2335 : static void
2336 394 : InitExecPartitionPruneContexts(PartitionPruneState *prunestate,
2337 : PlanState *parent_plan,
2338 : Bitmapset *initially_valid_subplans,
2339 : int n_total_subplans)
2340 : {
2341 : EState *estate;
2342 394 : int *new_subplan_indexes = NULL;
2343 : Bitmapset *new_other_subplans;
2344 : int i;
2345 : int newidx;
2346 394 : bool fix_subplan_map = false;
2347 :
2348 : Assert(prunestate->do_exec_prune);
2349 : Assert(parent_plan != NULL);
2350 394 : estate = parent_plan->state;
2351 :
2352 : /*
2353 : * No need to fix subplan maps if initial pruning didn't eliminate any
2354 : * subplans.
2355 : */
2356 394 : if (bms_num_members(initially_valid_subplans) < n_total_subplans)
2357 : {
2358 48 : fix_subplan_map = true;
2359 :
2360 : /*
2361 : * First we must build a temporary array which maps old subplan
2362 : * indexes to new ones. For convenience of initialization, we use
2363 : * 1-based indexes in this array and leave pruned items as 0.
2364 : */
2365 48 : new_subplan_indexes = (int *) palloc0(sizeof(int) * n_total_subplans);
2366 48 : newidx = 1;
2367 48 : i = -1;
2368 186 : while ((i = bms_next_member(initially_valid_subplans, i)) >= 0)
2369 : {
2370 : Assert(i < n_total_subplans);
2371 138 : new_subplan_indexes[i] = newidx++;
2372 : }
2373 : }
2374 :
2375 : /*
2376 : * Now we can update each PartitionedRelPruningData's subplan_map with new
2377 : * subplan indexes. We must also recompute its present_parts bitmap.
2378 : */
2379 812 : for (i = 0; i < prunestate->num_partprunedata; i++)
2380 : {
2381 418 : PartitionPruningData *prunedata = prunestate->partprunedata[i];
2382 : int j;
2383 :
2384 : /*
2385 : * Within each hierarchy, we perform this loop in back-to-front order
2386 : * so that we determine present_parts for the lowest-level partitioned
2387 : * tables first. This way we can tell whether a sub-partitioned
2388 : * table's partitions were entirely pruned so we can exclude it from
2389 : * the current level's present_parts.
2390 : */
2391 1292 : for (j = prunedata->num_partrelprunedata - 1; j >= 0; j--)
2392 : {
2393 874 : PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
2394 874 : int nparts = pprune->nparts;
2395 : int k;
2396 :
2397 : /* Initialize PartitionPruneContext for exec pruning, if needed. */
2398 874 : if (pprune->exec_pruning_steps != NIL)
2399 : {
2400 : PartitionKey partkey;
2401 : PartitionDesc partdesc;
2402 :
2403 : /*
2404 : * See the comment in CreatePartitionPruneState() regarding
2405 : * the usage of partdesc and partkey.
2406 : */
2407 508 : partkey = RelationGetPartitionKey(pprune->partrel);
2408 508 : partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
2409 : pprune->partrel);
2410 :
2411 508 : InitPartitionPruneContext(&pprune->exec_context,
2412 : pprune->exec_pruning_steps,
2413 : partdesc, partkey, parent_plan,
2414 : prunestate->econtext);
2415 : }
2416 :
2417 874 : if (!fix_subplan_map)
2418 682 : continue;
2419 :
2420 : /* We just rebuild present_parts from scratch */
2421 192 : bms_free(pprune->present_parts);
2422 192 : pprune->present_parts = NULL;
2423 :
2424 708 : for (k = 0; k < nparts; k++)
2425 : {
2426 516 : int oldidx = pprune->subplan_map[k];
2427 : int subidx;
2428 :
2429 : /*
2430 : * If this partition existed as a subplan then change the old
2431 : * subplan index to the new subplan index. The new index may
2432 : * become -1 if the partition was pruned above, or it may just
2433 : * come earlier in the subplan list due to subplans before it
2434 : * having been removed. If it's a subpartition, add
2435 : * it to present_parts unless it's entirely pruned.
2436 : */
2437 516 : if (oldidx >= 0)
2438 : {
2439 : Assert(oldidx < n_total_subplans);
2440 396 : pprune->subplan_map[k] = new_subplan_indexes[oldidx] - 1;
2441 :
2442 396 : if (new_subplan_indexes[oldidx] > 0)
2443 114 : pprune->present_parts =
2444 114 : bms_add_member(pprune->present_parts, k);
2445 : }
2446 120 : else if ((subidx = pprune->subpart_map[k]) >= 0)
2447 : {
2448 : PartitionedRelPruningData *subprune;
2449 :
2450 120 : subprune = &prunedata->partrelprunedata[subidx];
2451 :
2452 120 : if (!bms_is_empty(subprune->present_parts))
2453 48 : pprune->present_parts =
2454 48 : bms_add_member(pprune->present_parts, k);
2455 : }
2456 : }
2457 : }
2458 : }
2459 :
2460 : /*
2461 : * If we fixed subplan maps, we must also recompute the other_subplans
2462 : * set, since indexes in it may change.
2463 : */
2464 394 : if (fix_subplan_map)
2465 : {
2466 48 : new_other_subplans = NULL;
2467 48 : i = -1;
2468 72 : while ((i = bms_next_member(prunestate->other_subplans, i)) >= 0)
2469 24 : new_other_subplans = bms_add_member(new_other_subplans,
2470 24 : new_subplan_indexes[i] - 1);
2471 :
2472 48 : bms_free(prunestate->other_subplans);
2473 48 : prunestate->other_subplans = new_other_subplans;
2474 :
2475 48 : pfree(new_subplan_indexes);
2476 : }
2477 394 : }
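/*
 * A standalone sketch of the subplan re-sequencing above, assuming plain
 * arrays: surviving old indexes are renumbered consecutively, and the
 * temporary map is 1-based so that 0 can mean "pruned"; a map entry is then
 * applied as new = old_to_new[old] - 1, yielding -1 for pruned subplans.
 */
#ifdef EXEC_PARTITION_SKETCHES
static void
sketch_resequence_subplans(const bool *survived, int n_total, int *old_to_new)
{
	int			newidx = 1;

	for (int i = 0; i < n_total; i++)
		old_to_new[i] = survived[i] ? newidx++ : 0;
}
#endif							/* EXEC_PARTITION_SKETCHES */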
2478 :
2479 : /*
2480 : * ExecFindMatchingSubPlans
2481 : * Determine which subplans match the pruning steps detailed in
2482 : * 'prunestate' for the current comparison expression values.
2483 : *
2484 : * Pass initial_prune if PARAM_EXEC Params cannot yet be evaluated. This
2485 : * differentiates the initial executor-time pruning step from later
2486 : * runtime pruning.
2487 : *
2488 : * The caller must pass a non-NULL validsubplan_rtis during initial pruning
2489 : * to collect the RT indexes of leaf partitions whose subnodes will be
2490 : * executed. These RT indexes are later added to EState.es_unpruned_relids.
2491 : */
2492 : Bitmapset *
2493 3892 : ExecFindMatchingSubPlans(PartitionPruneState *prunestate,
2494 : bool initial_prune,
2495 : Bitmapset **validsubplan_rtis)
2496 : {
2497 3892 : Bitmapset *result = NULL;
2498 : MemoryContext oldcontext;
2499 : int i;
2500 :
2501 : /*
2502 : * Either we're here on the initial prune done during pruning
2503 : * initialization, or we're at a point where PARAM_EXEC Params can be
2504 : * evaluated *and* there are exec pruning steps in which to do so.
2505 : */
2506 : Assert(initial_prune || prunestate->do_exec_prune);
2507 : Assert(validsubplan_rtis != NULL || !initial_prune);
2508 :
2509 : /*
2510 : * Switch to a temp context to avoid leaking memory in the executor's
2511 : * query-lifespan memory context.
2512 : */
2513 3892 : oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
2514 :
2515 : /*
2516 : * For each hierarchy, do the pruning tests, and add nondeletable
2517 : * subplans' indexes to "result".
2518 : */
2519 7826 : for (i = 0; i < prunestate->num_partprunedata; i++)
2520 : {
2521 3934 : PartitionPruningData *prunedata = prunestate->partprunedata[i];
2522 : PartitionedRelPruningData *pprune;
2523 :
2524 : /*
2525 : * We pass the zeroth item, belonging to the root table of the
2526 : * hierarchy, and find_matching_subplans_recurse() takes care of
2527 : * recursing to other (lower-level) parents as needed.
2528 : */
2529 3934 : pprune = &prunedata->partrelprunedata[0];
2530 3934 : find_matching_subplans_recurse(prunedata, pprune, initial_prune,
2531 : &result, validsubplan_rtis);
2532 :
2533 : /*
2534 : * Expression eval may have used space in ExprContext too. Avoid
2535 : * accessing exec_context during initial pruning, as it is not valid
2536 : * at that stage.
2537 : */
2538 3934 : if (!initial_prune && pprune->exec_pruning_steps)
2539 3392 : ResetExprContext(pprune->exec_context.exprcontext);
2540 : }
2541 :
2542 : /* Add in any subplans that partition pruning didn't account for */
2543 3892 : result = bms_add_members(result, prunestate->other_subplans);
2544 :
2545 3892 : MemoryContextSwitchTo(oldcontext);
2546 :
2547 : /* Copy result out of the temp context before we reset it */
2548 3892 : result = bms_copy(result);
2549 3892 : if (validsubplan_rtis)
2550 446 : *validsubplan_rtis = bms_copy(*validsubplan_rtis);
2551 :
2552 3892 : MemoryContextReset(prunestate->prune_context);
2553 :
2554 3892 : return result;
2555 : }
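/*
 * The memory discipline above in miniature, assuming an already-created
 * short-lived context 'tmpcxt': do throwaway work inside it, copy the
 * result out, then reset it so per-call allocations never accumulate.
 */
#ifdef EXEC_PARTITION_SKETCHES
static Bitmapset *
sketch_compute_in_temp_context(MemoryContext tmpcxt)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(tmpcxt);
	Bitmapset  *result = bms_add_member(NULL, 0);	/* stand-in for real work */

	MemoryContextSwitchTo(oldcontext);
	result = bms_copy(result);	/* copy out before the reset below */
	MemoryContextReset(tmpcxt);
	return result;
}
#endif							/* EXEC_PARTITION_SKETCHES */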
2556 :
2557 : /*
2558 : * find_matching_subplans_recurse
2559 : * Recursive worker function for ExecFindMatchingSubPlans
2560 : *
2561 : * Adds valid (non-prunable) subplan IDs to *validsubplans. If
2562 : * *validsubplan_rtis is non-NULL, it also adds the RT indexes of their
2563 : * corresponding partitions, but only if they are leaf partitions.
2564 : */
2565 : static void
2566 4348 : find_matching_subplans_recurse(PartitionPruningData *prunedata,
2567 : PartitionedRelPruningData *pprune,
2568 : bool initial_prune,
2569 : Bitmapset **validsubplans,
2570 : Bitmapset **validsubplan_rtis)
2571 : {
2572 : Bitmapset *partset;
2573 : int i;
2574 :
2575 : /* Guard against stack overflow due to overly deep partition hierarchy. */
2576 4348 : check_stack_depth();
2577 :
2578 : /*
2579 : * Prune as appropriate, if we have pruning steps matching the current
2580 : * execution context. Otherwise just include all partitions at this
2581 : * level.
2582 : */
2583 4348 : if (initial_prune && pprune->initial_pruning_steps)
2584 530 : partset = get_matching_partitions(&pprune->initial_context,
2585 : pprune->initial_pruning_steps);
2586 3818 : else if (!initial_prune && pprune->exec_pruning_steps)
2587 3476 : partset = get_matching_partitions(&pprune->exec_context,
2588 : pprune->exec_pruning_steps);
2589 : else
2590 342 : partset = pprune->present_parts;
2591 :
2592 : /* Translate partset into subplan indexes */
2593 4348 : i = -1;
2594 6152 : while ((i = bms_next_member(partset, i)) >= 0)
2595 : {
2596 1804 : if (pprune->subplan_map[i] >= 0)
2597 : {
2598 2776 : *validsubplans = bms_add_member(*validsubplans,
2599 1388 : pprune->subplan_map[i]);
2600 :
2601 : /*
2602 : * Only report leaf partitions. Non-leaf partitions may appear
2603 : * here when they use an unflattened Append or MergeAppend.
2604 : */
2605 1388 : if (validsubplan_rtis && pprune->leafpart_rti_map[i])
2606 672 : *validsubplan_rtis = bms_add_member(*validsubplan_rtis,
2607 672 : pprune->leafpart_rti_map[i]);
2608 : }
2609 : else
2610 : {
2611 416 : int partidx = pprune->subpart_map[i];
2612 :
2613 416 : if (partidx >= 0)
2614 414 : find_matching_subplans_recurse(prunedata,
2615 : &prunedata->partrelprunedata[partidx],
2616 : initial_prune, validsubplans,
2617 : validsubplan_rtis);
2618 : else
2619 : {
2620 : /*
2621 : * We get here if the planner already pruned all the sub-
2622 : * partitions for this partition. Silently ignore this
2623 : * partition in this case. The end result is the same: we
2624 : * would have pruned all partitions just the same, but we
2625 : * don't have any pruning steps to execute to verify this.
2626 : */
2627 : }
2628 : }
2629 : }
2630 4348 : }
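/*
 * A condensed sketch of the index translation above, assuming the two maps
 * follow the conventions of PartitionedRelPruningData: for a surviving
 * partition index i, subplan_map[i] >= 0 means a leaf scanned by a subplan,
 * otherwise subpart_map[i] >= 0 means a sub-partitioned table to recurse
 * into, and both being -1 means the planner pruned everything below i.
 */
#ifdef EXEC_PARTITION_SKETCHES
typedef enum SketchPartKind
{
	SKETCH_LEAF_SUBPLAN,		/* add subplan_map[i] to the result */
	SKETCH_RECURSE_SUBPART,		/* recurse into partrelprunedata[subpart_map[i]] */
	SKETCH_FULLY_PRUNED			/* silently ignore */
} SketchPartKind;

static SketchPartKind
sketch_classify_partition(const int *subplan_map, const int *subpart_map, int i)
{
	if (subplan_map[i] >= 0)
		return SKETCH_LEAF_SUBPLAN;
	if (subpart_map[i] >= 0)
		return SKETCH_RECURSE_SUBPART;
	return SKETCH_FULLY_PRUNED;
}
#endif							/* EXEC_PARTITION_SKETCHES */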
|