1 : /*-------------------------------------------------------------------------
2 : *
3 : * nbtutils.c
4 : * Utility code for Postgres btree implementation.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/nbtree/nbtutils.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 :
16 : #include "postgres.h"
17 :
18 : #include <time.h>
19 :
20 : #include "access/nbtree.h"
21 : #include "access/reloptions.h"
22 : #include "commands/progress.h"
23 : #include "miscadmin.h"
24 : #include "utils/datum.h"
25 : #include "utils/lsyscache.h"
26 :
27 : #define LOOK_AHEAD_REQUIRED_RECHECKS 3
28 : #define LOOK_AHEAD_DEFAULT_DISTANCE 5
29 :
30 : static inline int32 _bt_compare_array_skey(FmgrInfo *orderproc,
31 : Datum tupdatum, bool tupnull,
32 : Datum arrdatum, ScanKey cur);
33 : static bool _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir);
34 : static void _bt_rewind_nonrequired_arrays(IndexScanDesc scan, ScanDirection dir);
35 : static bool _bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir,
36 : IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
37 : bool readpagetup, int sktrig, bool *scanBehind);
38 : static bool _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
39 : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
40 : int sktrig, bool sktrig_required);
41 : #ifdef USE_ASSERT_CHECKING
42 : static bool _bt_verify_arrays_bt_first(IndexScanDesc scan, ScanDirection dir);
43 : static bool _bt_verify_keys_with_arraykeys(IndexScanDesc scan);
44 : #endif
45 : static bool _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir,
46 : IndexTuple finaltup);
47 : static bool _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
48 : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
49 : bool advancenonrequired, bool prechecked, bool firstmatch,
50 : bool *continuescan, int *ikey);
51 : static bool _bt_check_rowcompare(ScanKey skey,
52 : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
53 : ScanDirection dir, bool *continuescan);
54 : static void _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
55 : int tupnatts, TupleDesc tupdesc);
56 : static int _bt_keep_natts(Relation rel, IndexTuple lastleft,
57 : IndexTuple firstright, BTScanInsert itup_key);
58 :
59 :
60 : /*
61 : * _bt_mkscankey
62 : * Build an insertion scan key that contains comparison data from itup
63 : * as well as comparator routines appropriate to the key datatypes.
64 : *
65 : * The result is intended for use with _bt_compare() and _bt_truncate().
66 : * Callers that don't need to fill out the insertion scankey arguments
67 : * (e.g. they use an ad-hoc comparison routine, or only need a scankey
68 : * for _bt_truncate()) can pass a NULL index tuple. The scankey will
69 : * be initialized as if an "all truncated" pivot tuple was passed
70 : * instead.
71 : *
72 : * Note that we may occasionally have to share lock the metapage to
73 : * determine whether or not the keys in the index are expected to be
74 : * unique (i.e. if this is a "heapkeyspace" index). We assume a
75 : * heapkeyspace index when caller passes a NULL tuple, allowing index
76 : * build callers to avoid accessing the non-existent metapage. We
77 : * also assume that the index is _not_ allequalimage when a NULL tuple
78 : * is passed; CREATE INDEX callers call _bt_allequalimage() to set the
79 : * field themselves.
80 : */
81 : BTScanInsert
82 11542206 : _bt_mkscankey(Relation rel, IndexTuple itup)
83 : {
84 : BTScanInsert key;
85 : ScanKey skey;
86 : TupleDesc itupdesc;
87 : int indnkeyatts;
88 : int16 *indoption;
89 : int tupnatts;
90 : int i;
91 :
92 11542206 : itupdesc = RelationGetDescr(rel);
93 11542206 : indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
94 11542206 : indoption = rel->rd_indoption;
95 11542206 : tupnatts = itup ? BTreeTupleGetNAtts(itup, rel) : 0;
96 :
97 : Assert(tupnatts <= IndexRelationGetNumberOfAttributes(rel));
98 :
99 : /*
100 : * We'll execute the search using a scan key constructed from the key columns.
101 : * Truncated attributes and non-key attributes are omitted from the final
102 : * scan key.
103 : */
104 11542206 : key = palloc(offsetof(BTScanInsertData, scankeys) +
105 11542206 : sizeof(ScanKeyData) * indnkeyatts);
106 11542206 : if (itup)
107 11405298 : _bt_metaversion(rel, &key->heapkeyspace, &key->allequalimage);
108 : else
109 : {
110 : /* Utility statement callers can set these fields themselves */
111 136908 : key->heapkeyspace = true;
112 136908 : key->allequalimage = false;
113 : }
114 11542206 : key->anynullkeys = false; /* initial assumption */
115 11542206 : key->nextkey = false; /* usual case, required by btinsert */
116 11542206 : key->backward = false; /* usual case, required by btinsert */
117 11542206 : key->keysz = Min(indnkeyatts, tupnatts);
118 11542206 : key->scantid = key->heapkeyspace && itup ?
119 23084412 : BTreeTupleGetHeapTID(itup) : NULL;
120 11542206 : skey = key->scankeys;
121 31066774 : for (i = 0; i < indnkeyatts; i++)
122 : {
123 : FmgrInfo *procinfo;
124 : Datum arg;
125 : bool null;
126 : int flags;
127 :
128 : /*
129 : * We can use the cached (default) support procs since no cross-type
130 : * comparison can be needed.
131 : */
132 19524568 : procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
133 :
134 : /*
135 : * Key arguments built from truncated attributes (or when caller
136 : * provides no tuple) are defensively represented as NULL values. They
137 : * should never be used.
138 : */
139 19524568 : if (i < tupnatts)
140 19279004 : arg = index_getattr(itup, i + 1, itupdesc, &null);
141 : else
142 : {
143 245564 : arg = (Datum) 0;
144 245564 : null = true;
145 : }
146 19524568 : flags = (null ? SK_ISNULL : 0) | (indoption[i] << SK_BT_INDOPTION_SHIFT);
147 19524568 : ScanKeyEntryInitializeWithInfo(&skey[i],
148 : flags,
149 19524568 : (AttrNumber) (i + 1),
150 : InvalidStrategy,
151 : InvalidOid,
152 19524568 : rel->rd_indcollation[i],
153 : procinfo,
154 : arg);
155 : /* Record if any key attribute is NULL (or truncated) */
156 19524568 : if (null)
157 266146 : key->anynullkeys = true;
158 : }
159 :
160 : /*
161 : * In NULLS NOT DISTINCT mode, we pretend that there are no null keys, so
162 : * that a full uniqueness check is done.
163 : */
164 11542206 : if (rel->rd_index->indnullsnotdistinct)
165 186 : key->anynullkeys = false;
166 :
167 11542206 : return key;
168 : }
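
/*
 * Illustrative sketch (added for exposition; not part of nbtutils.c):
 * roughly how an insertion-path caller is expected to use the result of
 * _bt_mkscankey().  "btree_insert_demo" is a hypothetical name, and the
 * descent/locking logic that real callers such as _bt_doinsert perform
 * is elided.
 */
static void
btree_insert_demo(Relation rel, IndexTuple itup)
{
	BTScanInsert itup_key;

	/* Build an insertion scan key from the new tuple's key attributes */
	itup_key = _bt_mkscankey(rel, itup);

	/*
	 * itup_key can now drive a _bt_search() descent and _bt_compare()
	 * calls, or be handed to _bt_truncate() (all elided here).
	 */

	/* The key is a single palloc chunk, so this releases everything */
	pfree(itup_key);
}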
169 :
170 : /*
171 : * free a retracement stack made by _bt_search.
172 : */
173 : void
174 20815602 : _bt_freestack(BTStack stack)
175 : {
176 : BTStack ostack;
177 :
178 38249504 : while (stack != NULL)
179 : {
180 17433902 : ostack = stack;
181 17433902 : stack = stack->bts_parent;
182 17433902 : pfree(ostack);
183 : }
184 20815602 : }
185 :
186 : /*
187 : * _bt_compare_array_skey() -- apply array comparison function
188 : *
189 : * Compares caller's tuple attribute value to a scan key/array element.
190 : * Helper function used during binary searches of SK_SEARCHARRAY arrays.
191 : *
192 : * This routine returns:
193 : * <0 if tupdatum < arrdatum;
194 : * 0 if tupdatum == arrdatum;
195 : * >0 if tupdatum > arrdatum.
196 : *
197 : * This is essentially the same interface as _bt_compare: both functions
198 : * compare the value that they're searching for to a binary search pivot.
199 : * However, unlike _bt_compare, this function's "tuple argument" comes first,
200 : * while its "array/scankey argument" comes second.
201 : */
202 : static inline int32
203 75590 : _bt_compare_array_skey(FmgrInfo *orderproc,
204 : Datum tupdatum, bool tupnull,
205 : Datum arrdatum, ScanKey cur)
206 : {
207 75590 : int32 result = 0;
208 :
209 : Assert(cur->sk_strategy == BTEqualStrategyNumber);
210 :
211 75590 : if (tupnull) /* NULL tupdatum */
212 : {
213 6 : if (cur->sk_flags & SK_ISNULL)
214 6 : result = 0; /* NULL "=" NULL */
215 0 : else if (cur->sk_flags & SK_BT_NULLS_FIRST)
216 0 : result = -1; /* NULL "<" NOT_NULL */
217 : else
218 0 : result = 1; /* NULL ">" NOT_NULL */
219 : }
220 75584 : else if (cur->sk_flags & SK_ISNULL) /* NOT_NULL tupdatum, NULL arrdatum */
221 : {
222 6 : if (cur->sk_flags & SK_BT_NULLS_FIRST)
223 0 : result = 1; /* NOT_NULL ">" NULL */
224 : else
225 6 : result = -1; /* NOT_NULL "<" NULL */
226 : }
227 : else
228 : {
229 : /*
230 : * Like _bt_compare, we need to be careful of cross-type comparisons,
231 : * so the left value has to be the value that came from an index tuple
232 : */
233 75578 : result = DatumGetInt32(FunctionCall2Coll(orderproc, cur->sk_collation,
234 : tupdatum, arrdatum));
235 :
236 : /*
237 : * We flip the sign by following the obvious rule: flip whenever the
238 : * column is a DESC column.
239 : *
240 : * _bt_compare does it the wrong way around (flip when *ASC*) in order
241 : * to compensate for passing its orderproc arguments backwards. We
242 : * don't need to play these games because we find it natural to pass
243 : * tupdatum as the left value (and arrdatum as the right value).
244 : */
245 75578 : if (cur->sk_flags & SK_BT_DESC)
246 24 : INVERT_COMPARE_RESULT(result);
247 : }
248 :
249 75590 : return result;
250 : }
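
/*
 * Standalone model (exposition only, not from nbtutils.c) of the
 * comparison rules implemented above, using plain C types in place of
 * Datum/ScanKey.  "nulls_first" and "desc" stand in for the scan key's
 * SK_BT_NULLS_FIRST and SK_BT_DESC flag bits.
 */
static int
model_compare_array_skey(bool tupnull, int tupval,
						 bool arrnull, int arrval,
						 bool nulls_first, bool desc)
{
	int			result;

	if (tupnull && arrnull)
		return 0;				/* NULL "=" NULL */
	if (tupnull)
		return nulls_first ? -1 : 1;	/* NULL vs NOT_NULL */
	if (arrnull)
		return nulls_first ? 1 : -1;	/* NOT_NULL vs NULL */

	/* 3-way compare, in index (ASC) terms */
	result = (tupval > arrval) - (tupval < arrval);

	/* Flip the sign only for DESC columns, per the rule above */
	return desc ? -result : result;
}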
251 :
252 : /*
253 : * _bt_binsrch_array_skey() -- Binary search for next matching array key
254 : *
255 : * Returns an index to the first array element >= caller's tupdatum argument.
256 : * This convention is more natural for forwards scan callers, but that can't
257 : * really matter to backwards scan callers. Both callers require handling for
258 : * the case where the match we return is < tupdatum, and symmetric handling
259 : * for the case where our best match is > tupdatum.
260 : *
261 : * Also sets *set_elem_result to the result _bt_compare_array_skey returned
262 : * when we used it to compare the matching array element to tupdatum/tupnull.
263 : *
264 : * cur_elem_trig indicates if array advancement was triggered by this array's
265 : * scan key, and that the array is for a required scan key. We can apply this
266 : * information to find the next matching array element in the current scan
267 : * direction using far fewer comparisons (fewer on average, compared to naive
268 : * binary search). This scheme takes advantage of an important property of
269 : * required arrays: required arrays always advance in lockstep with the index
270 : * scan's progress through the index's key space.
271 : */
272 : int
273 28306 : _bt_binsrch_array_skey(FmgrInfo *orderproc,
274 : bool cur_elem_trig, ScanDirection dir,
275 : Datum tupdatum, bool tupnull,
276 : BTArrayKeyInfo *array, ScanKey cur,
277 : int32 *set_elem_result)
278 : {
279 28306 : int low_elem = 0,
280 28306 : mid_elem = -1,
281 28306 : high_elem = array->num_elems - 1,
282 28306 : result = 0;
283 : Datum arrdatum;
284 :
285 : Assert(cur->sk_flags & SK_SEARCHARRAY);
286 : Assert(cur->sk_strategy == BTEqualStrategyNumber);
287 :
288 28306 : if (cur_elem_trig)
289 : {
290 : Assert(!ScanDirectionIsNoMovement(dir));
291 : Assert(cur->sk_flags & SK_BT_REQFWD);
292 :
293 : /*
294 : * When the scan key that triggered array advancement is a required
295 : * array scan key, it is now certain that the current array element
296 : * (plus all prior elements relative to the current scan direction)
297 : * cannot possibly be at or ahead of the corresponding tuple value.
298 : * (_bt_checkkeys must have called _bt_tuple_before_array_skeys, which
299 : * makes sure this is true as a condition of advancing the arrays.)
300 : *
301 : * This makes it safe to exclude array elements up to and including
302 : * the former-current array element from our search.
303 : *
304 : * Separately, when array advancement was triggered by a required scan
305 : * key, the array element immediately after the former-current element
306 : * is often either an exact tupdatum match, or a "close by" near-match
307 : * (a near-match tupdatum is one whose key space falls _between_ the
308 : * former-current and new-current array elements). We'll detect both
309 : * cases via an optimistic comparison of the new search lower bound
310 : * (or new search upper bound in the case of backwards scans).
311 : */
312 27772 : if (ScanDirectionIsForward(dir))
313 : {
314 27748 : low_elem = array->cur_elem + 1; /* old cur_elem exhausted */
315 :
316 : /* Compare prospective new cur_elem (also the new lower bound) */
317 27748 : if (high_elem >= low_elem)
318 : {
319 20366 : arrdatum = array->elem_values[low_elem];
320 20366 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
321 : arrdatum, cur);
322 :
323 20366 : if (result <= 0)
324 : {
325 : /* Optimistic comparison optimization worked out */
326 20280 : *set_elem_result = result;
327 20280 : return low_elem;
328 : }
329 86 : mid_elem = low_elem;
330 86 : low_elem++; /* this cur_elem exhausted, too */
331 : }
332 :
333 7468 : if (high_elem < low_elem)
334 : {
335 : /* Caller needs to perform "beyond end" array advancement */
336 7388 : *set_elem_result = 1;
337 7388 : return high_elem;
338 : }
339 : }
340 : else
341 : {
342 24 : high_elem = array->cur_elem - 1; /* old cur_elem exhausted */
343 :
344 : /* Compare prospective new cur_elem (also the new upper bound) */
345 24 : if (high_elem >= low_elem)
346 : {
347 18 : arrdatum = array->elem_values[high_elem];
348 18 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
349 : arrdatum, cur);
350 :
351 18 : if (result >= 0)
352 : {
353 : /* Optimistic comparison optimization worked out */
354 18 : *set_elem_result = result;
355 18 : return high_elem;
356 : }
357 0 : mid_elem = high_elem;
358 0 : high_elem--; /* this cur_elem exhausted, too */
359 : }
360 :
361 6 : if (high_elem < low_elem)
362 : {
363 : /* Caller needs to perform "beyond end" array advancement */
364 6 : *set_elem_result = -1;
365 6 : return low_elem;
366 : }
367 : }
368 : }
369 :
370 1124 : while (high_elem > low_elem)
371 : {
372 654 : mid_elem = low_elem + ((high_elem - low_elem) / 2);
373 654 : arrdatum = array->elem_values[mid_elem];
374 :
375 654 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
376 : arrdatum, cur);
377 :
378 654 : if (result == 0)
379 : {
380 : /*
381 : * It's safe to quit as soon as we see an equal array element.
382 : * This often saves an extra comparison or two...
383 : */
384 144 : low_elem = mid_elem;
385 144 : break;
386 : }
387 :
388 510 : if (result > 0)
389 450 : low_elem = mid_elem + 1;
390 : else
391 60 : high_elem = mid_elem;
392 : }
393 :
394 : /*
395 : * ...but our caller also cares about how its searched-for tuple datum
396 : * compares to the low_elem datum. Must always set *set_elem_result with
397 : * the result of that comparison specifically.
398 : */
399 614 : if (low_elem != mid_elem)
400 422 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
401 422 : array->elem_values[low_elem], cur);
402 :
403 614 : *set_elem_result = result;
404 :
405 614 : return low_elem;
406 : }
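
/*
 * Simplified standalone equivalent (exposition only) of the binary
 * search above: an ascending int array with no duplicates and no NULLs,
 * and no cur_elem_trig fast path.  Returns the index of the first
 * element >= probe (or the last index when probe is beyond the end);
 * *cmp receives the 3-way result of comparing probe against the
 * returned element, mirroring *set_elem_result.
 */
static int
model_binsrch_array(const int *elems, int nelems, int probe, int *cmp)
{
	int			low = 0,
				high = nelems - 1;	/* assumes nelems >= 1 */

	while (high > low)
	{
		int			mid = low + (high - low) / 2;

		if (probe > elems[mid])
			low = mid + 1;
		else
			high = mid;
	}

	*cmp = (probe > elems[low]) - (probe < elems[low]);
	return low;
}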
407 :
408 : /*
409 : * _bt_start_array_keys() -- Initialize array keys at start of a scan
410 : *
411 : * Set up the cur_elem counters and fill in the first sk_argument value for
412 : * each array scankey.
413 : */
414 : void
415 74572 : _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
416 : {
417 74572 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
418 : int i;
419 :
420 : Assert(so->numArrayKeys);
421 : Assert(so->qual_ok);
422 :
423 149456 : for (i = 0; i < so->numArrayKeys; i++)
424 : {
425 74884 : BTArrayKeyInfo *curArrayKey = &so->arrayKeys[i];
426 74884 : ScanKey skey = &so->keyData[curArrayKey->scan_key];
427 :
428 : Assert(curArrayKey->num_elems > 0);
429 : Assert(skey->sk_flags & SK_SEARCHARRAY);
430 :
431 74884 : if (ScanDirectionIsBackward(dir))
432 7630 : curArrayKey->cur_elem = curArrayKey->num_elems - 1;
433 : else
434 67254 : curArrayKey->cur_elem = 0;
435 74884 : skey->sk_argument = curArrayKey->elem_values[curArrayKey->cur_elem];
436 : }
437 74572 : so->scanBehind = so->oppositeDirCheck = false; /* reset */
438 74572 : }
439 :
440 : /*
441 : * _bt_advance_array_keys_increment() -- Advance to next set of array elements
442 : *
443 : * Advances the array keys by a single increment in the current scan
444 : * direction. When there are multiple array keys this can roll over from the
445 : * lowest order array to higher order arrays.
446 : *
447 : * Returns true if there is another set of values to consider, false if not.
448 : * On true result, the scankeys are initialized with the next set of values.
449 : * On false result, the scankeys stay the same, and the array keys are not
450 : * advanced (every array remains at its final element for the current scan direction).
451 : */
452 : static bool
453 7506 : _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir)
454 : {
455 7506 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
456 :
457 : /*
458 : * We must advance the last array key most quickly, since it will
459 : * correspond to the lowest-order index column among the available
460 : * qualifications
461 : */
462 15130 : for (int i = so->numArrayKeys - 1; i >= 0; i--)
463 : {
464 7662 : BTArrayKeyInfo *curArrayKey = &so->arrayKeys[i];
465 7662 : ScanKey skey = &so->keyData[curArrayKey->scan_key];
466 7662 : int cur_elem = curArrayKey->cur_elem;
467 7662 : int num_elems = curArrayKey->num_elems;
468 7662 : bool rolled = false;
469 :
470 7662 : if (ScanDirectionIsForward(dir) && ++cur_elem >= num_elems)
471 : {
472 7618 : cur_elem = 0;
473 7618 : rolled = true;
474 : }
475 44 : else if (ScanDirectionIsBackward(dir) && --cur_elem < 0)
476 : {
477 6 : cur_elem = num_elems - 1;
478 6 : rolled = true;
479 : }
480 :
481 7662 : curArrayKey->cur_elem = cur_elem;
482 7662 : skey->sk_argument = curArrayKey->elem_values[cur_elem];
483 7662 : if (!rolled)
484 38 : return true;
485 :
486 : /* Need to advance next array key, if any */
487 : }
488 :
489 : /*
490 : * The array keys are now exhausted.
491 : *
492 : * Restore the array keys to the state they were in immediately before we
493 : * were called. This ensures that the arrays only ever ratchet in the
494 : * current scan direction.
495 : *
496 : * Without this, scans could overlook matching tuples when the scan
497 : * direction gets reversed just before btgettuple runs out of items to
498 : * return, but just after _bt_readpage prepares all the items from the
499 : * scan's final page in so->currPos. When we're on the final page it is
500 : * typical for so->currPos to get invalidated once btgettuple finally
501 : * returns false, which'll effectively invalidate the scan's array keys.
502 : * That hasn't happened yet, though -- and in general it may never happen.
503 : */
504 7468 : _bt_start_array_keys(scan, -dir);
505 :
506 7468 : return false;
507 : }
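
/*
 * Standalone model (exposition only) of the odometer-style rollover
 * above: advance a set of array cursors, least significant (last) array
 * first, carrying into higher-order arrays on rollover.  Returns false
 * when every array rolls over, i.e. the keys are exhausted.  "DemoArray"
 * is a hypothetical stand-in for BTArrayKeyInfo; backward scans and the
 * restore-on-exhaustion step are elided.
 */
typedef struct DemoArray
{
	int			cur_elem;		/* current element's index */
	int			num_elems;		/* number of elements in array */
} DemoArray;

static bool
model_increment_arrays(DemoArray *arrays, int narrays)
{
	for (int i = narrays - 1; i >= 0; i--)
	{
		if (++arrays[i].cur_elem < arrays[i].num_elems)
			return true;		/* no rollover here: new set of values */
		arrays[i].cur_elem = 0; /* rolled over; carry to next array */
	}
	return false;				/* all arrays rolled over: exhausted */
}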
508 :
509 : /*
510 : * _bt_rewind_nonrequired_arrays() -- Rewind non-required arrays
511 : *
512 : * Called when _bt_advance_array_keys decides to start a new primitive index
513 : * scan on the basis of the current scan position being before the position
514 : * that _bt_first is capable of repositioning the scan to by applying an
515 : * inequality operator required in the opposite-to-scan direction only.
516 : *
517 : * Although equality strategy scan keys (for both arrays and non-arrays alike)
518 : * are either marked required in both directions or in neither direction,
519 : * there is a sense in which non-required arrays behave like required arrays.
520 : * With a qual such as "WHERE a IN (100, 200) AND b >= 3 AND c IN (5, 6, 7)",
521 : * the scan key on "c" is non-required, but nevertheless enables positioning
522 : * the scan at the first tuple >= "(100, 3, 5)" on the leaf level during the
523 : * first descent of the tree by _bt_first. Later on, there could also be a
524 : * second descent, that places the scan right before tuples >= "(200, 3, 5)".
525 : * _bt_first must never be allowed to build an insertion scan key whose "c"
526 : * entry is set to a value other than 5, the "c" array's first element/value.
527 : * (Actually, it's the first in the current scan direction. This example uses
528 : * a forward scan.)
529 : *
530 : * Calling here resets the array scan key elements for the scan's non-required
531 : * arrays. This is strictly necessary for correctness in a subset of cases
532 : * involving "required in opposite direction"-triggered primitive index scans.
533 : * Not all callers are at risk of _bt_first using a non-required array like
534 : * this, but advancement always resets the arrays when another primitive scan
535 : * is scheduled, just to keep things simple. Array advancement even makes
536 : * sure to reset non-required arrays during scans that have no inequalities.
537 : * (Advancement still won't call here when there are no inequalities, though
538 : * that's just because it's all handled indirectly instead.)
539 : *
540 : * Note: _bt_verify_arrays_bt_first is called by an assertion to enforce that
541 : * everybody got this right.
542 : */
543 : static void
544 378 : _bt_rewind_nonrequired_arrays(IndexScanDesc scan, ScanDirection dir)
545 : {
546 378 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
547 378 : int arrayidx = 0;
548 :
549 768 : for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
550 : {
551 390 : ScanKey cur = so->keyData + ikey;
552 390 : BTArrayKeyInfo *array = NULL;
553 : int first_elem_dir;
554 :
555 390 : if (!(cur->sk_flags & SK_SEARCHARRAY) ||
556 378 : cur->sk_strategy != BTEqualStrategyNumber)
557 12 : continue;
558 :
559 378 : array = &so->arrayKeys[arrayidx++];
560 : Assert(array->scan_key == ikey);
561 :
562 378 : if ((cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)))
563 378 : continue;
564 :
565 0 : if (ScanDirectionIsForward(dir))
566 0 : first_elem_dir = 0;
567 : else
568 0 : first_elem_dir = array->num_elems - 1;
569 :
570 0 : if (array->cur_elem != first_elem_dir)
571 : {
572 0 : array->cur_elem = first_elem_dir;
573 0 : cur->sk_argument = array->elem_values[first_elem_dir];
574 : }
575 : }
576 378 : }
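
/*
 * Worked illustration (exposition only) of the reset rule above, using
 * the qual from the header comment.  With the non-required "c" array
 * {5, 6, 7}, a rewound cursor must point at the array's first element
 * for the scan direction, so that _bt_first builds "(200, 3, 5)" during
 * a forward scan (or uses 7 during a backward scan).  Hypothetical
 * helper; the real code also re-copies elem_values into sk_argument.
 */
static int
model_rewound_elem(int num_elems, bool forward)
{
	return forward ? 0 : num_elems - 1; /* 5 forward, 7 backward here */
}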
577 :
578 : /*
579 : * _bt_tuple_before_array_skeys() -- too early to advance required arrays?
580 : *
581 : * We always compare the tuple using the current array keys (which we assume
582 : * are already set in so->keyData[]). readpagetup indicates if tuple is the
583 : * scan's current _bt_readpage-wise tuple.
584 : *
585 : * readpagetup callers must only call here when _bt_check_compare already set
586 : * continuescan=false. We help these callers deal with _bt_check_compare's
587 : * inability to distinguish between the < and > cases (it uses equality
588 : * operator scan keys, whereas we use 3-way ORDER procs). These callers pass
589 : * a _bt_check_compare-set sktrig value that indicates which scan key
590 : * triggered the call (!readpagetup callers just pass us sktrig=0 instead).
591 : * This information allows us to avoid wastefully checking earlier scan keys
592 : * that were already deemed to have been satisfied inside _bt_check_compare.
593 : *
594 : * Returns false when caller's tuple is >= the current required equality scan
595 : * keys (or <=, in the case of backwards scans). This happens to readpagetup
596 : * callers when the scan has reached the point of needing its array keys
597 : * advanced; caller will need to advance required and non-required arrays at
598 : * scan key offsets >= sktrig, plus scan keys < sktrig iff sktrig rolls over.
599 : * (When we return false to readpagetup callers, tuple can only be == current
600 : * required equality scan keys when caller's sktrig indicates that the arrays
601 : * need to be advanced due to an unsatisfied required inequality key trigger.)
602 : *
603 : * Returns true when caller passes a tuple that is < the current set of
604 : * equality keys for the most significant non-equal required scan key/column
605 : * (or > the keys, during backwards scans). This happens to readpagetup
606 : * callers when tuple is still before the start of matches for the scan's
607 : * required equality strategy scan keys. (sktrig can't have indicated that an
608 : * inequality strategy scan key wasn't satisfied in _bt_check_compare when we
609 : * return true. In fact, we automatically return false when passed such an
610 : * inequality sktrig by readpagetup callers -- _bt_check_compare's initial
611 : * continuescan=false doesn't really need to be confirmed here by us.)
612 : *
613 : * !readpagetup callers optionally pass us *scanBehind, which tracks whether
614 : * any missing truncated attributes might have affected array advancement
615 : * (compared to what would happen if it was shown the first non-pivot tuple on
616 : * the page to the right of caller's finaltup/high key tuple instead). It's
617 : * only possible that we'll set *scanBehind to true when caller passes us a
618 : * pivot tuple (with truncated -inf attributes) that we return false for.
619 : */
620 : static bool
621 53424 : _bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir,
622 : IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
623 : bool readpagetup, int sktrig, bool *scanBehind)
624 : {
625 53424 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
626 :
627 : Assert(so->numArrayKeys);
628 : Assert(so->numberOfKeys);
629 : Assert(sktrig == 0 || readpagetup);
630 : Assert(!readpagetup || scanBehind == NULL);
631 :
632 53424 : if (scanBehind)
633 17898 : *scanBehind = false;
634 :
635 53590 : for (int ikey = sktrig; ikey < so->numberOfKeys; ikey++)
636 : {
637 53534 : ScanKey cur = so->keyData + ikey;
638 : Datum tupdatum;
639 : bool tupnull;
640 : int32 result;
641 :
642 : /* readpagetup calls require one ORDER proc comparison (at most) */
643 : Assert(!readpagetup || ikey == sktrig);
644 :
645 : /*
646 : * Once we reach a non-required scan key, we're completely done.
647 : *
648 : * Note: we deliberately don't consider the scan direction here.
649 : * _bt_advance_array_keys caller requires that we track *scanBehind
650 : * without concern for scan direction.
651 : */
652 53534 : if ((cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) == 0)
653 : {
654 : Assert(!readpagetup);
655 : Assert(ikey > sktrig || ikey == 0);
656 53368 : return false;
657 : }
658 :
659 53534 : if (cur->sk_attno > tupnatts)
660 : {
661 : Assert(!readpagetup);
662 :
663 : /*
664 : * When we reach a high key's truncated attribute, assume that the
665 : * tuple attribute's value is >= the scan's equality constraint
666 : * scan keys (but set *scanBehind to let interested callers know
667 : * that a truncated attribute might have affected our answer).
668 : */
669 6 : if (scanBehind)
670 6 : *scanBehind = true;
671 :
672 6 : return false;
673 : }
674 :
675 : /*
676 : * Deal with inequality strategy scan keys that _bt_check_compare set
677 : * continuescan=false for
678 : */
679 53528 : if (cur->sk_strategy != BTEqualStrategyNumber)
680 : {
681 : /*
682 : * When _bt_check_compare indicated that a required inequality
683 : * scan key wasn't satisfied, there's no need to verify anything;
684 : * caller always calls _bt_advance_array_keys with this sktrig.
685 : */
686 6 : if (readpagetup)
687 6 : return false;
688 :
689 : /*
690 : * Otherwise we can't give up, since we must check all required
691 : * scan keys (required in either direction) in order to correctly
692 : * track *scanBehind for caller
693 : */
694 0 : continue;
695 : }
696 :
697 53522 : tupdatum = index_getattr(tuple, cur->sk_attno, tupdesc, &tupnull);
698 :
699 53522 : result = _bt_compare_array_skey(&so->orderProcs[ikey],
700 : tupdatum, tupnull,
701 : cur->sk_argument, cur);
702 :
703 : /*
704 : * Does this comparison indicate that caller must _not_ advance the
705 : * scan's arrays just yet?
706 : */
707 53522 : if ((ScanDirectionIsForward(dir) && result < 0) ||
708 108 : (ScanDirectionIsBackward(dir) && result > 0))
709 23966 : return true;
710 :
711 : /*
712 : * Does this comparison indicate that caller should now advance the
713 : * scan's arrays? (Must be if we get here during a readpagetup call.)
714 : */
715 29556 : if (readpagetup || result != 0)
716 : {
717 : Assert(result != 0);
718 29390 : return false;
719 : }
720 :
721 : /*
722 : * Inconclusive -- need to check later scan keys, too.
723 : *
724 : * This must be a finaltup precheck, or a call made from an assertion.
725 : */
726 : Assert(result == 0);
727 : }
728 :
729 : Assert(!readpagetup);
730 :
731 56 : return false;
732 : }
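
/*
 * Hedged sketch (exposition only) of the expected readpagetup caller
 * protocol, loosely modeled on _bt_checkkeys.  Simplified: assumes the
 * scan has array keys, and elides prechecked/firstmatch handling plus
 * the non-required-array advancement path.  "model_checkkeys_step" is a
 * hypothetical name.
 */
static bool
model_checkkeys_step(IndexScanDesc scan, BTReadPageState *pstate,
					 IndexTuple tuple, int tupnatts, TupleDesc tupdesc)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	ScanDirection dir = so->currPos.dir;
	bool		continuescan;
	int			ikey = 0;

	if (_bt_check_compare(scan, dir, tuple, tupnatts, tupdesc,
						  false, false, false, &continuescan, &ikey))
		return true;			/* tuple satisfies the current qual */

	if (continuescan)
		return false;			/* only a non-required key failed */

	if (_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts,
									 true, ikey, NULL))
	{
		/* Too early: tuple is still before the current array keys */
		pstate->continuescan = true;
		return false;
	}

	/* Tuple is at/ahead of the arrays; advance them using this tuple */
	return _bt_advance_array_keys(scan, pstate, tuple, tupnatts, tupdesc,
								  ikey, true);
}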
733 :
734 : /*
735 : * _bt_start_prim_scan() -- start scheduled primitive index scan?
736 : *
737 : * Returns true if _bt_checkkeys scheduled another primitive index scan, just
738 : * as the last one ended. Otherwise returns false, indicating that the array
739 : * keys are now fully exhausted.
740 : *
741 : * Only call here during scans with one or more equality type array scan keys,
742 : * after _bt_first or _bt_next return false.
743 : */
744 : bool
745 84382 : _bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir)
746 : {
747 84382 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
748 :
749 : Assert(so->numArrayKeys);
750 :
751 84382 : so->scanBehind = so->oppositeDirCheck = false; /* reset */
752 :
753 : /*
754 : * Array keys are advanced within _bt_checkkeys when the scan reaches the
755 : * leaf level (more precisely, they're advanced when the scan reaches the
756 : * end of each distinct set of array elements). This process avoids
757 : * repeat access to leaf pages (across multiple primitive index scans) by
758 : * advancing the scan's array keys when it allows the primitive index scan
759 : * to find nearby matching tuples (or when it eliminates ranges of array
760 : * key space that can't possibly be satisfied by any index tuple).
761 : *
762 : * _bt_checkkeys sets a simple flag variable to schedule another primitive
763 : * index scan. The flag tells us what to do.
764 : *
765 : * We cannot rely on _bt_first always reaching _bt_checkkeys. There are
766 : * various cases where that won't happen. For example, if the index is
767 : * completely empty, then _bt_first won't call _bt_readpage/_bt_checkkeys.
768 : * We also don't expect a call to _bt_checkkeys during searches for a
769 : * non-existent value that happens to be lower/higher than any existing
770 : * value in the index.
771 : *
772 : * We don't require special handling for these cases -- we don't need to
773 : * be explicitly instructed to _not_ perform another primitive index scan.
774 : * It's up to code under the control of _bt_first to always set the flag
775 : * when another primitive index scan will be required.
776 : *
777 : * This works correctly, even with the tricky cases listed above, which
778 : * all involve access to leaf pages "near the boundaries of the key space"
779 : * (whether it's from a leftmost/rightmost page, or an imaginary empty
780 : * leaf root page). If _bt_checkkeys cannot be reached by a primitive
781 : * index scan for one set of array keys, then it also won't be reached for
782 : * any later set ("later" in terms of the direction that we scan the index
783 : * and advance the arrays). The array keys won't have advanced in these
784 : * cases, but that's the correct behavior (even _bt_advance_array_keys
785 : * won't always advance the arrays at the point they become "exhausted").
786 : */
787 84382 : if (so->needPrimScan)
788 : {
789 : Assert(_bt_verify_arrays_bt_first(scan, dir));
790 :
791 : /*
792 : * Flag was set -- must call _bt_first again, which will reset the
793 : * scan's needPrimScan flag
794 : */
795 17236 : return true;
796 : }
797 :
798 : /* The top-level index scan ran out of tuples in this scan direction */
799 67146 : if (scan->parallel_scan != NULL)
800 30 : _bt_parallel_done(scan);
801 :
802 67146 : return false;
803 : }
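
/*
 * Sketch (exposition only) of how the amgettuple-level caller is
 * expected to drive primitive index scans, loosely modeled on
 * btgettuple's outer loop; error handling and the _bt_next path for an
 * already-positioned scan are elided.
 */
static bool
model_gettuple_first_call(IndexScanDesc scan, ScanDirection dir)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	bool		res;

	/* Each loop iteration performs another primitive index scan */
	do
	{
		res = _bt_first(scan, dir);

		/*
		 * On failure, ask whether _bt_checkkeys scheduled another
		 * primitive index scan before concluding that the top-level
		 * scan is done in this direction.
		 */
	} while (!res && so->numArrayKeys && _bt_start_prim_scan(scan, dir));

	return res;
}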
804 :
805 : /*
806 : * _bt_advance_array_keys() -- Advance array elements using a tuple
807 : *
808 : * The scan always gets a new qual as a consequence of calling here (except
809 : * when we determine that the top-level scan has run out of matching tuples).
810 : * All later _bt_check_compare calls also use the same new qual that was first
811 : * used here (at least until the next call here advances the keys once again).
812 : * It's convenient to structure _bt_check_compare rechecks of caller's tuple
813 : * (using the new qual) as one of the steps of advancing the scan's array keys,
814 : * so this function works as a wrapper around _bt_check_compare.
815 : *
816 : * Like _bt_check_compare, we'll set pstate.continuescan on behalf of the
817 : * caller, and return a boolean indicating if caller's tuple satisfies the
818 : * scan's new qual. But unlike _bt_check_compare, we set so->needPrimScan
819 : * when we set continuescan=false, indicating if a new primitive index scan
820 : * has been scheduled (otherwise, the top-level scan has run out of tuples in
821 : * the current scan direction).
822 : *
823 : * Caller must use _bt_tuple_before_array_skeys to determine if the current
824 : * place in the scan is >= the current array keys _before_ calling here.
825 : * We're responsible for ensuring that caller's tuple is <= the newly advanced
826 : * required array keys once we return. We try to find an exact match, but
827 : * failing that we'll advance the array keys to whatever set of array elements
828 : * comes next in the key space for the current scan direction. Required array
829 : * keys "ratchet forwards" (or backwards). They can only advance as the scan
830 : * itself advances through the index/key space.
831 : *
832 : * (The rules are the same for backwards scans, except that the operators are
833 : * flipped: just replace the precondition's >= operator with a <=, and the
834 : * postcondition's <= operator with a >=. In other words, just swap the
835 : * precondition with the postcondition.)
836 : *
837 : * We also deal with "advancing" non-required arrays here. Callers whose
838 : * sktrig scan key is non-required specify sktrig_required=false. These calls
839 : * are the only exception to the general rule about always advancing the
840 : * required array keys (the scan may not even have a required array). These
841 : * callers should just pass a NULL pstate (since there is never any question
842 : * of stopping the scan). No call to _bt_tuple_before_array_skeys is required
843 : * ahead of these calls (it's already clear that any required scan keys must
844 : * be satisfied by caller's tuple).
845 : *
846 : * Note that we deal with non-array required equality strategy scan keys as
847 : * degenerate single element arrays here. Obviously, they can never really
848 : * advance in the way that real arrays can, but they must still affect how we
849 : * advance real array scan keys (exactly like true array equality scan keys).
850 : * We have to keep around a 3-way ORDER proc for these (using the "=" operator
851 : * won't do), since in general whether the tuple is < or > _any_ unsatisfied
852 : * required equality key influences how the scan's real arrays must advance.
853 : *
854 : * Note also that we may sometimes need to advance the array keys when the
855 : * existing required array keys (and other required equality keys) are already
856 : * an exact match for every corresponding value from caller's tuple. We must
857 : * do this for inequalities that _bt_check_compare set continuescan=false for.
858 : * They'll advance the array keys here, just like any other scan key that
859 : * _bt_check_compare stops on. (This can even happen _after_ we advance the
860 : * array keys, in which case we'll advance the array keys a second time. That
861 : * way the _bt_checkkeys caller always has its required arrays advanced to the
862 : * maximum possible extent that its tuple will allow.)
863 : */
864 : static bool
865 28124 : _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
866 : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
867 : int sktrig, bool sktrig_required)
868 : {
869 28124 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
870 28124 : Relation rel = scan->indexRelation;
871 28124 : ScanDirection dir = so->currPos.dir;
872 28124 : int arrayidx = 0;
873 28124 : bool beyond_end_advance = false,
874 28124 : has_required_opposite_direction_only = false,
875 28124 : all_required_satisfied = true,
876 28124 : all_satisfied = true;
877 :
878 : Assert(!so->needPrimScan && !so->scanBehind && !so->oppositeDirCheck);
879 :
880 28124 : if (sktrig_required)
881 : {
882 : /*
883 : * Precondition array state assertion
884 : */
885 : Assert(!_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc,
886 : tupnatts, false, 0, NULL));
887 :
888 : /*
889 : * Required scan key wasn't satisfied, so required arrays will have to
890 : * advance. Invalidate page-level state that tracks whether the
891 : * scan's required-in-opposite-direction-only keys are known to be
892 : * satisfied by the page's remaining tuples.
893 : */
894 27860 : pstate->firstmatch = false;
895 :
896 : /* Shouldn't have to invalidate 'prechecked', though */
897 : Assert(!pstate->prechecked);
898 :
899 : /*
900 : * Once we return we'll have a new set of required array keys, so
901 : * reset state used by "look ahead" optimization
902 : */
903 27860 : pstate->rechecks = 0;
904 27860 : pstate->targetdistance = 0;
905 : }
906 :
907 : Assert(_bt_verify_keys_with_arraykeys(scan));
908 :
909 59836 : for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
910 : {
911 31934 : ScanKey cur = so->keyData + ikey;
912 31934 : BTArrayKeyInfo *array = NULL;
913 : Datum tupdatum;
914 31934 : bool required = false,
915 31934 : required_opposite_direction_only = false,
916 : tupnull;
917 : int32 result;
918 31934 : int set_elem = 0;
919 :
920 31934 : if (cur->sk_strategy == BTEqualStrategyNumber)
921 : {
922 : /* Manage array state */
923 31658 : if (cur->sk_flags & SK_SEARCHARRAY)
924 : {
925 28988 : array = &so->arrayKeys[arrayidx++];
926 : Assert(array->scan_key == ikey);
927 : }
928 : }
929 : else
930 : {
931 : /*
932 : * Are any inequalities required in the opposite direction only
933 : * present here?
934 : */
935 276 : if (((ScanDirectionIsForward(dir) &&
936 276 : (cur->sk_flags & (SK_BT_REQBKWD))) ||
937 120 : (ScanDirectionIsBackward(dir) &&
938 120 : (cur->sk_flags & (SK_BT_REQFWD)))))
939 150 : has_required_opposite_direction_only =
940 150 : required_opposite_direction_only = true;
941 : }
942 :
943 : /* Optimization: skip over known-satisfied scan keys */
944 31934 : if (ikey < sktrig)
945 3050 : continue;
946 :
947 31098 : if (cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))
948 : {
949 : Assert(sktrig_required);
950 :
951 29568 : required = true;
952 :
953 29568 : if (cur->sk_attno > tupnatts)
954 : {
955 : /* Set this just like _bt_tuple_before_array_skeys */
956 : Assert(sktrig < ikey);
957 6 : so->scanBehind = true;
958 : }
959 : }
960 :
961 : /*
962 : * Handle a required non-array scan key that the initial call to
963 : * _bt_check_compare indicated triggered array advancement, if any.
964 : *
965 : * The non-array scan key's strategy will be <, <=, or = during a
966 : * forwards scan (or any one of =, >=, or > during a backwards scan).
967 : * It follows that the corresponding tuple attribute's value must now
968 : * be either > or >= the scan key value (for backwards scans it must
969 : * be either < or <= that value).
970 : *
971 : * If this is a required equality strategy scan key, this is just an
972 : * optimization; _bt_tuple_before_array_skeys already confirmed that
973 : * this scan key places us ahead of caller's tuple. There's no need
974 : * to repeat that work now. (The same underlying principle also gets
975 : * applied by the cur_elem_trig optimization used to speed up searches
976 : * for the next array element.)
977 : *
978 : * If this is a required inequality strategy scan key, we _must_ rely
979 : * on _bt_check_compare like this; we aren't capable of directly
980 : * evaluating required inequality strategy scan keys here, on our own.
981 : */
982 31098 : if (ikey == sktrig && !array)
983 : {
984 : Assert(sktrig_required && required && all_required_satisfied);
985 :
986 : /* Use "beyond end" advancement. See below for an explanation. */
987 88 : beyond_end_advance = true;
988 88 : all_satisfied = all_required_satisfied = false;
989 :
990 88 : continue;
991 : }
992 :
993 : /*
994 : * Nothing more for us to do with an inequality strategy scan key that
995 : * wasn't the one that _bt_check_compare stopped on, though.
996 : *
997 : * Note: if our later call to _bt_check_compare (to recheck caller's
998 : * tuple) sets continuescan=false due to finding this same inequality
999 : * unsatisfied (possible when it's required in the scan direction),
1000 : * we'll deal with it via a recursive "second pass" call.
1001 : */
1002 31010 : else if (cur->sk_strategy != BTEqualStrategyNumber)
1003 30 : continue;
1004 :
1005 : /*
1006 : * Nothing for us to do with an equality strategy scan key that isn't
1007 : * marked required, either -- unless it's a non-required array
1008 : */
1009 30980 : else if (!required && !array)
1010 1248 : continue;
1011 :
1012 : /*
1013 : * Here we perform steps for all array scan keys after a required
1014 : * array scan key whose binary search triggered "beyond end of array
1015 : * element" array advancement due to encountering a tuple attribute
1016 : * value > the closest matching array key (or < for backwards scans).
1017 : */
1018 29732 : if (beyond_end_advance)
1019 : {
1020 : int final_elem_dir;
1021 :
1022 366 : if (ScanDirectionIsBackward(dir) || !array)
1023 160 : final_elem_dir = 0;
1024 : else
1025 206 : final_elem_dir = array->num_elems - 1;
1026 :
1027 366 : if (array && array->cur_elem != final_elem_dir)
1028 : {
1029 42 : array->cur_elem = final_elem_dir;
1030 42 : cur->sk_argument = array->elem_values[final_elem_dir];
1031 : }
1032 :
1033 366 : continue;
1034 : }
1035 :
1036 : /*
1037 : * Here we perform steps for all array scan keys after a required
1038 : * array scan key whose tuple attribute was < the closest matching
1039 : * array key when we dealt with it (or > for backwards scans).
1040 : *
1041 : * This earlier required array key already puts us ahead of caller's
1042 : * tuple in the key space (for the current scan direction). We must
1043 : * make sure that subsequent lower-order array keys do not put us too
1044 : * far ahead (ahead of tuples that have yet to be seen by our caller).
1045 : * For example, when a tuple "(a, b) = (42, 5)" advances the array
1046 : * keys on "a" from 40 to 45, we must also set "b" to whatever the
1047 : * first array element for "b" is. It would be wrong to allow "b" to
1048 : * be set based on the tuple value.
1049 : *
1050 : * Perform the same steps with truncated high key attributes. You can
1051 : * think of this as a "binary search" for the element closest to the
1052 : * value -inf. Again, the arrays must never get ahead of the scan.
1053 : */
1054 29366 : if (!all_required_satisfied || cur->sk_attno > tupnatts)
1055 : {
1056 : int first_elem_dir;
1057 :
1058 482 : if (ScanDirectionIsForward(dir) || !array)
1059 482 : first_elem_dir = 0;
1060 : else
1061 0 : first_elem_dir = array->num_elems - 1;
1062 :
1063 482 : if (array && array->cur_elem != first_elem_dir)
1064 : {
1065 192 : array->cur_elem = first_elem_dir;
1066 192 : cur->sk_argument = array->elem_values[first_elem_dir];
1067 : }
1068 :
1069 482 : continue;
1070 : }
1071 :
1072 : /*
1073 : * Search in scankey's array for the corresponding tuple attribute
1074 : * value from caller's tuple
1075 : */
1076 28884 : tupdatum = index_getattr(tuple, cur->sk_attno, tupdesc, &tupnull);
1077 :
1078 28884 : if (array)
1079 : {
1080 28276 : bool cur_elem_trig = (sktrig_required && ikey == sktrig);
1081 :
1082 : /*
1083 : * Binary search for closest match that's available from the array
1084 : */
1085 28276 : set_elem = _bt_binsrch_array_skey(&so->orderProcs[ikey],
1086 : cur_elem_trig, dir,
1087 : tupdatum, tupnull, array, cur,
1088 : &result);
1089 :
1090 : Assert(set_elem >= 0 && set_elem < array->num_elems);
1091 : }
1092 : else
1093 : {
1094 : Assert(sktrig_required && required);
1095 :
1096 : /*
1097 : * This is a required non-array equality strategy scan key, which
1098 : * we'll treat as a degenerate single element array.
1099 : *
1100 : * This scan key's imaginary "array" can't really advance, but it
1101 : * can still roll over like any other array. (Actually, this is
1102 : * no different to real single value arrays, which never advance
1103 : * without rolling over -- they can never truly advance, either.)
1104 : */
1105 608 : result = _bt_compare_array_skey(&so->orderProcs[ikey],
1106 : tupdatum, tupnull,
1107 : cur->sk_argument, cur);
1108 : }
1109 :
1110 : /*
1111 : * Consider "beyond end of array element" array advancement.
1112 : *
1113 : * When the tuple attribute value is > the closest matching array key
1114 : * (or < in the backwards scan case), we need to ratchet this array
1115 : * forward (backward) by one increment, so that caller's tuple ends up
1116 : * being < final array value instead (or > final array value instead).
1117 : * This process has to work for all of the arrays, not just this one:
1118 : * it must "carry" to higher-order arrays when the set_elem that we
1119 : * just found happens to be the final one for the scan's direction.
1120 : * Incrementing (decrementing) set_elem itself isn't good enough.
1121 : *
1122 : * Our approach is to provisionally use set_elem as if it was an exact
1123 : * match now, then set each later/less significant array to whatever
1124 : * its final element is. Once outside the loop we'll then "increment
1125 : * this array's set_elem" by calling _bt_advance_array_keys_increment.
1126 : * That way the process rolls over to higher order arrays as needed.
1127 : *
1128 : * Under this scheme any required arrays only ever ratchet forwards
1129 : * (or backwards), and always do so to the maximum possible extent
1130 : * that we can know will be safe without seeing the scan's next tuple.
1131 : * We don't need any special handling for required scan keys that lack
1132 : * a real array to advance, nor for redundant scan keys that couldn't
1133 : * be eliminated by _bt_preprocess_keys. It won't matter if some of
1134 : * our "true" array scan keys (or even all of them) are non-required.
1135 : */
1136 28884 : if (required &&
1137 28620 : ((ScanDirectionIsForward(dir) && result > 0) ||
1138 24 : (ScanDirectionIsBackward(dir) && result < 0)))
1139 7418 : beyond_end_advance = true;
1140 :
1141 : Assert(all_required_satisfied && all_satisfied);
1142 28884 : if (result != 0)
1143 : {
1144 : /*
1145 : * Track whether caller's tuple satisfies our new post-advancement
1146 : * qual, for required scan keys, as well as for the entire set of
1147 : * interesting scan keys (all required scan keys plus non-required
1148 : * array scan keys are considered interesting.)
1149 : */
1150 25780 : all_satisfied = false;
1151 25780 : if (required)
1152 25558 : all_required_satisfied = false;
1153 : else
1154 : {
1155 : /*
1156 : * There's no need to advance the arrays using the best
1157 : * available match for a non-required array. Give up now.
1158 : * (Though note that sktrig_required calls still have to do
1159 : * all the usual post-advancement steps, including the recheck
1160 : * call to _bt_check_compare.)
1161 : */
1162 222 : break;
1163 : }
1164 : }
1165 :
1166 : /* Advance array keys, even when set_elem isn't an exact match */
1167 28662 : if (array && array->cur_elem != set_elem)
1168 : {
1169 20642 : array->cur_elem = set_elem;
1170 20642 : cur->sk_argument = array->elem_values[set_elem];
1171 : }
1172 : }
1173 :
1174 : /*
1175 : * Advance the array keys incrementally whenever "beyond end of array
1176 : * element" array advancement happens, so that advancement will carry to
1177 : * higher-order arrays (might exhaust all the scan's arrays instead, which
1178 : * ends the top-level scan).
1179 : */
1180 28124 : if (beyond_end_advance && !_bt_advance_array_keys_increment(scan, dir))
1181 7468 : goto end_toplevel_scan;
1182 :
1183 : Assert(_bt_verify_keys_with_arraykeys(scan));
1184 :
1185 : /*
1186 : * Does tuple now satisfy our new qual? Recheck with _bt_check_compare.
1187 : *
1188 : * Calls triggered by an unsatisfied required scan key, whose tuple now
1189 : * satisfies all required scan keys, but not all nonrequired array keys,
1190 : * will still require a recheck call to _bt_check_compare. They'll still
1191 : * need its "second pass" handling of required inequality scan keys.
1192 : * (Might have missed a still-unsatisfied required inequality scan key
1193 : * that caller didn't detect as the sktrig scan key during its initial
1194 : * _bt_check_compare call that used the old/original qual.)
1195 : *
1196 : * Calls triggered by an unsatisfied nonrequired array scan key never need
1197 : * "second pass" handling of required inequalities (nor any other handling
1198 : * of any required scan key). All that matters is whether caller's tuple
1199 : * satisfies the new qual, so it's safe to just skip the _bt_check_compare
1200 : * recheck when we've already determined that it can only return 'false'.
1201 : */
1202 20656 : if ((sktrig_required && all_required_satisfied) ||
1203 18442 : (!sktrig_required && all_satisfied))
1204 : {
1205 2256 : int nsktrig = sktrig + 1;
1206 : bool continuescan;
1207 :
1208 : Assert(all_required_satisfied);
1209 :
1210 : /* Recheck _bt_check_compare on behalf of caller */
1211 2256 : if (_bt_check_compare(scan, dir, tuple, tupnatts, tupdesc,
1212 : false, false, false,
1213 2250 : &continuescan, &nsktrig) &&
1214 2250 : !so->scanBehind)
1215 : {
1216 : /* This tuple satisfies the new qual */
1217 : Assert(all_satisfied && continuescan);
1218 :
1219 2244 : if (pstate)
1220 2202 : pstate->continuescan = true;
1221 :
1222 2244 : return true;
1223 : }
1224 :
1225 : /*
1226 : * Consider "second pass" handling of required inequalities.
1227 : *
1228 : * It's possible that our _bt_check_compare call indicated that the
1229 : * scan should end due to some unsatisfied inequality that wasn't
1230 : * initially recognized as such by us. Handle this by calling
1231 : * ourselves recursively, this time indicating that the trigger is the
1232 : * inequality that we missed first time around (and using a set of
1233 : * required array/equality keys that are now exact matches for tuple).
1234 : *
1235 : * We make a strong, general guarantee that every _bt_checkkeys call
1236 : * here will advance the array keys to the maximum possible extent
1237 : * that we can know to be safe based on caller's tuple alone. If we
1238 : * didn't perform this step, then that guarantee wouldn't quite hold.
1239 : */
1240 12 : if (unlikely(!continuescan))
1241 : {
1242 : bool satisfied PG_USED_FOR_ASSERTS_ONLY;
1243 :
1244 : Assert(sktrig_required);
1245 : Assert(so->keyData[nsktrig].sk_strategy != BTEqualStrategyNumber);
1246 :
1247 : /*
1248 : * The tuple must use "beyond end" advancement during the
1249 : * recursive call, so we cannot possibly end up back here when
1250 : * recursing. We'll consume a small, fixed amount of stack space.
1251 : */
1252 : Assert(!beyond_end_advance);
1253 :
1254 : /* Advance the array keys a second time using same tuple */
1255 0 : satisfied = _bt_advance_array_keys(scan, pstate, tuple, tupnatts,
1256 : tupdesc, nsktrig, true);
1257 :
1258 : /* This tuple doesn't satisfy the inequality */
1259 : Assert(!satisfied);
1260 0 : return false;
1261 : }
1262 :
1263 : /*
1264 : * Some non-required scan key (from new qual) still not satisfied.
1265 : *
1266 : * All scan keys required in the current scan direction must still be
1267 : * satisfied, though, so we can trust all_required_satisfied below.
1268 : */
1269 : }
1270 :
1271 : /*
1272 : * When we were called just to deal with "advancing" non-required arrays,
1273 : * this is as far as we can go (cannot stop the scan for these callers)
1274 : */
1275 18412 : if (!sktrig_required)
1276 : {
1277 : /* Caller's tuple doesn't match any qual */
1278 222 : return false;
1279 : }
1280 :
1281 : /*
1282 : * Postcondition array state assertion (for still-unsatisfied tuples).
1283 : *
1284 : * By here we have established that the scan's required arrays (scan must
1285 : * have at least one required array) advanced, without becoming exhausted.
1286 : *
1287 : * Caller's tuple is now < the newly advanced array keys (or > when this
1288 : * is a backwards scan), except in the case where we only got this far due
1289 : * to an unsatisfied non-required scan key. Verify that with an assert.
1290 : *
1291 : * Note: we don't just quit at this point when all required scan keys were
1292 : * found to be satisfied because we need to consider edge-cases involving
1293 : * scan keys required in the opposite direction only; those aren't tracked
1294 : * by all_required_satisfied.
1295 : */
1296 : Assert(_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts,
1297 : false, 0, NULL) ==
1298 : !all_required_satisfied);
1299 :
1300 : /*
1301 : * We generally permit primitive index scans to continue onto the next
1302 : * sibling page when the page's finaltup satisfies all required scan keys
1303 : * at the point where we're between pages.
1304 : *
1305 : * If caller's tuple is also the page's finaltup, and we see that required
1306 : * scan keys still aren't satisfied, start a new primitive index scan.
1307 : */
1308 18190 : if (!all_required_satisfied && pstate->finaltup == tuple)
1309 48 : goto new_prim_scan;
1310 :
1311 : /*
1312 : * Proactively check finaltup (don't wait until finaltup is reached by the
1313 : * scan) when it might well turn out to not be satisfied later on.
1314 : *
1315 : * Note: if so->scanBehind hasn't already been set for finaltup by us,
1316 : * it'll be set during this call to _bt_tuple_before_array_skeys. Either
1317 : * way, it'll be set correctly (for the whole page) after this point.
1318 : */
1319 36040 : if (!all_required_satisfied && pstate->finaltup &&
1320 35796 : _bt_tuple_before_array_skeys(scan, dir, pstate->finaltup, tupdesc,
1321 35796 : BTreeTupleGetNAtts(pstate->finaltup, rel),
1322 : false, 0, &so->scanBehind))
1323 17212 : goto new_prim_scan;
1324 :
1325 : /*
1326 : * When we encounter a truncated finaltup high key attribute, we're
1327 : * optimistic about the chances of its corresponding required scan key
1328 : * being satisfied when we go on to recheck it against tuples from this
1329 : * page's right sibling leaf page. We consider truncated attributes to be
1330 : * satisfied by required scan keys, which allows the primitive index scan
1331 : * to continue to the next leaf page. We must set so->scanBehind to true
1332 : * to remember that the last page's finaltup had "satisfied" required scan
1333 : * keys for one or more truncated attribute values (scan keys required in
1334 : * _either_ scan direction).
1335 : *
1336 : * There is a chance that _bt_checkkeys (which checks so->scanBehind) will
1337 : * find that even the sibling leaf page's finaltup is < the new array
1338 : * keys. When that happens, our optimistic policy will have incurred a
1339 : * single extra leaf page access that could have been avoided.
1340 : *
1341 : * A pessimistic policy would give backward scans a gratuitous advantage
1342 : * over forward scans. We'd punish forward scans for applying more
1343 : * accurate information from the high key, rather than just using the
1344 : * final non-pivot tuple as finaltup, in the style of backward scans.
1345 : * Being pessimistic would also give some scans with non-required arrays a
1346 : * perverse advantage over similar scans that use required arrays instead.
1347 : *
1348 : * You can think of this as a speculative bet on what the scan is likely
1349 : * to find on the next page. It's not much of a gamble, though, since the
1350 : * untruncated prefix of attributes must strictly satisfy the new qual.
1351 : */
1352 930 : if (so->scanBehind)
1353 : {
1354 : /*
1355 : * Truncated high key -- _bt_scanbehind_checkkeys recheck scheduled.
1356 : *
1357 : * Remember if recheck needs to call _bt_oppodir_checkkeys for next
1358 : * page's finaltup (see below comments about "Handle inequalities
1359 : * marked required in the opposite scan direction" for why).
1360 : */
1361 12 : so->oppositeDirCheck = has_required_opposite_direction_only;
1362 :
1363 : /*
1364 : * Make sure that any SAOP arrays that were not marked required by
1365 : * preprocessing are reset to their first element for this direction
1366 : */
1367 12 : _bt_rewind_nonrequired_arrays(scan, dir);
1368 : }
1369 :
1370 : /*
1371 : * Handle inequalities marked required in the opposite scan direction.
1372 : * They can also signal that we should start a new primitive index scan.
1373 : *
1374 : * It's possible that the scan is now positioned where "matching" tuples
1375 : * begin, and that caller's tuple satisfies all scan keys required in the
1376 : * current scan direction. But if caller's tuple still doesn't satisfy
1377 : * other scan keys that are required in the opposite scan direction only
1378 : * (e.g., a required >= strategy scan key when scan direction is forward),
1379 : * it's still possible that there are many leaf pages before the page that
1380 : * _bt_first could skip straight to. Groveling through all those pages
1381 : * will always give correct answers, but it can be very inefficient. We
1382 : * must avoid needlessly scanning extra pages.
1383 : *
1384 : * Separately, it's possible that _bt_check_compare set continuescan=false
1385 : * for a scan key that's required in the opposite direction only. This is
1386 : * a special case, that happens only when _bt_check_compare sees that the
1387 : * inequality encountered a NULL value. This signals the end of non-NULL
1388 : * values in the current scan direction, which is reason enough to end the
1389 : * (primitive) scan. If this happens at the start of a large group of
1390 : * NULL values, then we shouldn't expect to be called again until after
1391 : * the scan has already read indefinitely-many leaf pages full of tuples
1392 : * with NULL suffix values. (_bt_first is expected to skip over the group
1393 : * of NULLs by applying a similar "deduce NOT NULL" rule of its own, which
1394 : * involves consing up an explicit SK_SEARCHNOTNULL key.)
1395 : *
1396 : * Apply a test against finaltup to detect and recover from the problem:
1397 : * if even finaltup doesn't satisfy such an inequality, we just skip by
1398 : * starting a new primitive index scan. When we skip, we know for sure
1399 : * that all of the tuples on the current page following caller's tuple are
1400 : * also before the _bt_first-wise start of tuples for our new qual. That
1401 : * at least suggests many more skippable pages beyond the current page.
1402 : * (When so->scanBehind and so->oppositeDirCheck are set, this'll happen
1403 : * when we test the next page's finaltup/high key instead.)
1404 : */
1405 918 : else if (has_required_opposite_direction_only && pstate->finaltup &&
1406 0 : unlikely(!_bt_oppodir_checkkeys(scan, dir, pstate->finaltup)))
1407 : {
1408 0 : _bt_rewind_nonrequired_arrays(scan, dir);
1409 0 : goto new_prim_scan;
1410 : }
1411 :
1412 918 : continue_scan:
1413 :
1414 : /*
1415 : * Stick with the ongoing primitive index scan for now.
1416 : *
1417 : * It's possible that later tuples will also turn out to have values that
1418 : * are still < the now-current array keys (or > the current array keys).
1419 : * Our caller will handle this by performing what amounts to a linear
1420 : * search of the page, implemented by calling _bt_check_compare and then
1421 : * _bt_tuple_before_array_skeys for each tuple.
1422 : *
1423 : * This approach has various advantages over a binary search of the page.
1424 : * Repeated binary searches of the page (one binary search for every array
1425 : * advancement) won't outperform a continuous linear search. While there
1426 : * are workloads that a naive linear search won't handle well, our caller
1427 : * has a "look ahead" fallback mechanism to deal with that problem.
1428 : */
1429 1296 : pstate->continuescan = true; /* Override _bt_check_compare */
1430 1296 : so->needPrimScan = false; /* _bt_readpage has more tuples to check */
1431 :
1432 1296 : if (so->scanBehind)
1433 : {
1434 : /* Optimization: skip by setting "look ahead" mechanism's offnum */
1435 378 : if (ScanDirectionIsForward(dir))
1436 372 : pstate->skip = pstate->maxoff + 1;
1437 : else
1438 6 : pstate->skip = pstate->minoff - 1;
1439 : }
1440 :
1441 : /* Caller's tuple doesn't match the new qual */
1442 1296 : return false;
1443 :
1444 17260 : new_prim_scan:
1445 :
1446 : Assert(pstate->finaltup); /* not on rightmost/leftmost page */
1447 :
1448 : /*
1449 : * Looks like another primitive index scan is required. But consider
1450 : * continuing the current primscan based on scan-level heuristics.
1451 : *
1452 : * Continue the ongoing primitive scan (and schedule a recheck for when
1453 : * the scan arrives on the next sibling leaf page) when it has already
1454 : * read at least one leaf page before the one we're reading now. This
1455 : * makes primscan scheduling more efficient when scanning subsets of an
1456 : * index with many distinct attribute values matching many array elements.
1457 : * It encourages fewer, larger primitive scans where that makes sense
1458 : * (where index descent costs need to be kept under control).
1459 : *
1460 : * Note: This heuristic isn't as aggressive as you might think. We're
1461 : * conservative about allowing a primitive scan to step from the first
1462 : * leaf page it reads to the page's sibling page (we only allow it on
1463 : * first pages whose finaltup strongly suggests that it'll work out).
1464 : * Clearing this first page finaltup hurdle is a strong signal in itself.
1465 : */
1466 17260 : if (!pstate->firstpage)
1467 : {
1468 : /* Schedule a recheck once on the next (or previous) page */
1469 366 : so->scanBehind = true;
1470 366 : so->oppositeDirCheck = has_required_opposite_direction_only;
1471 :
1472 366 : _bt_rewind_nonrequired_arrays(scan, dir);
1473 :
1474 : /* Continue the current primitive scan after all */
1475 366 : goto continue_scan;
1476 : }
1477 :
1478 : /*
1479 : * End this primitive index scan, but schedule another.
1480 : *
1481 : * Note: We make a soft assumption that the current scan direction will
1482 : * also be used within _bt_next, when it is asked to step off this page.
1483 : * It is up to _bt_next to cancel this scheduled primitive index scan
1484 : * whenever it steps to a page in the direction opposite currPos.dir.
1485 : */
1486 16894 : pstate->continuescan = false; /* Tell _bt_readpage we're done... */
1487 16894 : so->needPrimScan = true; /* ...but call _bt_first again */
1488 :
1489 16894 : if (scan->parallel_scan)
1490 36 : _bt_parallel_primscan_schedule(scan, so->currPos.currPage);
1491 :
1492 : /* Caller's tuple doesn't match the new qual */
1493 16894 : return false;
1494 :
1495 7468 : end_toplevel_scan:
1496 :
1497 : /*
1498 : * End the current primitive index scan, but don't schedule another.
1499 : *
1500 : * This ends the entire top-level scan in the current scan direction.
1501 : *
1502 : * Note: The scan's arrays (including any non-required arrays) are now in
1503 : * their final positions for the current scan direction. If the scan
1504 : * direction happens to change, then the arrays will already be in their
1505 : * first positions for what will then be the current scan direction.
1506 : */
1507 7468 : pstate->continuescan = false; /* Tell _bt_readpage we're done... */
1508 7468 : so->needPrimScan = false; /* ...and don't call _bt_first again */
1509 :
1510 : /* Caller's tuple doesn't match any qual */
1511 7468 : return false;
1512 : }
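/*
 * In summary (a reading aid distilled from the assignments above): the three
 * exit labels communicate with _bt_readpage and _bt_first through two flags,
 * set as (pstate->continuescan, so->needPrimScan) pairs:
 *
 *     continue_scan       (true,  false)  keep reading the current leaf page
 *     new_prim_scan       (false, true)   stop, but schedule another descent
 *     end_toplevel_scan   (false, false)  stop, don't call _bt_first again
 */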
1513 :
1514 : #ifdef USE_ASSERT_CHECKING
1515 : /*
1516 : * Verify that the scan's qual state matches what we expect at the point that
1517 : * _bt_start_prim_scan is about to start a just-scheduled new primitive scan.
1518 : *
1519 : * We enforce a rule against non-required array scan keys: they must start out
1520 : * with whatever element is the first for the scan's current scan direction.
1521 : * See _bt_rewind_nonrequired_arrays comments for an explanation.
1522 : */
1523 : static bool
1524 : _bt_verify_arrays_bt_first(IndexScanDesc scan, ScanDirection dir)
1525 : {
1526 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1527 : int arrayidx = 0;
1528 :
1529 : for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
1530 : {
1531 : ScanKey cur = so->keyData + ikey;
1532 : BTArrayKeyInfo *array = NULL;
1533 : int first_elem_dir;
1534 :
1535 : if (!(cur->sk_flags & SK_SEARCHARRAY) ||
1536 : cur->sk_strategy != BTEqualStrategyNumber)
1537 : continue;
1538 :
1539 : array = &so->arrayKeys[arrayidx++];
1540 :
1541 : if (((cur->sk_flags & SK_BT_REQFWD) && ScanDirectionIsForward(dir)) ||
1542 : ((cur->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsBackward(dir)))
1543 : continue;
1544 :
1545 : if (ScanDirectionIsForward(dir))
1546 : first_elem_dir = 0;
1547 : else
1548 : first_elem_dir = array->num_elems - 1;
1549 :
1550 : if (array->cur_elem != first_elem_dir)
1551 : return false;
1552 : }
1553 :
1554 : return _bt_verify_keys_with_arraykeys(scan);
1555 : }
1556 :
1557 : /*
1558 : * Verify that the scan's "so->keyData[]" scan keys are in agreement with
1559 : * its array key state
1560 : */
1561 : static bool
1562 : _bt_verify_keys_with_arraykeys(IndexScanDesc scan)
1563 : {
1564 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1565 : int last_sk_attno = InvalidAttrNumber,
1566 : arrayidx = 0;
1567 :
1568 : if (!so->qual_ok)
1569 : return false;
1570 :
1571 : for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
1572 : {
1573 : ScanKey cur = so->keyData + ikey;
1574 : BTArrayKeyInfo *array;
1575 :
1576 : if (cur->sk_strategy != BTEqualStrategyNumber ||
1577 : !(cur->sk_flags & SK_SEARCHARRAY))
1578 : continue;
1579 :
1580 : array = &so->arrayKeys[arrayidx++];
1581 : if (array->scan_key != ikey)
1582 : return false;
1583 :
1584 : if (array->num_elems <= 0)
1585 : return false;
1586 :
1587 : if (cur->sk_argument != array->elem_values[array->cur_elem])
1588 : return false;
1589 : if (last_sk_attno > cur->sk_attno)
1590 : return false;
1591 : last_sk_attno = cur->sk_attno;
1592 : }
1593 :
1594 : if (arrayidx != so->numArrayKeys)
1595 : return false;
1596 :
1597 : return true;
1598 : }
1599 : #endif
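/*
 * A minimal usage sketch for the two verifiers above (an illustration, not
 * from the original file): they only ever run inside assertions, so they
 * compile to nothing in non-assert builds, e.g.
 *
 *     Assert(_bt_verify_arrays_bt_first(scan, dir));
 *
 * placed where a just-scheduled new primitive scan is about to start, per
 * the header comment's reference to _bt_start_prim_scan.
 */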
1600 :
1601 : /*
1602 : * Test whether an indextuple satisfies all the scankey conditions.
1603 : *
1604 : * Return true if so, false if not. If the tuple fails to pass the qual,
1605 : * we also determine whether there's any need to continue the scan beyond
1606 : * this tuple, and set pstate.continuescan accordingly. See comments for
1607 : * _bt_preprocess_keys() about how this is done.
1608 : *
1609 : * Forward scan callers can pass a high key tuple in the hopes of having
1610 : * us set *continuescan to false, and avoiding an unnecessary visit to
1611 : * the page to the right.
1612 : *
1613 : * Advances the scan's array keys when necessary for arrayKeys=true callers.
1614 : * Caller can avoid all array related side-effects when calling just to do a
1615 : * page continuescan precheck -- pass arrayKeys=false for that. Scans without
1616 : * any array keys must always pass arrayKeys=false.
1617 : *
1618 : * Also stops and starts primitive index scans for arrayKeys=true callers.
1619 : * Scans with array keys are required to set up page state that helps us with
1620 : * this. The page's finaltup tuple (the page high key for a forward scan, or
1621 : * the page's first non-pivot tuple for a backward scan) must be set in
1622 : * pstate.finaltup ahead of the first call here for the page (or possibly the
1623 : * first call after an initial continuescan-setting page precheck call). Set
1624 : * this to NULL for rightmost page (or the leftmost page for backwards scans).
1625 : *
1626 : * scan: index scan descriptor (containing a search-type scankey)
1627 : * pstate: page level input and output parameters
1628 : * arrayKeys: should we advance the scan's array keys if necessary?
1629 : * tuple: index tuple to test
1630 : * tupnatts: number of attributes in tuple (high key may be truncated)
1631 : */
1632 : bool
1633 56333366 : _bt_checkkeys(IndexScanDesc scan, BTReadPageState *pstate, bool arrayKeys,
1634 : IndexTuple tuple, int tupnatts)
1635 : {
1636 56333366 : TupleDesc tupdesc = RelationGetDescr(scan->indexRelation);
1637 56333366 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1638 56333366 : ScanDirection dir = so->currPos.dir;
1639 56333366 : int ikey = 0;
1640 : bool res;
1641 :
1642 : Assert(BTreeTupleGetNAtts(tuple, scan->indexRelation) == tupnatts);
1643 : Assert(!so->needPrimScan && !so->scanBehind && !so->oppositeDirCheck);
1644 :
1645 56333366 : res = _bt_check_compare(scan, dir, tuple, tupnatts, tupdesc,
1646 56333366 : arrayKeys, pstate->prechecked, pstate->firstmatch,
1647 : &pstate->continuescan, &ikey);
1648 :
1649 : #ifdef USE_ASSERT_CHECKING
1650 : if (!arrayKeys && so->numArrayKeys)
1651 : {
1652 : /*
1653 : * This is a continuescan precheck call for a scan with array keys.
1654 : *
1655 : * Assert that the scan isn't in danger of becoming confused.
1656 : */
1657 : Assert(!so->scanBehind && !so->oppositeDirCheck);
1658 : Assert(!pstate->prechecked && !pstate->firstmatch);
1659 : Assert(!_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc,
1660 : tupnatts, false, 0, NULL));
1661 : }
1662 : if (pstate->prechecked || pstate->firstmatch)
1663 : {
1664 : bool dcontinuescan;
1665 : int dikey = 0;
1666 :
1667 : /*
1668 : * Call relied on continuescan/firstmatch prechecks -- assert that we
1669 : * get the same answer without those optimizations
1670 : */
1671 : Assert(res == _bt_check_compare(scan, dir, tuple, tupnatts, tupdesc,
1672 : false, false, false,
1673 : &dcontinuescan, &dikey));
1674 : Assert(pstate->continuescan == dcontinuescan);
1675 : }
1676 : #endif
1677 :
1678 : /*
1679 : * Only one _bt_check_compare call is required in the common case where
1680 : * there are no equality strategy array scan keys. Otherwise we can only
1681 : * accept _bt_check_compare's answer unreservedly when it didn't set
1682 : * pstate.continuescan=false.
1683 : */
1684 56333366 : if (!arrayKeys || pstate->continuescan)
1685 56300414 : return res;
1686 :
1687 : /*
1688 : * _bt_check_compare call set continuescan=false in the presence of
1689 : * equality type array keys. This could mean that the tuple is just past
1690 : * the end of matches for the current array keys.
1691 : *
1692 : * It's also possible that the scan is still _before_ the _start_ of
1693 : * tuples matching the current set of array keys. Check for that first.
1694 : */
1695 32952 : if (_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts, true,
1696 : ikey, NULL))
1697 : {
1698 : /* Override _bt_check_compare, continue primitive scan */
1699 5092 : pstate->continuescan = true;
1700 :
1701 : /*
1702 : * We will end up here repeatedly given a group of tuples > the
1703 : * previous array keys and < the now-current keys (for a backwards
1704 : * scan it's just the same, though the operators swap positions).
1705 : *
1706 : * We must avoid allowing this linear search process to scan very many
1707 : * tuples from well before the start of tuples matching the current
1708 : * array keys (or from well before the point where we'll once again
1709 : * have to advance the scan's array keys).
1710 : *
1711 : * We keep the overhead under control by speculatively "looking ahead"
1712 : * to later still-unscanned items from this same leaf page. We'll
1713 : * only attempt this once the number of tuples that the linear search
1714 : * process has examined starts to get out of hand.
1715 : */
1716 5092 : pstate->rechecks++;
1717 5092 : if (pstate->rechecks >= LOOK_AHEAD_REQUIRED_RECHECKS)
1718 : {
1719 : /* See if we should skip ahead within the current leaf page */
1720 2262 : _bt_checkkeys_look_ahead(scan, pstate, tupnatts, tupdesc);
1721 :
1722 : /*
1723 : * Might have set pstate.skip to a later page offset. When that
1724 : * happens then _bt_readpage caller will inexpensively skip ahead
1725 : * to a later tuple from the same page (the one just after the
1726 : * tuple we successfully "looked ahead" to).
1727 : */
1728 : }
1729 :
1730 : /* This indextuple doesn't match the current qual, in any case */
1731 5092 : return false;
1732 : }
1733 :
1734 : /*
1735 : * Caller's tuple is >= the current set of array keys and other equality
1736 : * constraint scan keys (or <= if this is a backwards scan). It's now
1737 : * clear that we _must_ advance any required array keys in lockstep with
1738 : * the scan.
1739 : */
1740 27860 : return _bt_advance_array_keys(scan, pstate, tuple, tupnatts, tupdesc,
1741 : ikey, true);
1742 : }
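/*
 * A simplified caller-side sketch of the contract described in
 * _bt_checkkeys's header comment (illustrative only -- the real caller is
 * _bt_readpage, whose details differ; forward scans only, and pivot tuple
 * handling is omitted).  The caller sets pstate.finaltup up front, then
 * honors pstate.continuescan and pstate.skip after every call:
 */
static void
readpage_sketch(IndexScanDesc scan, BTReadPageState *pstate, Page page)
{
    OffsetNumber offnum = pstate->minoff;

    /* assume pstate->finaltup was set (NULL only on the rightmost page) */
    while (offnum <= pstate->maxoff)
    {
        ItemId      iid = PageGetItemId(page, offnum);
        IndexTuple  tuple = (IndexTuple) PageGetItem(page, iid);

        pstate->offnum = offnum;
        if (_bt_checkkeys(scan, pstate, true, tuple,
                          BTreeTupleGetNAtts(tuple, scan->indexRelation)))
        {
            /* tuple satisfies the qual -- real caller would save it here */
        }

        if (!pstate->continuescan)
            break;              /* end of the (primitive) index scan */

        if (pstate->skip)
        {
            /* "look ahead" mechanism told us which offset to try next */
            offnum = pstate->skip;
            pstate->skip = InvalidOffsetNumber;
        }
        else
            offnum = OffsetNumberNext(offnum);
    }
}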
1743 :
1744 : /*
1745 : * Test whether caller's finaltup tuple is still before the start of matches
1746 : * for the current array keys.
1747 : *
1748 : * Called at the start of reading a page during a scan with array keys, though
1749 : * only when the so->scanBehind flag was set on the scan's prior page.
1750 : *
1751 : * Returns false if the tuple is still before the start of matches. When that
1752 : * happens, caller should cut its losses and start a new primitive index scan.
1753 : * Otherwise returns true.
1754 : */
1755 : bool
1756 366 : _bt_scanbehind_checkkeys(IndexScanDesc scan, ScanDirection dir,
1757 : IndexTuple finaltup)
1758 : {
1759 366 : Relation rel = scan->indexRelation;
1760 366 : TupleDesc tupdesc = RelationGetDescr(rel);
1761 366 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1762 366 : int nfinaltupatts = BTreeTupleGetNAtts(finaltup, rel);
1763 :
1764 : Assert(so->numArrayKeys);
1765 :
1766 366 : if (_bt_tuple_before_array_skeys(scan, dir, finaltup, tupdesc,
1767 : nfinaltupatts, false, 0, NULL))
1768 342 : return false;
1769 :
1770 24 : if (!so->oppositeDirCheck)
1771 18 : return true;
1772 :
1773 6 : return _bt_oppodir_checkkeys(scan, dir, finaltup);
1774 : }
1775 :
1776 : /*
1777 : * Test whether an indextuple fails to satisfy an inequality required in the
1778 : * opposite direction only.
1779 : *
1780 : * Caller's finaltup tuple is the page high key (for forwards scans), or the
1781 : * first non-pivot tuple (for backwards scans). Called during scans with
1782 : * required array keys and required opposite-direction inequalities.
1783 : *
1784 : * Returns false if an inequality scan key required in the opposite direction
1785 : * only isn't satisfied (and any earlier required scan keys are satisfied).
1786 : * Otherwise returns true.
1787 : *
1788 : * An unsatisfied inequality required in the opposite direction only might
1789 : * well enable skipping over many leaf pages, provided another _bt_first call
1790 : * takes place. This type of unsatisfied inequality won't usually cause
1791 : * _bt_checkkeys to stop the scan to consider array advancement/starting a new
1792 : * primitive index scan.
1793 : */
1794 : static bool
1795 6 : _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir,
1796 : IndexTuple finaltup)
1797 : {
1798 6 : Relation rel = scan->indexRelation;
1799 6 : TupleDesc tupdesc = RelationGetDescr(rel);
1800 6 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1801 6 : int nfinaltupatts = BTreeTupleGetNAtts(finaltup, rel);
1802 : bool continuescan;
1803 6 : ScanDirection flipped = -dir;
1804 6 : int ikey = 0;
1805 :
1806 : Assert(so->numArrayKeys);
1807 :
1808 6 : _bt_check_compare(scan, flipped, finaltup, nfinaltupatts, tupdesc,
1809 : false, false, false, &continuescan, &ikey);
1810 :
1811 6 : if (!continuescan && so->keyData[ikey].sk_strategy != BTEqualStrategyNumber)
1812 0 : return false;
1813 :
1814 6 : return true;
1815 : }
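/*
 * The "flipped = -dir" trick above works because ScanDirection (from
 * access/sdir.h) defines BackwardScanDirection = -1, NoMovementScanDirection
 * = 0, and ForwardScanDirection = 1, so unary minus swaps the two scan
 * directions:
 *
 *     ScanDirection dir = ForwardScanDirection;
 *     ScanDirection flipped = -dir;
 *
 *     Assert(ScanDirectionIsBackward(flipped));
 */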
1816 :
1817 : /*
1818 : * Test whether an indextuple satisfies current scan condition.
1819 : *
1820 : * Return true if so, false if not. If not, also sets *continuescan to false
1821 : * when it's also not possible for any later tuples to pass the current qual
1822 : * (with the scan's current set of array keys, in the current scan direction),
1823 : * in addition to setting *ikey to the so->keyData[] subscript/offset for the
1824 : * unsatisfied scan key (needed when caller must consider advancing the scan's
1825 : * array keys).
1826 : *
1827 : * This is a subroutine for _bt_checkkeys. We provisionally assume that
1828 : * reaching the end of the current set of required keys (in particular the
1829 : * current required array keys) ends the ongoing (primitive) index scan.
1830 : * Callers without array keys should just end the scan right away when they
1831 : * find that continuescan has been set to false here by us. Things are more
1832 : * complicated for callers with array keys.
1833 : *
1834 : * Callers with array keys must first consider advancing the arrays when
1835 : * continuescan has been set to false here by us. They must then consider if
1836 : * it really does make sense to end the current (primitive) index scan, in
1837 : * light of everything that is known at that point. (In general when we set
1838 : * continuescan=false for these callers it must be treated as provisional.)
1839 : *
1840 : * We deal with advancing unsatisfied non-required arrays directly, though.
1841 : * This is safe, since by definition non-required keys can't end the scan.
1842 : * This is just how we determine if non-required arrays are just unsatisfied
1843 : * This is simply how we determine whether non-required arrays are merely
1844 : * unsatisfied by the current array key, or truly unsatisfied (that is,
1845 : * unsatisfied by every possible array key).
1846 : * Though we advance non-required array keys on our own, that shouldn't have
1847 : * any lasting consequences for the scan. By definition, non-required arrays
1848 : * have no fixed relationship with the scan's progress. (There are delicate
1849 : * considerations for non-required arrays when the arrays need to be advanced
1850 : * following our setting continuescan to false, but that doesn't concern us.)
1851 : *
1852 : * Pass advancenonrequired=false to avoid all array related side effects.
1853 : * This allows _bt_advance_array_keys caller to avoid infinite recursion.
1854 : */
1855 : static bool
1856 56335628 : _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
1857 : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
1858 : bool advancenonrequired, bool prechecked, bool firstmatch,
1859 : bool *continuescan, int *ikey)
1860 : {
1861 56335628 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1862 :
1863 56335628 : *continuescan = true; /* default assumption */
1864 :
1865 109317300 : for (; *ikey < so->numberOfKeys; (*ikey)++)
1866 : {
1867 64361046 : ScanKey key = so->keyData + *ikey;
1868 : Datum datum;
1869 : bool isNull;
1870 64361046 : bool requiredSameDir = false,
1871 64361046 : requiredOppositeDirOnly = false;
1872 :
1873 : /*
1874 : * Check if the key is required in the current scan direction, in the
1875 : * opposite scan direction _only_, or in neither direction
1876 : */
1877 64361046 : if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsForward(dir)) ||
1878 15169838 : ((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsBackward(dir)))
1879 49208186 : requiredSameDir = true;
1880 15152860 : else if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsBackward(dir)) ||
1881 6853478 : ((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsForward(dir)))
1882 14574086 : requiredOppositeDirOnly = true;
1883 :
1884 : /*
1885 : * If the caller told us that *continuescan was found to be true
1886 : * for the last item on the page, then all keys required in the
1887 : * current scan direction must be satisfied by the current item as
1888 : * well: had the current item failed such a key, the last item would
1889 : * have failed it too, and the precheck would have cleared the flag.
1890 : *
1891 : * If a key is required in the opposite scan direction only, we can
1892 : * skip its check when the caller tells us there was already at least
1893 : * one matching item on the page. We also require the *continuescan
1894 : * flag to be true for the last item on the page, to know there are
1895 : * no NULLs.
1896 : *
1897 : * Both shortcuts work except for row keys, where NULLs could be
1898 : * found in the middle of matching values.
1899 : */
1900 64361046 : if (prechecked &&
1901 1620968 : (requiredSameDir || (requiredOppositeDirOnly && firstmatch)) &&
1902 1527142 : !(key->sk_flags & SK_ROW_HEADER))
1903 19798930 : continue;
1904 :
1905 62833904 : if (key->sk_attno > tupnatts)
1906 : {
1907 : /*
1908 : * This attribute is truncated (must be high key). The value for
1909 : * this attribute in the first non-pivot tuple on the page to the
1910 : * right could be any possible value. Assume that truncated
1911 : * attribute passes the qual.
1912 : */
1913 : Assert(BTreeTupleIsPivot(tuple));
1914 2200 : continue;
1915 : }
1916 :
1917 : /* row-comparison keys need special processing */
1918 62831704 : if (key->sk_flags & SK_ROW_HEADER)
1919 : {
1920 2448 : if (_bt_check_rowcompare(key, tuple, tupnatts, tupdesc, dir,
1921 : continuescan))
1922 2382 : continue;
1923 11379374 : return false;
1924 : }
1925 :
1926 62829256 : datum = index_getattr(tuple,
1927 62829256 : key->sk_attno,
1928 : tupdesc,
1929 : &isNull);
1930 :
1931 62829256 : if (key->sk_flags & SK_ISNULL)
1932 : {
1933 : /* Handle IS NULL/NOT NULL tests */
1934 18315368 : if (key->sk_flags & SK_SEARCHNULL)
1935 : {
1936 48236 : if (isNull)
1937 164 : continue; /* tuple satisfies this qual */
1938 : }
1939 : else
1940 : {
1941 : Assert(key->sk_flags & SK_SEARCHNOTNULL);
1942 18267132 : if (!isNull)
1943 18267042 : continue; /* tuple satisfies this qual */
1944 : }
1945 :
1946 : /*
1947 : * Tuple fails this qual. If it's a required qual for the current
1948 : * scan direction, then we can conclude no further tuples will
1949 : * pass, either.
1950 : */
1951 48162 : if (requiredSameDir)
1952 36 : *continuescan = false;
1953 :
1954 : /*
1955 : * In any case, this indextuple doesn't match the qual.
1956 : */
1957 48162 : return false;
1958 : }
1959 :
1960 44513888 : if (isNull)
1961 : {
1962 150 : if (key->sk_flags & SK_BT_NULLS_FIRST)
1963 : {
1964 : /*
1965 : * Since NULLs are sorted before non-NULLs, we know we have
1966 : * reached the lower limit of the range of values for this
1967 : * index attr. On a backward scan, we can stop if this qual
1968 : * is one of the "must match" subset. We can stop regardless
1969 : * of whether the qual is > or <, so long as it's required,
1970 : * because it's not possible for any future tuples to pass. On
1971 : * a forward scan, however, we must keep going, because we may
1972 : * have initially positioned to the start of the index.
1973 : * (_bt_advance_array_keys also relies on this behavior during
1974 : * forward scans.)
1975 : */
1976 0 : if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
1977 : ScanDirectionIsBackward(dir))
1978 0 : *continuescan = false;
1979 : }
1980 : else
1981 : {
1982 : /*
1983 : * Since NULLs are sorted after non-NULLs, we know we have
1984 : * reached the upper limit of the range of values for this
1985 : * index attr. On a forward scan, we can stop if this qual is
1986 : * one of the "must match" subset. We can stop regardless of
1987 : * whether the qual is > or <, so long as it's required,
1988 : * because it's not possible for any future tuples to pass. On
1989 : * a backward scan, however, we must keep going, because we
1990 : * may have initially positioned to the end of the index.
1991 : * (_bt_advance_array_keys also relies on this behavior during
1992 : * backward scans.)
1993 : */
1994 150 : if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
1995 : ScanDirectionIsForward(dir))
1996 84 : *continuescan = false;
1997 : }
1998 :
1999 : /*
2000 : * In any case, this indextuple doesn't match the qual.
2001 : */
2002 150 : return false;
2003 : }
2004 :
2005 : /*
2006 : * Apply the key-checking function, though only if we must.
2007 : *
2008 : * When a key is required in the opposite-of-scan direction _only_,
2009 : * it must already be satisfied when firstmatch=true indicates that
2010 : * an earlier tuple from this same page satisfied it.
2011 : */
2012 44513738 : if (!(requiredOppositeDirOnly && firstmatch) &&
2013 40796922 : !DatumGetBool(FunctionCall2Coll(&key->sk_func, key->sk_collation,
2014 : datum, key->sk_argument)))
2015 : {
2016 : /*
2017 : * Tuple fails this qual. If it's a required qual for the current
2018 : * scan direction, then we can conclude no further tuples will
2019 : * pass, either.
2020 : *
2021 : * Note: because we stop the scan as soon as any required equality
2022 : * qual fails, it is critical that equality quals be used for the
2023 : * initial positioning in _bt_first() when they are available. See
2024 : * comments in _bt_first().
2025 : */
2026 11330996 : if (requiredSameDir)
2027 10889910 : *continuescan = false;
2028 :
2029 : /*
2030 : * If this is a non-required equality-type array key, the tuple
2031 : * needs to be checked against every possible array key. Handle
2032 : * this by "advancing" the scan key's array to a matching value
2033 : * (if we're successful then the tuple might match the qual).
2034 : */
2035 441086 : else if (advancenonrequired &&
2036 378 : key->sk_strategy == BTEqualStrategyNumber &&
2037 264 : (key->sk_flags & SK_SEARCHARRAY))
2038 264 : return _bt_advance_array_keys(scan, NULL, tuple, tupnatts,
2039 : tupdesc, *ikey, false);
2040 :
2041 : /*
2042 : * This indextuple doesn't match the qual.
2043 : */
2044 11330732 : return false;
2045 : }
2046 : }
2047 :
2048 : /* If we get here, the tuple passes all index quals. */
2049 44956254 : return true;
2050 : }
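/*
 * A worked example of the requiredSameDir/requiredOppositeDirOnly
 * classification above, assuming an ASC index column (where preprocessing
 * marks < and <= keys SK_BT_REQFWD, > and >= keys SK_BT_REQBKWD, and = keys
 * both).  Given the qual "a > 5 AND a < 9" during a forward scan:
 *
 *     a < 9   SK_BT_REQFWD    requiredSameDir          failure ends the scan
 *     a > 5   SK_BT_REQBKWD   requiredOppositeDirOnly  used by _bt_first
 *
 * During a backward scan the two roles are exactly reversed.
 */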
2051 :
2052 : /*
2053 : * Test whether an indextuple satisfies a row-comparison scan condition.
2054 : *
2055 : * Return true if so, false if not. If not, also clear *continuescan if
2056 : * it's not possible for any future tuples in the current scan direction
2057 : * to pass the qual.
2058 : *
2059 : * This is a subroutine for _bt_checkkeys/_bt_check_compare.
2060 : */
2061 : static bool
2062 2448 : _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
2063 : TupleDesc tupdesc, ScanDirection dir, bool *continuescan)
2064 : {
2065 2448 : ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
2066 2448 : int32 cmpresult = 0;
2067 : bool result;
2068 :
2069 : /* First subkey should be same as the header says */
2070 : Assert(subkey->sk_attno == skey->sk_attno);
2071 :
2072 : /* Loop over columns of the row condition */
2073 : for (;;)
2074 240 : {
2075 : Datum datum;
2076 : bool isNull;
2077 :
2078 : Assert(subkey->sk_flags & SK_ROW_MEMBER);
2079 :
2080 2688 : if (subkey->sk_attno > tupnatts)
2081 : {
2082 : /*
2083 : * This attribute is truncated (must be high key). The value for
2084 : * this attribute in the first non-pivot tuple on the page to the
2085 : * right could be any possible value. Assume that truncated
2086 : * attribute passes the qual.
2087 : */
2088 : Assert(BTreeTupleIsPivot(tuple));
2089 6 : cmpresult = 0;
2090 6 : if (subkey->sk_flags & SK_ROW_END)
2091 6 : break;
2092 0 : subkey++;
2093 0 : continue;
2094 : }
2095 :
2096 2682 : datum = index_getattr(tuple,
2097 2682 : subkey->sk_attno,
2098 : tupdesc,
2099 : &isNull);
2100 :
2101 2682 : if (isNull)
2102 : {
2103 48 : if (subkey->sk_flags & SK_BT_NULLS_FIRST)
2104 : {
2105 : /*
2106 : * Since NULLs are sorted before non-NULLs, we know we have
2107 : * reached the lower limit of the range of values for this
2108 : * index attr. On a backward scan, we can stop if this qual
2109 : * is one of the "must match" subset. We can stop regardless
2110 : * of whether the qual is > or <, so long as it's required,
2111 : * because it's not possible for any future tuples to pass. On
2112 : * a forward scan, however, we must keep going, because we may
2113 : * have initially positioned to the start of the index.
2114 : * (_bt_advance_array_keys also relies on this behavior during
2115 : * forward scans.)
2116 : */
2117 0 : if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
2118 : ScanDirectionIsBackward(dir))
2119 0 : *continuescan = false;
2120 : }
2121 : else
2122 : {
2123 : /*
2124 : * Since NULLs are sorted after non-NULLs, we know we have
2125 : * reached the upper limit of the range of values for this
2126 : * index attr. On a forward scan, we can stop if this qual is
2127 : * one of the "must match" subset. We can stop regardless of
2128 : * whether the qual is > or <, so long as it's required,
2129 : * because it's not possible for any future tuples to pass. On
2130 : * a backward scan, however, we must keep going, because we
2131 : * may have initially positioned to the end of the index.
2132 : * (_bt_advance_array_keys also relies on this behavior during
2133 : * backward scans.)
2134 : */
2135 48 : if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
2136 : ScanDirectionIsForward(dir))
2137 0 : *continuescan = false;
2138 : }
2139 :
2140 : /*
2141 : * In any case, this indextuple doesn't match the qual.
2142 : */
2143 60 : return false;
2144 : }
2145 :
2146 2634 : if (subkey->sk_flags & SK_ISNULL)
2147 : {
2148 : /*
2149 : * Unlike the simple-scankey case, this isn't a disallowed case
2150 : * (except when it's the first row element that has the NULL arg).
2151 : * But it can never match. If all the earlier row comparison
2152 : * columns are required for the scan direction, we can stop the
2153 : * scan, because there can't be another tuple that will succeed.
2154 : */
2155 : Assert(subkey != (ScanKey) DatumGetPointer(skey->sk_argument));
2156 12 : subkey--;
2157 12 : if ((subkey->sk_flags & SK_BT_REQFWD) &&
2158 : ScanDirectionIsForward(dir))
2159 6 : *continuescan = false;
2160 6 : else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
2161 : ScanDirectionIsBackward(dir))
2162 6 : *continuescan = false;
2163 12 : return false;
2164 : }
2165 :
2166 : /* Perform the test --- three-way comparison not bool operator */
2167 2622 : cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
2168 : subkey->sk_collation,
2169 : datum,
2170 : subkey->sk_argument));
2171 :
2172 2622 : if (subkey->sk_flags & SK_BT_DESC)
2173 0 : INVERT_COMPARE_RESULT(cmpresult);
2174 :
2175 : /* Done comparing if unequal, else advance to next column */
2176 2622 : if (cmpresult != 0)
2177 2382 : break;
2178 :
2179 240 : if (subkey->sk_flags & SK_ROW_END)
2180 0 : break;
2181 240 : subkey++;
2182 : }
2183 :
2184 : /*
2185 : * At this point cmpresult indicates the overall result of the row
2186 : * comparison, and subkey points to the deciding column (or the last
2187 : * column if the result is "=").
2188 : */
2189 2388 : switch (subkey->sk_strategy)
2190 : {
2191 : /* EQ and NE cases aren't allowed here */
2192 186 : case BTLessStrategyNumber:
2193 186 : result = (cmpresult < 0);
2194 186 : break;
2195 1590 : case BTLessEqualStrategyNumber:
2196 1590 : result = (cmpresult <= 0);
2197 1590 : break;
2198 240 : case BTGreaterEqualStrategyNumber:
2199 240 : result = (cmpresult >= 0);
2200 240 : break;
2201 372 : case BTGreaterStrategyNumber:
2202 372 : result = (cmpresult > 0);
2203 372 : break;
2204 0 : default:
2205 0 : elog(ERROR, "unexpected strategy number %d", subkey->sk_strategy);
2206 : result = 0; /* keep compiler quiet */
2207 : break;
2208 : }
2209 :
2210 2388 : if (!result)
2211 : {
2212 : /*
2213 : * Tuple fails this qual. If it's a required qual for the current
2214 : * scan direction, then we can conclude no further tuples will pass,
2215 : * either. Note we have to look at the deciding column, not
2216 : * necessarily the first or last column of the row condition.
2217 : */
2218 6 : if ((subkey->sk_flags & SK_BT_REQFWD) &&
2219 : ScanDirectionIsForward(dir))
2220 6 : *continuescan = false;
2221 0 : else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
2222 : ScanDirectionIsBackward(dir))
2223 0 : *continuescan = false;
2224 : }
2225 :
2226 2388 : return result;
2227 : }
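/*
 * A minimal standalone sketch of the row-comparison rule implemented above
 * (hypothetical helper, not part of nbtree): the deciding column is the
 * first unequal one, so e.g. (a,b) <= (x,y) iff a < x, or a = x and b <= y.
 */
static bool
row_le_sketch(int a, int b, int x, int y)
{
    int         cmp = (a > x) - (a < x);    /* three-way compare, column 1 */

    if (cmp == 0)
        cmp = (b > y) - (b < y);            /* tie: second column decides */

    return cmp <= 0;        /* the BTLessEqualStrategyNumber case above */
}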
2228 :
2229 : /*
2230 : * Determine if a scan with array keys should skip over uninteresting tuples.
2231 : *
2232 : * This is a subroutine for _bt_checkkeys. Called when _bt_readpage's linear
2233 : * search process (started after it finishes reading an initial group of
2234 : * matching tuples, used to locate the start of the next group of tuples
2235 : * matching the next set of required array keys) has already scanned an
2236 : * excessive number of tuples whose key space is "between arrays".
2237 : *
2238 : * When we perform look ahead successfully, we'll set pstate.skip, which
2239 : * instructs _bt_readpage to skip ahead to that tuple next (could be past the
2240 : * end of the scan's leaf page). Pages where the optimization is effective
2241 : * will generally still need to skip several times. Each call here performs
2242 : * only a single "look ahead" comparison of a later tuple, whose distance from
2243 : * the current tuple's offset number is determined by applying heuristics.
2244 : */
2245 : static void
2246 2262 : _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
2247 : int tupnatts, TupleDesc tupdesc)
2248 : {
2249 2262 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2250 2262 : ScanDirection dir = so->currPos.dir;
2251 : OffsetNumber aheadoffnum;
2252 : IndexTuple ahead;
2253 :
2254 : /* Avoid looking ahead when comparing the page high key */
2255 2262 : if (pstate->offnum < pstate->minoff)
2256 0 : return;
2257 :
2258 : /*
2259 : * Don't look ahead when there aren't enough tuples remaining on the page
2260 : * (in the current scan direction) for it to be worth our while
2261 : */
2262 2262 : if (ScanDirectionIsForward(dir) &&
2263 2232 : pstate->offnum >= pstate->maxoff - LOOK_AHEAD_DEFAULT_DISTANCE)
2264 36 : return;
2265 2226 : else if (ScanDirectionIsBackward(dir) &&
2266 30 : pstate->offnum <= pstate->minoff + LOOK_AHEAD_DEFAULT_DISTANCE)
2267 18 : return;
2268 :
2269 : /*
2270 : * The look ahead distance starts small, and ramps up as each call here
2271 : * allows _bt_readpage to skip over more tuples
2272 : */
2273 2208 : if (!pstate->targetdistance)
2274 724 : pstate->targetdistance = LOOK_AHEAD_DEFAULT_DISTANCE;
2275 1484 : else if (pstate->targetdistance < MaxIndexTuplesPerPage / 2)
2276 1484 : pstate->targetdistance *= 2;
2277 :
2278 : /* Don't read past the end (or before the start) of the page, though */
2279 2208 : if (ScanDirectionIsForward(dir))
2280 2196 : aheadoffnum = Min((int) pstate->maxoff,
2281 : (int) pstate->offnum + pstate->targetdistance);
2282 : else
2283 12 : aheadoffnum = Max((int) pstate->minoff,
2284 : (int) pstate->offnum - pstate->targetdistance);
2285 :
2286 2208 : ahead = (IndexTuple) PageGetItem(pstate->page,
2287 2208 : PageGetItemId(pstate->page, aheadoffnum));
2288 2208 : if (_bt_tuple_before_array_skeys(scan, dir, ahead, tupdesc, tupnatts,
2289 : false, 0, NULL))
2290 : {
2291 : /*
2292 : * Success -- instruct _bt_readpage to skip ahead to the very next tuple
2293 : * after the one we determined was still before the current array keys
2294 : */
2295 1320 : if (ScanDirectionIsForward(dir))
2296 1308 : pstate->skip = aheadoffnum + 1;
2297 : else
2298 12 : pstate->skip = aheadoffnum - 1;
2299 : }
2300 : else
2301 : {
2302 : /*
2303 : * Failure -- "ahead" tuple is too far ahead (we were too aggressive).
2304 : *
2305 : * Reset the number of rechecks, and aggressively reduce the target
2306 : * distance (we're much more aggressive here than we were when the
2307 : * distance was initially ramped up).
2308 : */
2309 888 : pstate->rechecks = 0;
2310 888 : pstate->targetdistance = Max(pstate->targetdistance / 8, 1);
2311 : }
2312 : }
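/*
 * The targetdistance ramp above, traced by hand (a reading aid): the first
 * probe looks LOOK_AHEAD_DEFAULT_DISTANCE (5) tuples ahead, and each
 * successive call doubles the distance -- 5, 10, 20, 40, ... -- capped at
 * MaxIndexTuplesPerPage / 2.  A single failed probe cuts the distance back
 * to Max(targetdistance / 8, 1) and zeroes pstate->rechecks, so recovering
 * from one overshoot costs several calls of gradual re-ramping.
 */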
2313 :
2314 : /*
2315 : * _bt_killitems - set LP_DEAD state for items an indexscan caller has
2316 : * told us were killed
2317 : *
2318 : * scan->opaque, referenced locally through so, contains information about the
2319 : * current page and killed tuples thereon (generally, this should only be
2320 : * called if so->numKilled > 0).
2321 : *
2322 : * The caller does not have a lock on the page and may or may not have the
2323 : * page pinned in a buffer. Note that read-lock is sufficient for setting
2324 : * LP_DEAD status (which is only a hint).
2325 : *
2326 : * We match items by heap TID before assuming they are the right ones to
2327 : * delete. We cope with cases where items have moved right due to insertions.
2328 : * If an item has moved off the current page due to a split, we'll fail to
2329 : * find it and do nothing (this is not an error case --- we assume the item
2330 : * will eventually get marked in a future indexscan).
2331 : *
2332 : * Note that if we hold a pin on the target page continuously from initially
2333 : * reading the items until applying this function, VACUUM cannot have deleted
2334 : * any items from the page, and so there is no need to search left from the
2335 : * recorded offset. (This observation also guarantees that the item is still
2336 : * the right one to delete, which might otherwise be questionable since heap
2337 : * TIDs can get recycled.) This holds true even if the page has been modified
2338 : * by inserts and page splits, so there is no need to consult the LSN.
2339 : *
2340 : * If the pin was released after reading the page, then we re-read it. If it
2341 : * has been modified since we read it (as determined by the LSN), we dare not
2342 : * flag any entries because it is possible that the old entry was vacuumed
2343 : * away and the TID was re-used by a completely different heap tuple.
2344 : */
2345 : void
2346 162912 : _bt_killitems(IndexScanDesc scan)
2347 : {
2348 162912 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2349 : Page page;
2350 : BTPageOpaque opaque;
2351 : OffsetNumber minoff;
2352 : OffsetNumber maxoff;
2353 : int i;
2354 162912 : int numKilled = so->numKilled;
2355 162912 : bool killedsomething = false;
2356 : bool droppedpin PG_USED_FOR_ASSERTS_ONLY;
2357 :
2358 : Assert(BTScanPosIsValid(so->currPos));
2359 :
2360 : /*
2361 : * Always reset the scan state, so we don't look for same items on other
2362 : * pages.
2363 : */
2364 162912 : so->numKilled = 0;
2365 :
2366 162912 : if (BTScanPosIsPinned(so->currPos))
2367 : {
2368 : /*
2369 : * We have held the pin on this page since we read the index tuples,
2370 : * so all we need to do is lock it. The pin will have prevented
2371 : * re-use of any TID on the page, so there is no need to check the
2372 : * LSN.
2373 : */
2374 34736 : droppedpin = false;
2375 34736 : _bt_lockbuf(scan->indexRelation, so->currPos.buf, BT_READ);
2376 :
2377 34736 : page = BufferGetPage(so->currPos.buf);
2378 : }
2379 : else
2380 : {
2381 : Buffer buf;
2382 :
2383 128176 : droppedpin = true;
2384 : /* Attempt to re-read the buffer, getting pin and lock. */
2385 128176 : buf = _bt_getbuf(scan->indexRelation, so->currPos.currPage, BT_READ);
2386 :
2387 128176 : page = BufferGetPage(buf);
2388 128176 : if (BufferGetLSNAtomic(buf) == so->currPos.lsn)
2389 128026 : so->currPos.buf = buf;
2390 : else
2391 : {
2392 : /* Modified while not pinned means hinting is not safe. */
2393 150 : _bt_relbuf(scan->indexRelation, buf);
2394 150 : return;
2395 : }
2396 : }
2397 :
2398 162762 : opaque = BTPageGetOpaque(page);
2399 162762 : minoff = P_FIRSTDATAKEY(opaque);
2400 162762 : maxoff = PageGetMaxOffsetNumber(page);
2401 :
2402 630152 : for (i = 0; i < numKilled; i++)
2403 : {
2404 467390 : int itemIndex = so->killedItems[i];
2405 467390 : BTScanPosItem *kitem = &so->currPos.items[itemIndex];
2406 467390 : OffsetNumber offnum = kitem->indexOffset;
2407 :
2408 : Assert(itemIndex >= so->currPos.firstItem &&
2409 : itemIndex <= so->currPos.lastItem);
2410 467390 : if (offnum < minoff)
2411 0 : continue; /* pure paranoia */
2412 8053174 : while (offnum <= maxoff)
2413 : {
2414 7989352 : ItemId iid = PageGetItemId(page, offnum);
2415 7989352 : IndexTuple ituple = (IndexTuple) PageGetItem(page, iid);
2416 7989352 : bool killtuple = false;
2417 :
2418 7989352 : if (BTreeTupleIsPosting(ituple))
2419 : {
2420 2654048 : int pi = i + 1;
2421 2654048 : int nposting = BTreeTupleGetNPosting(ituple);
2422 : int j;
2423 :
2424 : /*
2425 : * We rely on the convention that heap TIDs in the scanpos
2426 : * items array are stored in ascending heap TID order for a
2427 : * group of TIDs that originally came from a posting list
2428 : * tuple. This convention even applies during backwards
2429 : * scans, where returning the TIDs in descending order might
2430 : * seem more natural. This is about effectiveness, not
2431 : * correctness.
2432 : *
2433 : * Note that the page may have been modified in almost any way
2434 : * since we first read it (in the !droppedpin case), so it's
2435 : * possible that this posting list tuple wasn't a posting list
2436 : * tuple when we first encountered its heap TIDs.
2437 : */
2438 2718704 : for (j = 0; j < nposting; j++)
2439 : {
2440 2716266 : ItemPointer item = BTreeTupleGetPostingN(ituple, j);
2441 :
2442 2716266 : if (!ItemPointerEquals(item, &kitem->heapTid))
2443 2651610 : break; /* out of posting list loop */
2444 :
2445 : /*
2446 : * kitem must have matching offnum when heap TIDs match,
2447 : * though only in the common case where the page can't
2448 : * have been concurrently modified
2449 : */
2450 : Assert(kitem->indexOffset == offnum || !droppedpin);
2451 :
2452 : /*
2453 : * Read-ahead to later kitems here.
2454 : *
2455 : * We rely on the assumption that not advancing kitem here
2456 : * will prevent us from considering the posting list tuple
2457 : * fully dead: its next heap TID won't match in the next
2458 : * loop iteration.
2459 : *
2460 : * If, on the other hand, this is the final heap TID in
2461 : * the posting list tuple, then the tuple gets killed
2462 : * regardless (i.e. we correctly handle the case where the
2463 : * last kitem is also the last heap TID in the last index
2464 : * tuple -- the posting tuple still gets killed).
2465 : */
2466 64656 : if (pi < numKilled)
2467 33928 : kitem = &so->currPos.items[so->killedItems[pi++]];
2468 : }
2469 :
2470 : /*
2471 : * Don't bother advancing the outermost loop's int iterator to
2472 : * avoid processing killed items that relate to the same
2473 : * offnum/posting list tuple. This micro-optimization hardly
2474 : * seems worth it. (Further iterations of the outermost loop
2475 : * will fail to match on this same posting list's first heap
2476 : * TID instead, so we'll advance to the next offnum/index
2477 : * tuple pretty quickly.)
2478 : */
2479 2654048 : if (j == nposting)
2480 2438 : killtuple = true;
2481 : }
2482 5335304 : else if (ItemPointerEquals(&ituple->t_tid, &kitem->heapTid))
2483 402778 : killtuple = true;
2484 :
2485 : /*
2486 : * Mark index item as dead, if it isn't already. Since this
2487 : * happens while holding a buffer lock possibly in shared mode,
2488 : * it's possible that multiple processes attempt to do this
2489 : * simultaneously, leading to multiple full-page images being sent
2490 : * to WAL (if wal_log_hints or data checksums are enabled), which
2491 : * is undesirable.
2492 : */
2493 7989352 : if (killtuple && !ItemIdIsDead(iid))
2494 : {
2495 : /* found the item/all posting list items */
2496 403568 : ItemIdMarkDead(iid);
2497 403568 : killedsomething = true;
2498 403568 : break; /* out of inner search loop */
2499 : }
2500 7585784 : offnum = OffsetNumberNext(offnum);
2501 : }
2502 : }
2503 :
2504 : /*
2505 : * Since this can be redone later if needed, mark as dirty hint.
2506 : *
2507 : * Whenever we mark anything LP_DEAD, we also set the page's
2508 : * BTP_HAS_GARBAGE flag, which is likewise just a hint. (Note that we
2509 : * only rely on the page-level flag in !heapkeyspace indexes.)
2510 : */
2511 162762 : if (killedsomething)
2512 : {
2513 130680 : opaque->btpo_flags |= BTP_HAS_GARBAGE;
2514 130680 : MarkBufferDirtyHint(so->currPos.buf, true);
2515 : }
2516 :
2517 162762 : _bt_unlockbuf(scan->indexRelation, so->currPos.buf);
2518 : }
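/*
 * A hedged sketch of the caller-side convention noted in the header comment:
 * callers accumulate so->killedItems[] entries while consuming tuples, then
 * invoke _bt_killitems just before abandoning the scan position, and only
 * when something was actually recorded:
 *
 *     if (so->numKilled > 0)
 *         _bt_killitems(scan);
 */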
2519 :
2520 :
2521 : /*
2522 : * The following routines manage a shared-memory area in which we track
2523 : * assignment of "vacuum cycle IDs" to currently-active btree vacuuming
2524 : * operations. There is a single counter which increments each time we
2525 : * start a vacuum to assign it a cycle ID. Since multiple vacuums could
2526 : * be active concurrently, we have to track the cycle ID for each active
2527 : * vacuum; this requires at most MaxBackends entries (usually far fewer).
2528 : * We assume at most one vacuum can be active for a given index.
2529 : *
2530 : * Access to the shared memory area is controlled by BtreeVacuumLock.
2531 : * In principle we could use a separate lmgr locktag for each index,
2532 : * but a single LWLock is much cheaper, and given the short time that
2533 : * the lock is ever held, the concurrency hit should be minimal.
2534 : */
2535 :
2536 : typedef struct BTOneVacInfo
2537 : {
2538 : LockRelId relid; /* global identifier of an index */
2539 : BTCycleId cycleid; /* cycle ID for its active VACUUM */
2540 : } BTOneVacInfo;
2541 :
2542 : typedef struct BTVacInfo
2543 : {
2544 : BTCycleId cycle_ctr; /* cycle ID most recently assigned */
2545 : int num_vacuums; /* number of currently active VACUUMs */
2546 : int max_vacuums; /* allocated length of vacuums[] array */
2547 : BTOneVacInfo vacuums[FLEXIBLE_ARRAY_MEMBER];
2548 : } BTVacInfo;
2549 :
2550 : static BTVacInfo *btvacinfo;
2551 :
2552 :
2553 : /*
2554 : * _bt_vacuum_cycleid --- get the active vacuum cycle ID for an index,
2555 : * or zero if there is no active VACUUM
2556 : *
2557 : * Note: for correct interlocking, the caller must already hold pin and
2558 : * exclusive lock on each buffer it will store the cycle ID into. This
2559 : * ensures that even if a VACUUM starts immediately afterwards, it cannot
2560 : * process those pages until the page split is complete.
2561 : */
2562 : BTCycleId
2563 21690 : _bt_vacuum_cycleid(Relation rel)
2564 : {
2565 21690 : BTCycleId result = 0;
2566 : int i;
2567 :
2568 : /* Share lock is enough since this is a read-only operation */
2569 21690 : LWLockAcquire(BtreeVacuumLock, LW_SHARED);
2570 :
2571 21690 : for (i = 0; i < btvacinfo->num_vacuums; i++)
2572 : {
2573 2 : BTOneVacInfo *vac = &btvacinfo->vacuums[i];
2574 :
2575 2 : if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
2576 2 : vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
2577 : {
2578 2 : result = vac->cycleid;
2579 2 : break;
2580 : }
2581 : }
2582 :
2583 21690 : LWLockRelease(BtreeVacuumLock);
2584 21690 : return result;
2585 : }
2586 :
2587 : /*
2588 : * _bt_start_vacuum --- assign a cycle ID to a just-starting VACUUM operation
2589 : *
2590 : * Note: the caller must guarantee that it will eventually call
2591 : * _bt_end_vacuum, else we'll permanently leak an array slot. To ensure
2592 : * that this happens even in elog(FATAL) scenarios, the appropriate coding
2593 : * is not just a PG_TRY, but
2594 : * PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel))
2595 : */
2596 : BTCycleId
2597 2684 : _bt_start_vacuum(Relation rel)
2598 : {
2599 : BTCycleId result;
2600 : int i;
2601 : BTOneVacInfo *vac;
2602 :
2603 2684 : LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
2604 :
2605 : /*
2606 : * Assign the next cycle ID, being careful to avoid zero as well as the
2607 : * reserved high values.
2608 : */
2609 2684 : result = ++(btvacinfo->cycle_ctr);
2610 2684 : if (result == 0 || result > MAX_BT_CYCLE_ID)
2611 30 : result = btvacinfo->cycle_ctr = 1;
2612 :
2613 : /* Let's just make sure there's no entry already for this index */
2614 2684 : for (i = 0; i < btvacinfo->num_vacuums; i++)
2615 : {
2616 0 : vac = &btvacinfo->vacuums[i];
2617 0 : if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
2618 0 : vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
2619 : {
2620 : /*
2621 : * Unlike most places in the backend, we have to explicitly
2622 : * release our LWLock before throwing an error. This is because
2623 : * we expect _bt_end_vacuum() to be called before transaction
2624 : * abort cleanup can run to release LWLocks.
2625 : */
2626 0 : LWLockRelease(BtreeVacuumLock);
2627 0 : elog(ERROR, "multiple active vacuums for index \"%s\"",
2628 : RelationGetRelationName(rel));
2629 : }
2630 : }
2631 :
2632 : /* OK, add an entry */
2633 2684 : if (btvacinfo->num_vacuums >= btvacinfo->max_vacuums)
2634 : {
2635 0 : LWLockRelease(BtreeVacuumLock);
2636 0 : elog(ERROR, "out of btvacinfo slots");
2637 : }
2638 2684 : vac = &btvacinfo->vacuums[btvacinfo->num_vacuums];
2639 2684 : vac->relid = rel->rd_lockInfo.lockRelId;
2640 2684 : vac->cycleid = result;
2641 2684 : btvacinfo->num_vacuums++;
2642 :
2643 2684 : LWLockRelease(BtreeVacuumLock);
2644 2684 : return result;
2645 : }
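/*
 * The cleanup discipline demanded by the header comment, sketched from the
 * caller's side (simplified; the real caller following this pattern is
 * btbulkdelete):
 *
 *     BTCycleId   cycleid;
 *
 *     PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
 *     {
 *         cycleid = _bt_start_vacuum(rel);
 *
 *         ... scan the index, stamping cycleid into pages split en route ...
 *     }
 *     PG_END_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback,
 *                                 PointerGetDatum(rel));
 *     _bt_end_vacuum(rel);
 */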
2646 :
2647 : /*
2648 : * _bt_end_vacuum --- mark a btree VACUUM operation as done
2649 : *
2650 : * Note: this is deliberately coded not to complain if no entry is found;
2651 : * this allows the caller to put PG_TRY around the start_vacuum operation.
2652 : */
2653 : void
2654 2684 : _bt_end_vacuum(Relation rel)
2655 : {
2656 : int i;
2657 :
2658 2684 : LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
2659 :
2660 : /* Find the array entry */
2661 2684 : for (i = 0; i < btvacinfo->num_vacuums; i++)
2662 : {
2663 2684 : BTOneVacInfo *vac = &btvacinfo->vacuums[i];
2664 :
2665 2684 : if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
2666 2684 : vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
2667 : {
2668 : /* Remove it by shifting down the last entry */
2669 2684 : *vac = btvacinfo->vacuums[btvacinfo->num_vacuums - 1];
2670 2684 : btvacinfo->num_vacuums--;
2671 2684 : break;
2672 : }
2673 : }
2674 :
2675 2684 : LWLockRelease(BtreeVacuumLock);
2676 2684 : }
2677 :
2678 : /*
2679 : * _bt_end_vacuum wrapped as an on_shmem_exit callback function
2680 : */
2681 : void
2682 0 : _bt_end_vacuum_callback(int code, Datum arg)
2683 : {
2684 0 : _bt_end_vacuum((Relation) DatumGetPointer(arg));
2685 0 : }
2686 :
2687 : /*
2688 : * BTreeShmemSize --- report amount of shared memory space needed
2689 : */
2690 : Size
2691 5826 : BTreeShmemSize(void)
2692 : {
2693 : Size size;
2694 :
2695 5826 : size = offsetof(BTVacInfo, vacuums);
2696 5826 : size = add_size(size, mul_size(MaxBackends, sizeof(BTOneVacInfo)));
2697 5826 : return size;
2698 : }
2699 :
2700 : /*
2701 : * BTreeShmemInit --- initialize this module's shared memory
2702 : */
2703 : void
2704 2032 : BTreeShmemInit(void)
2705 : {
2706 : bool found;
2707 :
2708 2032 : btvacinfo = (BTVacInfo *) ShmemInitStruct("BTree Vacuum State",
2709 : BTreeShmemSize(),
2710 : &found);
2711 :
2712 2032 : if (!IsUnderPostmaster)
2713 : {
2714 : /* Initialize shared memory area */
2715 : Assert(!found);
2716 :
2717 : /*
2718 : * It doesn't really matter what the cycle counter starts at, but
2719 : * having it always start the same doesn't seem good. Seed with
2720 : * low-order bits of time() instead.
2721 : */
2722 2032 : btvacinfo->cycle_ctr = (BTCycleId) time(NULL);
2723 :
2724 2032 : btvacinfo->num_vacuums = 0;
2725 2032 : btvacinfo->max_vacuums = MaxBackends;
2726 : }
2727 : else
2728 : Assert(found);
2729 2032 : }
2730 :
2731 : bytea *
2732 322 : btoptions(Datum reloptions, bool validate)
2733 : {
2734 : static const relopt_parse_elt tab[] = {
2735 : {"fillfactor", RELOPT_TYPE_INT, offsetof(BTOptions, fillfactor)},
2736 : {"vacuum_cleanup_index_scale_factor", RELOPT_TYPE_REAL,
2737 : offsetof(BTOptions, vacuum_cleanup_index_scale_factor)},
2738 : {"deduplicate_items", RELOPT_TYPE_BOOL,
2739 : offsetof(BTOptions, deduplicate_items)}
2740 : };
2741 :
2742 322 : return (bytea *) build_reloptions(reloptions, validate,
2743 : RELOPT_KIND_BTREE,
2744 : sizeof(BTOptions),
2745 : tab, lengthof(tab));
2746 : }
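/*
 * Parsed BTOptions reach readers through accessor macros in nbtree.h (macro
 * name treated as an assumption about that header):
 *
 *     int         fillfactor = BTGetFillFactor(rel);
 *
 * which falls back to the compiled-in default whenever rel->rd_options is
 * NULL, i.e. when the index was created without a WITH (...) clause.
 */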
2747 :
2748 : /*
2749 : * btproperty() -- Check boolean properties of indexes.
2750 : *
2751 : * This is optional, but handling AMPROP_RETURNABLE here saves opening the rel
2752 : * to call btcanreturn.
2753 : */
2754 : bool
2755 756 : btproperty(Oid index_oid, int attno,
2756 : IndexAMProperty prop, const char *propname,
2757 : bool *res, bool *isnull)
2758 : {
2759 756 : switch (prop)
2760 : {
2761 42 : case AMPROP_RETURNABLE:
2762 : /* answer only for columns, not AM or whole index */
2763 42 : if (attno == 0)
2764 12 : return false;
2765 : /* otherwise, btree can always return data */
2766 30 : *res = true;
2767 30 : return true;
2768 :
2769 714 : default:
2770 714 : return false; /* punt to generic code */
2771 : }
2772 : }
2773 :
2774 : /*
2775 : * btbuildphasename() -- Return name of index build phase.
2776 : */
2777 : char *
2778 0 : btbuildphasename(int64 phasenum)
2779 : {
2780 0 : switch (phasenum)
2781 : {
2782 0 : case PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE:
2783 0 : return "initializing";
2784 0 : case PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN:
2785 0 : return "scanning table";
2786 0 : case PROGRESS_BTREE_PHASE_PERFORMSORT_1:
2787 0 : return "sorting live tuples";
2788 0 : case PROGRESS_BTREE_PHASE_PERFORMSORT_2:
2789 0 : return "sorting dead tuples";
2790 0 : case PROGRESS_BTREE_PHASE_LEAF_LOAD:
2791 0 : return "loading tuples in tree";
2792 0 : default:
2793 0 : return NULL;
2794 : }
2795 : }
2796 :
2797 : /*
2798 : * _bt_truncate() -- create tuple without unneeded suffix attributes.
2799 : *
2800 : * Returns truncated pivot index tuple allocated in caller's memory context,
2801 : * with key attributes copied from caller's firstright argument. If rel is
2802 : * an INCLUDE index, non-key attributes will definitely be truncated away,
2803 : * since they're not part of the key space. More aggressive suffix
2804 : * truncation can take place when it's clear that the returned tuple does not
2805 : * need one or more suffix key attributes. We only need to keep firstright
2806 : * attributes up to and including the first non-lastleft-equal attribute.
2807 : * Caller's insertion scankey is used to compare the tuples; the scankey's
2808 : * argument values are not considered here.
2809 : *
2810 : * Note that returned tuple's t_tid offset will hold the number of attributes
2811 : * present, so the original item pointer offset is not represented. Caller
2812 : * should only change truncated tuple's downlink. Note also that truncated
2813 : * key attributes are treated as containing "minus infinity" values by
2814 : * _bt_compare().
2815 : *
2816 : * In the worst case (when a heap TID must be appended to distinguish lastleft
2817 : * from firstright), the size of the returned tuple is the size of firstright
2818 : * plus the size of an additional MAXALIGN()'d item pointer. This guarantee
2819 : * is important, since callers need to stay under the 1/3 of a page
2820 : * restriction on tuple size. If this routine is ever taught to truncate
2821 : * within an attribute/datum, it will need to avoid returning an enlarged
2822 : * tuple to caller when truncation + TOAST compression ends up enlarging the
2823 : * final datum.
2824 : */
2825 : IndexTuple
2826 60528 : _bt_truncate(Relation rel, IndexTuple lastleft, IndexTuple firstright,
2827 : BTScanInsert itup_key)
2828 : {
2829 60528 : TupleDesc itupdesc = RelationGetDescr(rel);
2830 60528 : int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
2831 : int keepnatts;
2832 : IndexTuple pivot;
2833 : IndexTuple tidpivot;
2834 : ItemPointer pivotheaptid;
2835 : Size newsize;
2836 :
2837 : /*
2838 : * We should only ever truncate non-pivot tuples from leaf pages. It's
2839 : * never okay to truncate when splitting an internal page.
2840 : */
2841 : Assert(!BTreeTupleIsPivot(lastleft) && !BTreeTupleIsPivot(firstright));
2842 :
2843 : /* Determine how many attributes must be kept in truncated tuple */
2844 60528 : keepnatts = _bt_keep_natts(rel, lastleft, firstright, itup_key);
2845 :
2846 : #ifdef DEBUG_NO_TRUNCATE
2847 : /* Force truncation to be ineffective for testing purposes */
2848 : keepnatts = nkeyatts + 1;
2849 : #endif
2850 :
2851 60528 : pivot = index_truncate_tuple(itupdesc, firstright,
2852 : Min(keepnatts, nkeyatts));
2853 :
2854 60528 : if (BTreeTupleIsPosting(pivot))
2855 : {
2856 : /*
2857 : * index_truncate_tuple() just returns a straight copy of firstright
2858 : * when it has no attributes to truncate. When that happens, we may
2859 : * need to truncate away a posting list here instead.
2860 : */
2861 : Assert(keepnatts == nkeyatts || keepnatts == nkeyatts + 1);
2862 : Assert(IndexRelationGetNumberOfAttributes(rel) == nkeyatts);
2863 1308 : pivot->t_info &= ~INDEX_SIZE_MASK;
2864 1308 : pivot->t_info |= MAXALIGN(BTreeTupleGetPostingOffset(firstright));
2865 : }
2866 :
2867 : /*
2868 : * If there is a distinguishing key attribute within pivot tuple, we're
2869 : * done
2870 : */
2871 60528 : if (keepnatts <= nkeyatts)
2872 : {
2873 59432 : BTreeTupleSetNAtts(pivot, keepnatts, false);
2874 59432 : return pivot;
2875 : }
2876 :
2877 : /*
2878 : * We have to store a heap TID in the new pivot tuple, since no non-TID
2879 : * key attribute value in firstright distinguishes the right side of the
2880 : * split from the left side. nbtree conceptualizes this case as an
2881 : * inability to truncate away any key attributes, since heap TID is
2882 : * treated as just another key attribute (despite lacking a pg_attribute
2883 : * entry).
2884 : *
2885 : * Use enlarged space that holds a copy of pivot. We need the extra space
2886 : * to store a heap TID at the end (using the special pivot tuple
2887 : * representation). Note that the original pivot already has firstright's
2888 : * possible posting list/non-key attribute values removed at this point.
2889 : */
2890 1096 : newsize = MAXALIGN(IndexTupleSize(pivot)) + MAXALIGN(sizeof(ItemPointerData));
2891 1096 : tidpivot = palloc0(newsize);
2892 1096 : memcpy(tidpivot, pivot, MAXALIGN(IndexTupleSize(pivot)));
2893 : /* Cannot leak memory here */
2894 1096 : pfree(pivot);
2895 :
2896 : /*
2897 : * Store all of firstright's key attribute values plus a tiebreaker heap
2898 : * TID value in enlarged pivot tuple
2899 : */
2900 1096 : tidpivot->t_info &= ~INDEX_SIZE_MASK;
2901 1096 : tidpivot->t_info |= newsize;
2902 1096 : BTreeTupleSetNAtts(tidpivot, nkeyatts, true);
2903 1096 : pivotheaptid = BTreeTupleGetHeapTID(tidpivot);
2904 :
2905 : /*
2906 : * Lehman & Yao use lastleft as the leaf high key in all cases, but don't
2907 : * consider suffix truncation. It seems like a good idea to follow that
2908 : * example in cases where no truncation takes place -- use lastleft's heap
2909 : * TID. (This is also the closest value to negative infinity that's
2910 : * legally usable.)
2911 : */
2912 1096 : ItemPointerCopy(BTreeTupleGetMaxHeapTID(lastleft), pivotheaptid);
2913 :
2914 : /*
2915 : * We're done. Assert() that heap TID invariants hold before returning.
2916 : *
2917 : * Lehman and Yao require that the downlink to the right page, which is to
2918 : * be inserted into the parent page in the second phase of a page split, be
2919 : * a strict lower bound on items on the right page, and a non-strict upper
2920 : * bound for items on the left page. Assert that heap TIDs follow these
2921 : * invariants, since a heap TID value is apparently needed as a
2922 : * tiebreaker.
2923 : */
2924 : #ifndef DEBUG_NO_TRUNCATE
2925 : Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(lastleft),
2926 : BTreeTupleGetHeapTID(firstright)) < 0);
2927 : Assert(ItemPointerCompare(pivotheaptid,
2928 : BTreeTupleGetHeapTID(lastleft)) >= 0);
2929 : Assert(ItemPointerCompare(pivotheaptid,
2930 : BTreeTupleGetHeapTID(firstright)) < 0);
2931 : #else
2932 :
2933 : /*
2934 : * Those invariants aren't guaranteed to hold for lastleft + firstright
2935 : * heap TID attribute values when they're considered here only because
2936 : * DEBUG_NO_TRUNCATE is defined (a heap TID is probably not actually
2937 : * needed as a tiebreaker). DEBUG_NO_TRUNCATE must therefore use a heap
2938 : * TID value that always works as a strict lower bound for items to the
2939 : * right. In particular, it must avoid using firstright's leading key
2940 : * attribute values along with lastleft's heap TID value when lastleft's
2941 : * TID happens to be greater than firstright's TID.
2942 : */
2943 : ItemPointerCopy(BTreeTupleGetHeapTID(firstright), pivotheaptid);
2944 :
2945 : /*
2946 : * Pivot heap TID should never be fully equal to firstright. Note that
2947 : * the pivot heap TID will still end up equal to lastleft's heap TID when
2948 : * that's the only usable value.
2949 : */
2950 : ItemPointerSetOffsetNumber(pivotheaptid,
2951 : OffsetNumberPrev(ItemPointerGetOffsetNumber(pivotheaptid)));
2952 : Assert(ItemPointerCompare(pivotheaptid,
2953 : BTreeTupleGetHeapTID(firstright)) < 0);
2954 : #endif
2955 :
2956 1096 : return tidpivot;
2957 : }
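
/*
 * Worked example (editorial; default 8 KB BLCKSZ and 64-bit MAXALIGN
 * assumed): the worst-case guarantee from _bt_truncate's header comment.
 * When a heap TID must be appended, the enlarged pivot is
 *
 *		MAXALIGN(IndexTupleSize(pivot)) + MAXALIGN(sizeof(ItemPointerData))
 *
 * i.e. at most BTMaxItemSize (2704 bytes) plus MAXALIGN(6) = 8 bytes, which
 * is exactly BTMaxItemSizeNoHeapTid (2712 bytes) -- still inside the
 * 1/3-of-a-page budget that _bt_check_third_page() enforces below.
 */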
2958 :
2959 : /*
2960 : * _bt_keep_natts - how many key attributes to keep when truncating.
2961 : *
2962 : * Caller provides two tuples that enclose a split point. Caller's insertion
2963 : * scankey is used to compare the tuples; the scankey's argument values are
2964 : * not considered here.
2965 : *
2966 : * This can return a number of attributes that is one greater than the
2967 : * number of key attributes for the index relation. This indicates that the
2968 : * caller must use a heap TID as a unique-ifier in the new pivot tuple.
2969 : */
2970 : static int
2971 60528 : _bt_keep_natts(Relation rel, IndexTuple lastleft, IndexTuple firstright,
2972 : BTScanInsert itup_key)
2973 : {
2974 60528 : int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
2975 60528 : TupleDesc itupdesc = RelationGetDescr(rel);
2976 : int keepnatts;
2977 : ScanKey scankey;
2978 :
2979 : /*
2980 : * _bt_compare() treats truncated key attributes as having the value minus
2981 : * infinity, which would break searches within !heapkeyspace indexes. We
2982 : * must still truncate away non-key attribute values, though.
2983 : */
2984 60528 : if (!itup_key->heapkeyspace)
2985 0 : return nkeyatts;
2986 :
2987 60528 : scankey = itup_key->scankeys;
2988 60528 : keepnatts = 1;
2989 73538 : for (int attnum = 1; attnum <= nkeyatts; attnum++, scankey++)
2990 : {
2991 : Datum datum1,
2992 : datum2;
2993 : bool isNull1,
2994 : isNull2;
2995 :
2996 72442 : datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
2997 72442 : datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
2998 :
2999 72442 : if (isNull1 != isNull2)
3000 59432 : break;
3001 :
3002 144854 : if (!isNull1 &&
3003 72412 : DatumGetInt32(FunctionCall2Coll(&scankey->sk_func,
3004 : scankey->sk_collation,
3005 : datum1,
3006 : datum2)) != 0)
3007 59432 : break;
3008 :
3009 13010 : keepnatts++;
3010 : }
3011 :
3012 : /*
3013 : * Assert that _bt_keep_natts_fast() agrees with us in passing. This is
3014 : * expected in an allequalimage index.
3015 : */
3016 : Assert(!itup_key->allequalimage ||
3017 : keepnatts == _bt_keep_natts_fast(rel, lastleft, firstright));
3018 :
3019 60528 : return keepnatts;
3020 : }
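
/*
 * Worked example (editorial): for an index on (a int4, b text), comparing
 * lastleft = (5, 'apple') against firstright = (5, 'banana') returns 2;
 * attribute a is equal, so distinguishing attribute b must be kept.  Were
 * both tuples (5, 'apple'), the loop would run to completion and return 3
 * (nkeyatts + 1), telling _bt_truncate that only the heap TID tiebreaker
 * can separate the two halves of the split.
 */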
3021 :
3022 : /*
3023 : * _bt_keep_natts_fast - fast bitwise variant of _bt_keep_natts.
3024 : *
3025 : * This is exported so that a candidate split point can have its effect on
3026 : * suffix truncation inexpensively evaluated ahead of time when finding a
3027 : * split location. A naive bitwise approach to datum comparisons is used to
3028 : * save cycles.
3029 : *
3030 : * The approach taken here usually provides the same answer as _bt_keep_natts
3031 : * will (for the same pair of tuples from a heapkeyspace index), since the
3032 : * majority of btree opclasses can never indicate that two datums are equal
3033 : * unless they're bitwise equal after detoasting. When an index only has
3034 : * "equal image" columns, routine is guaranteed to give the same result as
3035 : * _bt_keep_natts would.
3036 : *
3037 : * Callers can rely on the fact that attributes considered equal here are
3038 : * definitely also equal according to _bt_keep_natts, even when the index uses
3039 : * an opclass or collation that is not "allequalimage"/deduplication-safe.
3040 : * This weaker guarantee is good enough for the nbtsplitloc.c caller, since false
3041 : * negatives generally only have the effect of making leaf page splits use a
3042 : * more balanced split point.
3043 : */
3044 : int
3045 13046466 : _bt_keep_natts_fast(Relation rel, IndexTuple lastleft, IndexTuple firstright)
3046 : {
3047 13046466 : TupleDesc itupdesc = RelationGetDescr(rel);
3048 13046466 : int keysz = IndexRelationGetNumberOfKeyAttributes(rel);
3049 : int keepnatts;
3050 :
3051 13046466 : keepnatts = 1;
3052 21833022 : for (int attnum = 1; attnum <= keysz; attnum++)
3053 : {
3054 : Datum datum1,
3055 : datum2;
3056 : bool isNull1,
3057 : isNull2;
3058 : CompactAttribute *att;
3059 :
3060 19507230 : datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
3061 19507230 : datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
3062 19507230 : att = TupleDescCompactAttr(itupdesc, attnum - 1);
3063 :
3064 19507230 : if (isNull1 != isNull2)
3065 10720674 : break;
3066 :
3067 19507080 : if (!isNull1 &&
3068 19460024 : !datum_image_eq(datum1, datum2, att->attbyval, att->attlen))
3069 10720524 : break;
3070 :
3071 8786556 : keepnatts++;
3072 : }
3073 :
3074 13046466 : return keepnatts;
3075 : }
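
/*
 * Example of a harmless false negative (editorial): the numeric opclass
 * treats 5 and 5.0 as equal, but their stored datum images differ (display
 * scale is part of the on-disk value), so this routine reports them unequal
 * where _bt_keep_natts would not.  Text equality under a nondeterministic
 * (e.g. case-insensitive) collation behaves the same way.  The only cost is
 * a less aggressively truncated candidate split point.
 */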
3076 :
3077 : /*
3078 : * _bt_check_natts() -- Verify tuple has expected number of attributes.
3079 : *
3080 : * Returns a value indicating whether the expected number of attributes was
3081 : * found for a particular offset on the page. This can be used as a general-purpose
3082 : * sanity check.
3083 : *
3084 : * Testing a tuple directly with BTreeTupleGetNAtts() should generally be
3085 : * preferred to calling here. That's usually more convenient, and is always
3086 : * more explicit. Call here instead when offnum's tuple may be a negative
3087 : * infinity tuple that uses the pre-v11 on-disk representation, or when a low
3088 : * context check is appropriate. This routine is as strict as possible about
3089 : * what is expected on each version of btree.
3090 : */
3091 : bool
3092 4046372 : _bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
3093 : {
3094 4046372 : int16 natts = IndexRelationGetNumberOfAttributes(rel);
3095 4046372 : int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
3096 4046372 : BTPageOpaque opaque = BTPageGetOpaque(page);
3097 : IndexTuple itup;
3098 : int tupnatts;
3099 :
3100 : /*
3101 : * We cannot reliably test a deleted or half-dead page, since they have
3102 : * dummy high keys
3103 : */
3104 4046372 : if (P_IGNORE(opaque))
3105 0 : return true;
3106 :
3107 : Assert(offnum >= FirstOffsetNumber &&
3108 : offnum <= PageGetMaxOffsetNumber(page));
3109 :
3110 4046372 : itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
3111 4046372 : tupnatts = BTreeTupleGetNAtts(itup, rel);
3112 :
3113 : /* !heapkeyspace indexes do not support deduplication */
3114 4046372 : if (!heapkeyspace && BTreeTupleIsPosting(itup))
3115 0 : return false;
3116 :
3117 : /* Posting list tuples should never have "pivot heap TID" bit set */
3118 4046372 : if (BTreeTupleIsPosting(itup) &&
3119 21896 : (ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
3120 : BT_PIVOT_HEAP_TID_ATTR) != 0)
3121 0 : return false;
3122 :
3123 : /* INCLUDE indexes do not support deduplication */
3124 4046372 : if (natts != nkeyatts && BTreeTupleIsPosting(itup))
3125 0 : return false;
3126 :
3127 4046372 : if (P_ISLEAF(opaque))
3128 : {
3129 4032044 : if (offnum >= P_FIRSTDATAKEY(opaque))
3130 : {
3131 : /*
3132 : * Non-pivot tuple should never be explicitly marked as a pivot
3133 : * tuple
3134 : */
3135 4018836 : if (BTreeTupleIsPivot(itup))
3136 0 : return false;
3137 :
3138 : /*
3139 : * Leaf tuples that are not the page high key (non-pivot tuples)
3140 : * should never be truncated. (Note that tupnatts must have been
3141 : * inferred, even with a posting list tuple, because only pivot
3142 : * tuples store tupnatts directly.)
3143 : */
3144 4018836 : return tupnatts == natts;
3145 : }
3146 : else
3147 : {
3148 : /*
3149 : * Rightmost page doesn't contain a page high key, so tuple was
3150 : * checked above as ordinary leaf tuple
3151 : */
3152 : Assert(!P_RIGHTMOST(opaque));
3153 :
3154 : /*
3155 : * !heapkeyspace high key tuple contains only key attributes. Note
3156 : * that tupnatts will only have been explicitly represented in
3157 : * !heapkeyspace indexes that happen to have non-key attributes.
3158 : */
3159 13208 : if (!heapkeyspace)
3160 0 : return tupnatts == nkeyatts;
3161 :
3162 : /* Use generic heapkeyspace pivot tuple handling */
3163 : }
3164 : }
3165 : else /* !P_ISLEAF(opaque) */
3166 : {
3167 14328 : if (offnum == P_FIRSTDATAKEY(opaque))
3168 : {
3169 : /*
3170 : * The first tuple on any internal page (possibly the first after
3171 : * its high key) is its negative infinity tuple. Negative
3172 : * infinity tuples are always truncated to zero attributes. They
3173 : * are a particular kind of pivot tuple.
3174 : */
3175 1114 : if (heapkeyspace)
3176 1114 : return tupnatts == 0;
3177 :
3178 : /*
3179 : * The number of attributes won't be explicitly represented if the
3180 : * negative infinity tuple was generated during a page split that
3181 : * occurred with a version of Postgres before v11. There must be
3182 : * a problem when there is an explicit representation that is
3183 : * non-zero, or when there is no explicit representation and the
3184 : * tuple is evidently not a pre-pg_upgrade tuple.
3185 : *
3186 : * Prior to v11, downlinks always had P_HIKEY as their offset.
3187 : * Accept that as an alternative indication of a valid
3188 : * !heapkeyspace negative infinity tuple.
3189 : */
3190 0 : return tupnatts == 0 ||
3191 0 : ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY;
3192 : }
3193 : else
3194 : {
3195 : /*
3196 : * !heapkeyspace downlink tuple with separator key contains only
3197 : * key attributes. Note that tupnatts will only have been
3198 : * explicitly represented in !heapkeyspace indexes that happen to
3199 : * have non-key attributes.
3200 : */
3201 13214 : if (!heapkeyspace)
3202 0 : return tupnatts == nkeyatts;
3203 :
3204 : /* Use generic heapkeyspace pivot tuple handling */
3205 : }
3206 : }
3207 :
3208 : /* Handle heapkeyspace pivot tuples (excluding minus infinity items) */
3209 : Assert(heapkeyspace);
3210 :
3211 : /*
3212 : * Explicit representation of the number of attributes is mandatory with
3213 : * heapkeyspace index pivot tuples, regardless of whether or not there are
3214 : * non-key attributes.
3215 : */
3216 26422 : if (!BTreeTupleIsPivot(itup))
3217 0 : return false;
3218 :
3219 : /* Pivot tuple should not use posting list representation (redundant) */
3220 26422 : if (BTreeTupleIsPosting(itup))
3221 0 : return false;
3222 :
3223 : /*
3224 : * Heap TID is a tiebreaker key attribute, so it cannot be untruncated
3225 : * when any other key attribute is truncated
3226 : */
3227 26422 : if (BTreeTupleGetHeapTID(itup) != NULL && tupnatts != nkeyatts)
3228 0 : return false;
3229 :
3230 : /*
3231 : * Pivot tuple must have at least one untruncated key attribute (minus
3232 : * infinity pivot tuples are the only exception). Pivot tuples can never
3233 : * represent that there is a value present for a key attribute that
3234 : * exceeds pg_index.indnkeyatts for the index.
3235 : */
3236 26422 : return tupnatts > 0 && tupnatts <= nkeyatts;
3237 : }
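
/*
 * Editorial sketch of a caller, loosely modeled on contrib/amcheck; the
 * function name and error wording are illustrative assumptions:
 */
static void
report_bad_natts_sketch(Relation rel, bool heapkeyspace, Page page,
						OffsetNumber offnum)
{
	if (!_bt_check_natts(rel, heapkeyspace, page, offnum))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("wrong number of index tuple attributes in index \"%s\"",
						RelationGetRelationName(rel))));
}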
3238 :
3239 : /*
3240 : *
3241 : * _bt_check_third_page() -- check whether tuple fits on a btree page at all.
3242 : *
3243 : * We actually need to be able to fit three items on every page, so restrict
3244 : * any one item to 1/3 the per-page available space. Note that itemsz should
3245 : * not include the ItemId overhead.
3246 : *
3247 : * It might be useful to apply TOAST methods rather than throw an error here.
3248 : * Using out of line storage would break assumptions made by suffix truncation
3249 : * and by contrib/amcheck, though.
3250 : */
3251 : void
3252 264 : _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace,
3253 : Page page, IndexTuple newtup)
3254 : {
3255 : Size itemsz;
3256 : BTPageOpaque opaque;
3257 :
3258 264 : itemsz = MAXALIGN(IndexTupleSize(newtup));
3259 :
3260 : /* Double check item size against limit */
3261 264 : if (itemsz <= BTMaxItemSize)
3262 0 : return;
3263 :
3264 : /*
3265 : * Tuple is probably too large to fit on page, but it's possible that the
3266 : * index uses version 2 or version 3, or that page is an internal page, in
3267 : * which case a slightly higher limit applies.
3268 : */
3269 264 : if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid)
3270 264 : return;
3271 :
3272 : /*
3273 : * Internal page insertions cannot fail here, because that would mean that
3274 : * an earlier leaf level insertion that should have failed didn't
3275 : */
3276 0 : opaque = BTPageGetOpaque(page);
3277 0 : if (!P_ISLEAF(opaque))
3278 0 : elog(ERROR, "cannot insert oversized tuple of size %zu on internal page of index \"%s\"",
3279 : itemsz, RelationGetRelationName(rel));
3280 :
3281 0 : ereport(ERROR,
3282 : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
3283 : errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"",
3284 : itemsz,
3285 : needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION,
3286 : needheaptidspace ? BTMaxItemSize : BTMaxItemSizeNoHeapTid,
3287 : RelationGetRelationName(rel)),
3288 : errdetail("Index row references tuple (%u,%u) in relation \"%s\".",
3289 : ItemPointerGetBlockNumber(BTreeTupleGetHeapTID(newtup)),
3290 : ItemPointerGetOffsetNumber(BTreeTupleGetHeapTID(newtup)),
3291 : RelationGetRelationName(heap)),
3292 : errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
3293 : "Consider a function index of an MD5 hash of the value, "
3294 : "or use full text indexing."),
3295 : errtableconstraint(heap, RelationGetRelationName(rel))));
3296 : }
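
/*
 * Worked arithmetic (editorial; default 8192-byte pages assumed): the
 * "1/3 of a page" budget is what remains after the page header, three line
 * pointers, and the btree special space, divided by three and MAXALIGN'd
 * down.  That yields BTMaxItemSizeNoHeapTid = 2712 bytes; reserving
 * MAXALIGN(sizeof(ItemPointerData)) = 8 further bytes for a possible heap
 * TID tiebreaker gives BTMaxItemSize = 2704 bytes.  These are the two
 * limits the version-specific error message above reports.
 */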
3297 :
3298 : /*
3299 : * Are all attributes in rel "equality is image equality" attributes?
3300 : *
3301 : * We use each attribute's BTEQUALIMAGE_PROC opclass procedure. If any
3302 : * opclass either lacks a BTEQUALIMAGE_PROC procedure or returns false, we
3303 : * return false; otherwise we return true.
3304 : *
3305 : * Returned boolean value is stored in index metapage during index builds.
3306 : * Deduplication can only be used when we return true.
3307 : */
3308 : bool
3309 56448 : _bt_allequalimage(Relation rel, bool debugmessage)
3310 : {
3311 56448 : bool allequalimage = true;
3312 :
3313 : /* INCLUDE indexes can never support deduplication */
3314 56448 : if (IndexRelationGetNumberOfAttributes(rel) !=
3315 56448 : IndexRelationGetNumberOfKeyAttributes(rel))
3316 272 : return false;
3317 :
3318 148182 : for (int i = 0; i < IndexRelationGetNumberOfKeyAttributes(rel); i++)
3319 : {
3320 92508 : Oid opfamily = rel->rd_opfamily[i];
3321 92508 : Oid opcintype = rel->rd_opcintype[i];
3322 92508 : Oid collation = rel->rd_indcollation[i];
3323 : Oid equalimageproc;
3324 :
3325 92508 : equalimageproc = get_opfamily_proc(opfamily, opcintype, opcintype,
3326 : BTEQUALIMAGE_PROC);
3327 :
3328 : /*
3329 : * If there is no BTEQUALIMAGE_PROC then deduplication is assumed to
3330 : * be unsafe. Otherwise, actually call proc and see what it says.
3331 : */
3332 92508 : if (!OidIsValid(equalimageproc) ||
3333 92050 : !DatumGetBool(OidFunctionCall1Coll(equalimageproc, collation,
3334 : ObjectIdGetDatum(opcintype))))
3335 : {
3336 502 : allequalimage = false;
3337 502 : break;
3338 : }
3339 : }
3340 :
3341 56176 : if (debugmessage)
3342 : {
3343 48102 : if (allequalimage)
3344 47600 : elog(DEBUG1, "index \"%s\" can safely use deduplication",
3345 : RelationGetRelationName(rel));
3346 : else
3347 502 : elog(DEBUG1, "index \"%s\" cannot use deduplication",
3348 : RelationGetRelationName(rel));
3349 : }
3350 :
3351 56176 : return allequalimage;
3352 : }
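
/*
 * Usage note (editorial): opclasses opt in by providing btree support
 * function 4 (BTEQUALIMAGE_PROC), usually the generic btequalimage().
 * Illustrative DDL for a hypothetical opfamily:
 *
 *		ALTER OPERATOR FAMILY my_ops USING btree
 *			ADD FUNCTION 4 (mytype, mytype) btequalimage(oid);
 *
 * Opclasses whose equality is looser than bitwise image equality (numeric
 * with its display scale, text under nondeterministic collations) either
 * omit the proc or return false from it, which disables deduplication.
 */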