Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nbtutils.c
4 : * Utility code for Postgres btree implementation.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/nbtree/nbtutils.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 :
16 : #include "postgres.h"
17 :
18 : #include <time.h>
19 :
20 : #include "access/nbtree.h"
21 : #include "access/reloptions.h"
22 : #include "commands/progress.h"
23 : #include "miscadmin.h"
24 : #include "utils/datum.h"
25 : #include "utils/lsyscache.h"
26 :
27 : #define LOOK_AHEAD_REQUIRED_RECHECKS 3
28 : #define LOOK_AHEAD_DEFAULT_DISTANCE 5
29 : #define NSKIPADVANCES_THRESHOLD 3
30 :
31 : static inline int32 _bt_compare_array_skey(FmgrInfo *orderproc,
32 : Datum tupdatum, bool tupnull,
33 : Datum arrdatum, ScanKey cur);
34 : static void _bt_binsrch_skiparray_skey(bool cur_elem_trig, ScanDirection dir,
35 : Datum tupdatum, bool tupnull,
36 : BTArrayKeyInfo *array, ScanKey cur,
37 : int32 *set_elem_result);
38 : static void _bt_skiparray_set_element(Relation rel, ScanKey skey, BTArrayKeyInfo *array,
39 : int32 set_elem_result, Datum tupdatum, bool tupnull);
40 : static void _bt_skiparray_set_isnull(Relation rel, ScanKey skey, BTArrayKeyInfo *array);
41 : static void _bt_array_set_low_or_high(Relation rel, ScanKey skey,
42 : BTArrayKeyInfo *array, bool low_not_high);
43 : static bool _bt_array_decrement(Relation rel, ScanKey skey, BTArrayKeyInfo *array);
44 : static bool _bt_array_increment(Relation rel, ScanKey skey, BTArrayKeyInfo *array);
45 : static bool _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir,
46 : bool *skip_array_set);
47 : static bool _bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir,
48 : IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
49 : bool readpagetup, int sktrig, bool *scanBehind);
50 : static bool _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
51 : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
52 : int sktrig, bool sktrig_required);
53 : #ifdef USE_ASSERT_CHECKING
54 : static bool _bt_verify_keys_with_arraykeys(IndexScanDesc scan);
55 : #endif
56 : static bool _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir,
57 : IndexTuple finaltup);
58 : static bool _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
59 : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
60 : bool advancenonrequired, bool forcenonrequired,
61 : bool *continuescan, int *ikey);
62 : static bool _bt_check_rowcompare(ScanKey skey,
63 : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
64 : ScanDirection dir, bool forcenonrequired, bool *continuescan);
65 : static void _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
66 : int tupnatts, TupleDesc tupdesc);
67 : static int _bt_keep_natts(Relation rel, IndexTuple lastleft,
68 : IndexTuple firstright, BTScanInsert itup_key);
69 :
70 :
71 : /*
72 : * _bt_mkscankey
73 : * Build an insertion scan key that contains comparison data from itup
74 : * as well as comparator routines appropriate to the key datatypes.
75 : *
76 : * The result is intended for use with _bt_compare() and _bt_truncate().
77 : * Callers that don't need to fill out the insertion scankey arguments
78 : * (e.g. they use an ad-hoc comparison routine, or only need a scankey
79 : * for _bt_truncate()) can pass a NULL index tuple. The scankey will
80 : * be initialized as if an "all truncated" pivot tuple was passed
81 : * instead.
82 : *
83 : * Note that we may occasionally have to share lock the metapage to
84 : * determine whether or not the keys in the index are expected to be
85 : * unique (i.e. if this is a "heapkeyspace" index). We assume a
86 : * heapkeyspace index when caller passes a NULL tuple, allowing index
87 : * build callers to avoid accessing the non-existent metapage. We
88 : * also assume that the index is _not_ allequalimage when a NULL tuple
89 : * is passed; CREATE INDEX callers call _bt_allequalimage() to set the
90 : * field themselves.
91 : */
92 : BTScanInsert
93 11873826 : _bt_mkscankey(Relation rel, IndexTuple itup)
94 : {
95 : BTScanInsert key;
96 : ScanKey skey;
97 : TupleDesc itupdesc;
98 : int indnkeyatts;
99 : int16 *indoption;
100 : int tupnatts;
101 : int i;
102 :
103 11873826 : itupdesc = RelationGetDescr(rel);
104 11873826 : indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
105 11873826 : indoption = rel->rd_indoption;
106 11873826 : tupnatts = itup ? BTreeTupleGetNAtts(itup, rel) : 0;
107 :
108 : Assert(tupnatts <= IndexRelationGetNumberOfAttributes(rel));
109 :
110 : /*
111 : * We'll execute the search using a scan key constructed on the key columns.
112 : * Truncated attributes and non-key attributes are omitted from the final
113 : * scan key.
114 : */
115 11873826 : key = palloc(offsetof(BTScanInsertData, scankeys) +
116 11873826 : sizeof(ScanKeyData) * indnkeyatts);
117 11873826 : if (itup)
118 11728332 : _bt_metaversion(rel, &key->heapkeyspace, &key->allequalimage);
119 : else
120 : {
121 : /* Utility statement callers can set these fields themselves */
122 145494 : key->heapkeyspace = true;
123 145494 : key->allequalimage = false;
124 : }
125 11873826 : key->anynullkeys = false; /* initial assumption */
126 11873826 : key->nextkey = false; /* usual case, required by btinsert */
127 11873826 : key->backward = false; /* usual case, required by btinsert */
128 11873826 : key->keysz = Min(indnkeyatts, tupnatts);
129 11873826 : key->scantid = key->heapkeyspace && itup ?
130 23747652 : BTreeTupleGetHeapTID(itup) : NULL;
131 11873826 : skey = key->scankeys;
132 32133742 : for (i = 0; i < indnkeyatts; i++)
133 : {
134 : FmgrInfo *procinfo;
135 : Datum arg;
136 : bool null;
137 : int flags;
138 :
139 : /*
140 : * We can use the cached (default) support procs since no cross-type
141 : * comparison can be needed.
142 : */
143 20259916 : procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
144 :
145 : /*
146 : * Key arguments built from truncated attributes (or when caller
147 : * provides no tuple) are defensively represented as NULL values. They
148 : * should never be used.
149 : */
150 20259916 : if (i < tupnatts)
151 19999016 : arg = index_getattr(itup, i + 1, itupdesc, &null);
152 : else
153 : {
154 260900 : arg = (Datum) 0;
155 260900 : null = true;
156 : }
157 20259916 : flags = (null ? SK_ISNULL : 0) | (indoption[i] << SK_BT_INDOPTION_SHIFT);
158 20259916 : ScanKeyEntryInitializeWithInfo(&skey[i],
159 : flags,
160 20259916 : (AttrNumber) (i + 1),
161 : InvalidStrategy,
162 : InvalidOid,
163 20259916 : rel->rd_indcollation[i],
164 : procinfo,
165 : arg);
166 : /* Record if any key attribute is NULL (or truncated) */
167 20259916 : if (null)
168 281548 : key->anynullkeys = true;
169 : }
170 :
171 : /*
172 : * In NULLS NOT DISTINCT mode, we pretend that there are no null keys, so
173 : * that a full uniqueness check is done.
174 : */
175 11873826 : if (rel->rd_index->indnullsnotdistinct)
176 186 : key->anynullkeys = false;
177 :
178 11873826 : return key;
179 : }
180 :
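/*
 * Illustrative sketch only, assuming the nbtree entry points declared in
 * access/nbtree.h (this helper is hypothetical, not part of nbtutils.c):
 * a caller that needs to relocate an index tuple builds an insertion scan
 * key from it, descends the tree with it, and then frees both the search
 * stack and the key.
 */
#ifdef NOT_USED
static void
example_relocate_itup(Relation rel, IndexTuple itup)
{
	BTScanInsert itup_key = _bt_mkscankey(rel, itup);
	Buffer		buf;
	BTStack		stack;

	/* Descend to the leaf page whose key space covers itup */
	stack = _bt_search(rel, NULL, itup_key, &buf, BT_READ);

	/* ... caller would search within buf via _bt_binsrch/_bt_compare ... */

	_bt_relbuf(rel, buf);
	_bt_freestack(stack);
	pfree(itup_key);
}
#endif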
181 : /*
182 : * free a retracement stack made by _bt_search.
183 : */
184 : void
185 22475164 : _bt_freestack(BTStack stack)
186 : {
187 : BTStack ostack;
188 :
189 41309238 : while (stack != NULL)
190 : {
191 18834074 : ostack = stack;
192 18834074 : stack = stack->bts_parent;
193 18834074 : pfree(ostack);
194 : }
195 22475164 : }
196 :
197 : /*
198 : * _bt_compare_array_skey() -- apply array comparison function
199 : *
200 : * Compares caller's tuple attribute value to a scan key/array element.
201 : * Helper function used during binary searches of SK_SEARCHARRAY arrays.
202 : *
203 : * This routine returns:
204 : * <0 if tupdatum < arrdatum;
205 : * 0 if tupdatum == arrdatum;
206 : * >0 if tupdatum > arrdatum.
207 : *
208 : * This is essentially the same interface as _bt_compare: both functions
209 : * compare the value that they're searching for to a binary search pivot.
210 : * However, unlike _bt_compare, this function's "tuple argument" comes first,
211 : * while its "array/scankey argument" comes second.
212 : */
213 : static inline int32
214 469776 : _bt_compare_array_skey(FmgrInfo *orderproc,
215 : Datum tupdatum, bool tupnull,
216 : Datum arrdatum, ScanKey cur)
217 : {
218 469776 : int32 result = 0;
219 :
220 : Assert(cur->sk_strategy == BTEqualStrategyNumber);
221 : Assert(!(cur->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL)));
222 :
223 469776 : if (tupnull) /* NULL tupdatum */
224 : {
225 228 : if (cur->sk_flags & SK_ISNULL)
226 132 : result = 0; /* NULL "=" NULL */
227 96 : else if (cur->sk_flags & SK_BT_NULLS_FIRST)
228 0 : result = -1; /* NULL "<" NOT_NULL */
229 : else
230 96 : result = 1; /* NULL ">" NOT_NULL */
231 : }
232 469548 : else if (cur->sk_flags & SK_ISNULL) /* NOT_NULL tupdatum, NULL arrdatum */
233 : {
234 30540 : if (cur->sk_flags & SK_BT_NULLS_FIRST)
235 54 : result = 1; /* NOT_NULL ">" NULL */
236 : else
237 30486 : result = -1; /* NOT_NULL "<" NULL */
238 : }
239 : else
240 : {
241 : /*
242 : * Like _bt_compare, we need to be careful of cross-type comparisons,
243 : * so the left value has to be the value that came from an index tuple
244 : */
245 439008 : result = DatumGetInt32(FunctionCall2Coll(orderproc, cur->sk_collation,
246 : tupdatum, arrdatum));
247 :
248 : /*
249 : * We flip the sign by following the obvious rule: flip whenever the
250 : * column is a DESC column.
251 : *
252 : * _bt_compare does it the wrong way around (flip when *ASC*) in order
253 : * to compensate for passing its orderproc arguments backwards. We
254 : * don't need to play these games because we find it natural to pass
255 : * tupdatum as the left value (and arrdatum as the right value).
256 : */
257 439008 : if (cur->sk_flags & SK_BT_DESC)
258 45498 : INVERT_COMPARE_RESULT(result);
259 : }
260 :
261 469776 : return result;
262 : }
263 :
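/*
 * Worked example of the convention above (a sketch, not live code): for
 * an int4 column in ASC order, tupdatum=7 vs. arrdatum=10 returns a
 * negative result, since 7 sorts before 10 in the index.  If the same
 * column were declared DESC, 7 would sort *after* 10 in the index, so the
 * raw orderproc result is inverted and a positive value is returned.
 */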
264 : /*
265 : * _bt_binsrch_array_skey() -- Binary search for next matching array key
266 : *
267 : * Returns an index to the first array element >= caller's tupdatum argument.
268 : * This convention is more natural for forwards scan callers, but that can't
269 : * really matter to backwards scan callers. Both callers require handling for
270 : * the case where the match we return is < tupdatum, and symmetric handling
271 : * for the case where our best match is > tupdatum.
272 : *
273 : * Also sets *set_elem_result to the result _bt_compare_array_skey returned
274 : * when we used it to compare the matching array element to tupdatum/tupnull.
275 : *
276 : * cur_elem_trig indicates if array advancement was triggered by this array's
277 : * scan key, and that the array is for a required scan key. We can apply this
278 : * information to find the next matching array element in the current scan
279 : * direction using far fewer comparisons (fewer on average, compared to naive
280 : * binary search). This scheme takes advantage of an important property of
281 : * required arrays: required arrays always advance in lockstep with the index
282 : * scan's progress through the index's key space.
283 : */
284 : int
285 31172 : _bt_binsrch_array_skey(FmgrInfo *orderproc,
286 : bool cur_elem_trig, ScanDirection dir,
287 : Datum tupdatum, bool tupnull,
288 : BTArrayKeyInfo *array, ScanKey cur,
289 : int32 *set_elem_result)
290 : {
291 31172 : int low_elem = 0,
292 31172 : mid_elem = -1,
293 31172 : high_elem = array->num_elems - 1,
294 31172 : result = 0;
295 : Datum arrdatum;
296 :
297 : Assert(cur->sk_flags & SK_SEARCHARRAY);
298 : Assert(!(cur->sk_flags & SK_BT_SKIP));
299 : Assert(!(cur->sk_flags & SK_ISNULL)); /* SAOP arrays never have NULLs */
300 : Assert(cur->sk_strategy == BTEqualStrategyNumber);
301 :
302 31172 : if (cur_elem_trig)
303 : {
304 : Assert(!ScanDirectionIsNoMovement(dir));
305 : Assert(cur->sk_flags & SK_BT_REQFWD);
306 :
307 : /*
308 : * When the scan key that triggered array advancement is a required
309 : * array scan key, it is now certain that the current array element
310 : * (plus all prior elements relative to the current scan direction)
311 : * cannot possibly be at or ahead of the corresponding tuple value.
312 : * (_bt_checkkeys must have called _bt_tuple_before_array_skeys, which
313 : * makes sure this is true as a condition of advancing the arrays.)
314 : *
315 : * This makes it safe to exclude array elements up to and including
316 : * the former-current array element from our search.
317 : *
318 : * Separately, when array advancement was triggered by a required scan
319 : * key, the array element immediately after the former-current element
320 : * is often either an exact tupdatum match, or a "close by" near-match
321 : * (a near-match tupdatum is one whose key space falls _between_ the
322 : * former-current and new-current array elements). We'll detect both
323 : * cases via an optimistic comparison of the new search lower bound
324 : * (or new search upper bound in the case of backwards scans).
325 : */
326 30854 : if (ScanDirectionIsForward(dir))
327 : {
328 30794 : low_elem = array->cur_elem + 1; /* old cur_elem exhausted */
329 :
330 : /* Compare prospective new cur_elem (also the new lower bound) */
331 30794 : if (high_elem >= low_elem)
332 : {
333 22998 : arrdatum = array->elem_values[low_elem];
334 22998 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
335 : arrdatum, cur);
336 :
337 22998 : if (result <= 0)
338 : {
339 : /* Optimistic comparison optimization worked out */
340 22912 : *set_elem_result = result;
341 22912 : return low_elem;
342 : }
343 86 : mid_elem = low_elem;
344 86 : low_elem++; /* this cur_elem exhausted, too */
345 : }
346 :
347 7882 : if (high_elem < low_elem)
348 : {
349 : /* Caller needs to perform "beyond end" array advancement */
350 7802 : *set_elem_result = 1;
351 7802 : return high_elem;
352 : }
353 : }
354 : else
355 : {
356 60 : high_elem = array->cur_elem - 1; /* old cur_elem exhausted */
357 :
358 : /* Compare prospective new cur_elem (also the new upper bound) */
359 60 : if (high_elem >= low_elem)
360 : {
361 42 : arrdatum = array->elem_values[high_elem];
362 42 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
363 : arrdatum, cur);
364 :
365 42 : if (result >= 0)
366 : {
367 : /* Optimistic comparison optimization worked out */
368 30 : *set_elem_result = result;
369 30 : return high_elem;
370 : }
371 12 : mid_elem = high_elem;
372 12 : high_elem--; /* this cur_elem exhausted, too */
373 : }
374 :
375 30 : if (high_elem < low_elem)
376 : {
377 : /* Caller needs to perform "beyond end" array advancement */
378 30 : *set_elem_result = -1;
379 30 : return low_elem;
380 : }
381 : }
382 : }
383 :
384 698 : while (high_elem > low_elem)
385 : {
386 438 : mid_elem = low_elem + ((high_elem - low_elem) / 2);
387 438 : arrdatum = array->elem_values[mid_elem];
388 :
389 438 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
390 : arrdatum, cur);
391 :
392 438 : if (result == 0)
393 : {
394 : /*
395 : * It's safe to quit as soon as we see an equal array element.
396 : * This often saves an extra comparison or two...
397 : */
398 138 : low_elem = mid_elem;
399 138 : break;
400 : }
401 :
402 300 : if (result > 0)
403 270 : low_elem = mid_elem + 1;
404 : else
405 30 : high_elem = mid_elem;
406 : }
407 :
408 : /*
409 : * ...but our caller also cares about how its searched-for tuple datum
410 : * compares to the low_elem datum. Must always set *set_elem_result with
411 : * the result of that comparison specifically.
412 : */
413 398 : if (low_elem != mid_elem)
414 242 : result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
415 242 : array->elem_values[low_elem], cur);
416 :
417 398 : *set_elem_result = result;
418 :
419 398 : return low_elem;
420 : }
421 :
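/*
 * Worked example (sketch, with cur_elem_trig=false): searching the SAOP
 * array {10, 20, 30} with tupdatum=25 returns the index of element 30
 * (the first element >= 25) and sets *set_elem_result to a negative
 * value, since 25 < 30.  With tupdatum=35 no element is >= tupdatum: we
 * return the final element's index and set *set_elem_result to a positive
 * value, which tells caller that "beyond end" array advancement is
 * required.
 */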
422 : /*
423 : * _bt_binsrch_skiparray_skey() -- "Binary search" within a skip array
424 : *
425 : * Does not return an index into the array, since skip arrays don't really
426 : * contain elements (they generate their array elements procedurally instead).
427 : * Our interface matches that of _bt_binsrch_array_skey in every other way.
428 : *
429 : * Sets *set_elem_result just like _bt_binsrch_array_skey would with a true
430 : * array. The value 0 indicates that tupdatum/tupnull is within the range of
431 : * the skip array. We return -1 when tupdatum/tupnull is lower than any value
432 : * within the range of the array, and 1 when it is higher than every value.
433 : * Caller should pass *set_elem_result to _bt_skiparray_set_element to advance
434 : * the array.
435 : *
436 : * cur_elem_trig indicates if array advancement was triggered by this array's
437 : * scan key. We use this to optimize away comparisons that are known by our
438 : * caller to be unnecessary from context, just like _bt_binsrch_array_skey.
439 : */
440 : static void
441 168860 : _bt_binsrch_skiparray_skey(bool cur_elem_trig, ScanDirection dir,
442 : Datum tupdatum, bool tupnull,
443 : BTArrayKeyInfo *array, ScanKey cur,
444 : int32 *set_elem_result)
445 : {
446 : Assert(cur->sk_flags & SK_BT_SKIP);
447 : Assert(cur->sk_flags & SK_SEARCHARRAY);
448 : Assert(cur->sk_flags & SK_BT_REQFWD);
449 : Assert(array->num_elems == -1);
450 : Assert(!ScanDirectionIsNoMovement(dir));
451 :
452 168860 : if (array->null_elem)
453 : {
454 : Assert(!array->low_compare && !array->high_compare);
455 :
456 142260 : *set_elem_result = 0;
457 142260 : return;
458 : }
459 :
460 26600 : if (tupnull) /* NULL tupdatum */
461 : {
462 24 : if (cur->sk_flags & SK_BT_NULLS_FIRST)
463 0 : *set_elem_result = -1; /* NULL "<" NOT_NULL */
464 : else
465 24 : *set_elem_result = 1; /* NULL ">" NOT_NULL */
466 24 : return;
467 : }
468 :
469 : /*
470 : * Array inequalities determine whether tupdatum is within the range of
471 : * caller's skip array
472 : */
473 26576 : *set_elem_result = 0;
474 26576 : if (ScanDirectionIsForward(dir))
475 : {
476 : /*
477 : * Evaluate low_compare first (unless cur_elem_trig tells us that it
478 : * cannot possibly fail to be satisfied), then evaluate high_compare
479 : */
480 26528 : if (!cur_elem_trig && array->low_compare &&
481 764 : !DatumGetBool(FunctionCall2Coll(&array->low_compare->sk_func,
482 764 : array->low_compare->sk_collation,
483 : tupdatum,
484 764 : array->low_compare->sk_argument)))
485 0 : *set_elem_result = -1;
486 26528 : else if (array->high_compare &&
487 10436 : !DatumGetBool(FunctionCall2Coll(&array->high_compare->sk_func,
488 10436 : array->high_compare->sk_collation,
489 : tupdatum,
490 10436 : array->high_compare->sk_argument)))
491 6418 : *set_elem_result = 1;
492 : }
493 : else
494 : {
495 : /*
496 : * Evaluate high_compare first (unless cur_elem_trig tells us that it
497 : * cannot possibly fail to be satisfied), then evaluate low_compare
498 : */
499 48 : if (!cur_elem_trig && array->high_compare &&
500 6 : !DatumGetBool(FunctionCall2Coll(&array->high_compare->sk_func,
501 6 : array->high_compare->sk_collation,
502 : tupdatum,
503 6 : array->high_compare->sk_argument)))
504 0 : *set_elem_result = 1;
505 48 : else if (array->low_compare &&
506 24 : !DatumGetBool(FunctionCall2Coll(&array->low_compare->sk_func,
507 24 : array->low_compare->sk_collation,
508 : tupdatum,
509 24 : array->low_compare->sk_argument)))
510 0 : *set_elem_result = -1;
511 : }
512 :
513 : /*
514 : * Assert that any keys that were assumed to be satisfied already (due to
515 : * caller passing cur_elem_trig=true) really are satisfied as expected
516 : */
517 : #ifdef USE_ASSERT_CHECKING
518 : if (cur_elem_trig)
519 : {
520 : if (ScanDirectionIsForward(dir) && array->low_compare)
521 : Assert(DatumGetBool(FunctionCall2Coll(&array->low_compare->sk_func,
522 : array->low_compare->sk_collation,
523 : tupdatum,
524 : array->low_compare->sk_argument)));
525 :
526 : if (ScanDirectionIsBackward(dir) && array->high_compare)
527 : Assert(DatumGetBool(FunctionCall2Coll(&array->high_compare->sk_func,
528 : array->high_compare->sk_collation,
529 : tupdatum,
530 : array->high_compare->sk_argument)));
531 : }
532 : #endif
533 : }
534 :
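/*
 * Range semantics example (sketch): for a skip array over "a" bounded by
 * low_compare "a > 0" and high_compare "a < 5", a forward-scan tupdatum
 * of 3 sets *set_elem_result to 0 (within range), -3 sets it to -1 (below
 * the range), and 7 sets it to 1 (above the range).  A skip array with
 * null_elem=true has no bounds at all, so it always sets 0.
 */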
535 : /*
536 : * _bt_skiparray_set_element() -- Set skip array scan key's sk_argument
537 : *
538 : * Caller passes set_elem_result returned by _bt_binsrch_skiparray_skey for
539 : * caller's tupdatum/tupnull.
540 : *
541 : * We copy tupdatum/tupnull into skey's sk_argument iff set_elem_result == 0.
542 : * Otherwise, we set skey to either the lowest or highest value that's within
543 : * the range of caller's skip array (whichever is the best available match to
544 : * tupdatum/tupnull that is still within the range of the skip array according
545 : * to _bt_binsrch_skiparray_skey/set_elem_result).
546 : */
547 : static void
548 156960 : _bt_skiparray_set_element(Relation rel, ScanKey skey, BTArrayKeyInfo *array,
549 : int32 set_elem_result, Datum tupdatum, bool tupnull)
550 : {
551 : Assert(skey->sk_flags & SK_BT_SKIP);
552 : Assert(skey->sk_flags & SK_SEARCHARRAY);
553 :
554 156960 : if (set_elem_result)
555 : {
556 : /* tupdatum/tupnull is out of the range of the skip array */
557 : Assert(!array->null_elem);
558 :
559 646 : _bt_array_set_low_or_high(rel, skey, array, set_elem_result < 0);
560 646 : return;
561 : }
562 :
563 : /* Advance skip array to tupdatum (or tupnull) value */
564 156314 : if (unlikely(tupnull))
565 : {
566 36 : _bt_skiparray_set_isnull(rel, skey, array);
567 36 : return;
568 : }
569 :
570 : /* Free memory previously allocated for sk_argument if needed */
571 156278 : if (!array->attbyval && skey->sk_argument)
572 79940 : pfree(DatumGetPointer(skey->sk_argument));
573 :
574 : /* tupdatum becomes new sk_argument/new current element */
575 156278 : skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL |
576 : SK_BT_MINVAL | SK_BT_MAXVAL |
577 : SK_BT_NEXT | SK_BT_PRIOR);
578 156278 : skey->sk_argument = datumCopy(tupdatum, array->attbyval, array->attlen);
579 : }
580 :
581 : /*
582 : * _bt_skiparray_set_isnull() -- set skip array scan key to NULL
583 : */
584 : static void
585 48 : _bt_skiparray_set_isnull(Relation rel, ScanKey skey, BTArrayKeyInfo *array)
586 : {
587 : Assert(skey->sk_flags & SK_BT_SKIP);
588 : Assert(skey->sk_flags & SK_SEARCHARRAY);
589 : Assert(array->null_elem && !array->low_compare && !array->high_compare);
590 :
591 : /* Free memory previously allocated for sk_argument if needed */
592 48 : if (!array->attbyval && skey->sk_argument)
593 6 : pfree(DatumGetPointer(skey->sk_argument));
594 :
595 : /* NULL becomes new sk_argument/new current element */
596 48 : skey->sk_argument = (Datum) 0;
597 48 : skey->sk_flags &= ~(SK_BT_MINVAL | SK_BT_MAXVAL |
598 : SK_BT_NEXT | SK_BT_PRIOR);
599 48 : skey->sk_flags |= (SK_SEARCHNULL | SK_ISNULL);
600 48 : }
601 :
602 : /*
603 : * _bt_start_array_keys() -- Initialize array keys at start of a scan
604 : *
605 : * Set up the cur_elem counters and fill in the first sk_argument value for
606 : * each array scankey.
607 : */
608 : void
609 81266 : _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
610 : {
611 81266 : Relation rel = scan->indexRelation;
612 81266 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
613 :
614 : Assert(so->numArrayKeys);
615 : Assert(so->qual_ok);
616 :
617 163160 : for (int i = 0; i < so->numArrayKeys; i++)
618 : {
619 81894 : BTArrayKeyInfo *array = &so->arrayKeys[i];
620 81894 : ScanKey skey = &so->keyData[array->scan_key];
621 :
622 : Assert(skey->sk_flags & SK_SEARCHARRAY);
623 :
624 81894 : _bt_array_set_low_or_high(rel, skey, array,
625 : ScanDirectionIsForward(dir));
626 : }
627 81266 : so->scanBehind = so->oppositeDirCheck = false; /* reset */
628 81266 : }
629 :
630 : /*
631 : * _bt_array_set_low_or_high() -- Set array scan key to lowest/highest element
632 : *
633 : * Caller also passes associated scan key, which will have its argument set to
634 : * the lowest/highest array value in passing.
635 : */
636 : static void
637 93090 : _bt_array_set_low_or_high(Relation rel, ScanKey skey, BTArrayKeyInfo *array,
638 : bool low_not_high)
639 : {
640 : Assert(skey->sk_flags & SK_SEARCHARRAY);
641 :
642 93090 : if (array->num_elems != -1)
643 : {
644 : /* set low or high element for SAOP array */
645 83870 : int set_elem = 0;
646 :
647 : Assert(!(skey->sk_flags & SK_BT_SKIP));
648 :
649 83870 : if (!low_not_high)
650 8034 : set_elem = array->num_elems - 1;
651 :
652 : /*
653 : * Just copy over array datum (only skip arrays require freeing and
654 : * allocating memory for sk_argument)
655 : */
656 83870 : array->cur_elem = set_elem;
657 83870 : skey->sk_argument = array->elem_values[set_elem];
658 :
659 83870 : return;
660 : }
661 :
662 : /* set low or high element for skip array */
663 : Assert(skey->sk_flags & SK_BT_SKIP);
664 : Assert(array->num_elems == -1);
665 :
666 : /* Free memory previously allocated for sk_argument if needed */
667 9220 : if (!array->attbyval && skey->sk_argument)
668 1978 : pfree(DatumGetPointer(skey->sk_argument));
669 :
670 : /* Reset flags */
671 9220 : skey->sk_argument = (Datum) 0;
672 9220 : skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL |
673 : SK_BT_MINVAL | SK_BT_MAXVAL |
674 : SK_BT_NEXT | SK_BT_PRIOR);
675 :
676 9220 : if (array->null_elem &&
677 7374 : (low_not_high == ((skey->sk_flags & SK_BT_NULLS_FIRST) != 0)))
678 : {
679 : /* Requested element (either lowest or highest) has the value NULL */
680 962 : skey->sk_flags |= (SK_SEARCHNULL | SK_ISNULL);
681 : }
682 8258 : else if (low_not_high)
683 : {
684 : /* Setting array to lowest element (according to low_compare) */
685 7530 : skey->sk_flags |= SK_BT_MINVAL;
686 : }
687 : else
688 : {
689 : /* Setting array to highest element (according to high_compare) */
690 728 : skey->sk_flags |= SK_BT_MAXVAL;
691 : }
692 : }
693 :
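/*
 * Sentinel example (sketch): starting a forward scan of a skip array
 * whose range cannot include NULL sets SK_BT_MINVAL, a sentinel meaning
 * "the lowest value within the array's range" (no concrete datum is
 * stored in sk_argument).  _bt_skiparray_set_element later swaps the
 * sentinel for a real value copied from an index tuple that falls within
 * the array's range.
 */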
694 : /*
695 : * _bt_array_decrement() -- decrement array scan key's sk_argument
696 : *
697 : * Return value indicates whether caller's array was successfully decremented.
698 : * Cannot decrement an array whose current element is already the first one.
699 : */
700 : static bool
701 912 : _bt_array_decrement(Relation rel, ScanKey skey, BTArrayKeyInfo *array)
702 : {
703 912 : bool uflow = false;
704 : Datum dec_sk_argument;
705 :
706 : Assert(skey->sk_flags & SK_SEARCHARRAY);
707 : Assert(!(skey->sk_flags & (SK_BT_MAXVAL | SK_BT_NEXT | SK_BT_PRIOR)));
708 :
709 : /* SAOP array? */
710 912 : if (array->num_elems != -1)
711 : {
712 : Assert(!(skey->sk_flags & (SK_BT_SKIP | SK_BT_MINVAL | SK_BT_MAXVAL)));
713 36 : if (array->cur_elem > 0)
714 : {
715 : /*
716 : * Just decrement current element, and assign its datum to skey
717 : * (only skip arrays need us to free existing sk_argument memory)
718 : */
719 6 : array->cur_elem--;
720 6 : skey->sk_argument = array->elem_values[array->cur_elem];
721 :
722 : /* Successfully decremented array */
723 6 : return true;
724 : }
725 :
726 : /* Cannot decrement to before first array element */
727 30 : return false;
728 : }
729 :
730 : /* Nope, this is a skip array */
731 : Assert(skey->sk_flags & SK_BT_SKIP);
732 :
733 : /*
734 : * The sentinel value that represents the minimum value within the range
735 : * of a skip array (often just -inf) is never decrementable
736 : */
737 876 : if (skey->sk_flags & SK_BT_MINVAL)
738 0 : return false;
739 :
740 : /*
741 : * When the current array element is NULL, and the lowest sorting value in
742 : * the index is also NULL, we cannot decrement before first array element
743 : */
744 876 : if ((skey->sk_flags & SK_ISNULL) && (skey->sk_flags & SK_BT_NULLS_FIRST))
745 0 : return false;
746 :
747 : /*
748 : * Opclasses without skip support "decrement" the scan key's current
749 : * element by setting the PRIOR flag. The true prior value is determined
750 : * by repositioning to the last index tuple < existing sk_argument/current
751 : * array element. Note that this works in the usual way when the scan key
752 : * is already marked ISNULL (i.e. when the current element is NULL).
753 : */
754 876 : if (!array->sksup)
755 : {
756 : /* Successfully "decremented" array */
757 12 : skey->sk_flags |= SK_BT_PRIOR;
758 12 : return true;
759 : }
760 :
761 : /*
762 : * Opclasses with skip support directly decrement sk_argument
763 : */
764 864 : if (skey->sk_flags & SK_ISNULL)
765 : {
766 : Assert(!(skey->sk_flags & SK_BT_NULLS_FIRST));
767 :
768 : /*
769 : * Existing sk_argument/array element is NULL (for an IS NULL qual).
770 : *
771 : * "Decrement" from NULL to the high_elem value provided by opclass
772 : * skip support routine.
773 : */
774 6 : skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL);
775 12 : skey->sk_argument = datumCopy(array->sksup->high_elem,
776 6 : array->attbyval, array->attlen);
777 6 : return true;
778 : }
779 :
780 : /*
781 : * Ask opclass support routine to provide decremented copy of existing
782 : * non-NULL sk_argument
783 : */
784 858 : dec_sk_argument = array->sksup->decrement(rel, skey->sk_argument, &uflow);
785 858 : if (unlikely(uflow))
786 : {
787 : /* dec_sk_argument has undefined value (so no pfree) */
788 0 : if (array->null_elem && (skey->sk_flags & SK_BT_NULLS_FIRST))
789 : {
790 0 : _bt_skiparray_set_isnull(rel, skey, array);
791 :
792 : /* Successfully "decremented" array to NULL */
793 0 : return true;
794 : }
795 :
796 : /* Cannot decrement to before first array element */
797 0 : return false;
798 : }
799 :
800 : /*
801 : * Successfully decremented sk_argument to a non-NULL value. Make sure
802 : * that the decremented value is still within the range of the array.
803 : */
804 858 : if (array->low_compare &&
805 12 : !DatumGetBool(FunctionCall2Coll(&array->low_compare->sk_func,
806 12 : array->low_compare->sk_collation,
807 : dec_sk_argument,
808 12 : array->low_compare->sk_argument)))
809 : {
810 : /* Keep existing sk_argument after all */
811 6 : if (!array->attbyval)
812 0 : pfree(DatumGetPointer(dec_sk_argument));
813 :
814 : /* Cannot decrement to before first array element */
815 6 : return false;
816 : }
817 :
818 : /* Accept value returned by opclass decrement callback */
819 852 : if (!array->attbyval && skey->sk_argument)
820 0 : pfree(DatumGetPointer(skey->sk_argument));
821 852 : skey->sk_argument = dec_sk_argument;
822 :
823 : /* Successfully decremented array */
824 852 : return true;
825 : }
826 :
827 : /*
828 : * _bt_array_increment() -- increment array scan key's sk_argument
829 : *
830 : * Return value indicates whether caller's array was successfully incremented.
831 : * Cannot increment an array whose current element is already the final one.
832 : */
833 : static bool
834 31300 : _bt_array_increment(Relation rel, ScanKey skey, BTArrayKeyInfo *array)
835 : {
836 31300 : bool oflow = false;
837 : Datum inc_sk_argument;
838 :
839 : Assert(skey->sk_flags & SK_SEARCHARRAY);
840 : Assert(!(skey->sk_flags & (SK_BT_MINVAL | SK_BT_NEXT | SK_BT_PRIOR)));
841 :
842 : /* SAOP array? */
843 31300 : if (array->num_elems != -1)
844 : {
845 : Assert(!(skey->sk_flags & (SK_BT_SKIP | SK_BT_MINVAL | SK_BT_MAXVAL)));
846 8058 : if (array->cur_elem < array->num_elems - 1)
847 : {
848 : /*
849 : * Just increment current element, and assign its datum to skey
850 : * (only skip arrays need us to free existing sk_argument memory)
851 : */
852 38 : array->cur_elem++;
853 38 : skey->sk_argument = array->elem_values[array->cur_elem];
854 :
855 : /* Successfully incremented array */
856 38 : return true;
857 : }
858 :
859 : /* Cannot increment past final array element */
860 8020 : return false;
861 : }
862 :
863 : /* Nope, this is a skip array */
864 : Assert(skey->sk_flags & SK_BT_SKIP);
865 :
866 : /*
867 : * The sentinel value that represents the maximum value within the range
868 : * of a skip array (often just +inf) is never incrementable
869 : */
870 23242 : if (skey->sk_flags & SK_BT_MAXVAL)
871 646 : return false;
872 :
873 : /*
874 : * When the current array element is NULL, and the highest sorting value
875 : * in the index is also NULL, we cannot increment past the final element
876 : */
877 22596 : if ((skey->sk_flags & SK_ISNULL) && !(skey->sk_flags & SK_BT_NULLS_FIRST))
878 436 : return false;
879 :
880 : /*
881 : * Opclasses without skip support "increment" the scan key's current
882 : * element by setting the NEXT flag. The true next value is determined by
883 : * repositioning to the first index tuple > existing sk_argument/current
884 : * array element. Note that this works in the usual way when the scan key
885 : * is already marked ISNULL (i.e. when the current element is NULL).
886 : */
887 22160 : if (!array->sksup)
888 : {
889 : /* Successfully "incremented" array */
890 14882 : skey->sk_flags |= SK_BT_NEXT;
891 14882 : return true;
892 : }
893 :
894 : /*
895 : * Opclasses with skip support directly increment sk_argument
896 : */
897 7278 : if (skey->sk_flags & SK_ISNULL)
898 : {
899 : Assert(skey->sk_flags & SK_BT_NULLS_FIRST);
900 :
901 : /*
902 : * Existing sk_argument/array element is NULL (for an IS NULL qual).
903 : *
904 : * "Increment" from NULL to the low_elem value provided by opclass
905 : * skip support routine.
906 : */
907 36 : skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL);
908 72 : skey->sk_argument = datumCopy(array->sksup->low_elem,
909 36 : array->attbyval, array->attlen);
910 36 : return true;
911 : }
912 :
913 : /*
914 : * Ask opclass support routine to provide incremented copy of existing
915 : * non-NULL sk_argument
916 : */
917 7242 : inc_sk_argument = array->sksup->increment(rel, skey->sk_argument, &oflow);
918 7242 : if (unlikely(oflow))
919 : {
920 : /* inc_sk_argument has undefined value (so no pfree) */
921 30 : if (array->null_elem && !(skey->sk_flags & SK_BT_NULLS_FIRST))
922 : {
923 12 : _bt_skiparray_set_isnull(rel, skey, array);
924 :
925 : /* Successfully "incremented" array to NULL */
926 12 : return true;
927 : }
928 :
929 : /* Cannot increment past final array element */
930 18 : return false;
931 : }
932 :
933 : /*
934 : * Successfully incremented sk_argument to a non-NULL value. Make sure
935 : * that the incremented value is still within the range of the array.
936 : */
937 7212 : if (array->high_compare &&
938 42 : !DatumGetBool(FunctionCall2Coll(&array->high_compare->sk_func,
939 42 : array->high_compare->sk_collation,
940 : inc_sk_argument,
941 42 : array->high_compare->sk_argument)))
942 : {
943 : /* Keep existing sk_argument after all */
944 12 : if (!array->attbyval)
945 0 : pfree(DatumGetPointer(inc_sk_argument));
946 :
947 : /* Cannot increment past final array element */
948 12 : return false;
949 : }
950 :
951 : /* Accept value returned by opclass increment callback */
952 7200 : if (!array->attbyval && skey->sk_argument)
953 0 : pfree(DatumGetPointer(skey->sk_argument));
954 7200 : skey->sk_argument = inc_sk_argument;
955 :
956 : /* Successfully incremented array */
957 7200 : return true;
958 : }
959 :
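/*
 * Example (sketch): for an int4 skip array whose opclass provides skip
 * support, incrementing sk_argument=41 yields 42 from the support
 * routine's increment callback; if high_compare is "a < 42", the new
 * value fails the range check, so we keep 41 and return false.  For an
 * opclass without skip support we just set SK_BT_NEXT, leaving it to the
 * next descent to reposition to the first index tuple > 41.
 */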
960 : /*
961 : * _bt_advance_array_keys_increment() -- Advance to next set of array elements
962 : *
963 : * Advances the array keys by a single increment in the current scan
964 : * direction. When there are multiple array keys this can roll over from the
965 : * lowest order array to higher order arrays.
966 : *
967 : * Returns true if there is another set of values to consider, false if not.
968 : * On true result, the scankeys are initialized with the next set of values.
969 : * On false result, the scankeys stay the same, and the array keys are not
970 : * advanced (every array remains at its final element for scan direction).
971 : */
972 : static bool
973 31070 : _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir,
974 : bool *skip_array_set)
975 : {
976 31070 : Relation rel = scan->indexRelation;
977 31070 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
978 :
979 : /*
980 : * We must advance the last array key most quickly, since it will
981 : * correspond to the lowest-order index column among the available
982 : * qualifications
983 : */
984 40238 : for (int i = so->numArrayKeys - 1; i >= 0; i--)
985 : {
986 32212 : BTArrayKeyInfo *array = &so->arrayKeys[i];
987 32212 : ScanKey skey = &so->keyData[array->scan_key];
988 :
989 32212 : if (array->num_elems == -1)
990 24118 : *skip_array_set = true;
991 :
992 32212 : if (ScanDirectionIsForward(dir))
993 : {
994 31300 : if (_bt_array_increment(rel, skey, array))
995 22168 : return true;
996 : }
997 : else
998 : {
999 912 : if (_bt_array_decrement(rel, skey, array))
1000 876 : return true;
1001 : }
1002 :
1003 : /*
1004 : * Couldn't increment (or decrement) array. Handle array roll over.
1005 : *
1006 : * Start over at the array's lowest sorting value (or its highest
1007 : * value, for backward scans)...
1008 : */
1009 9168 : _bt_array_set_low_or_high(rel, skey, array,
1010 : ScanDirectionIsForward(dir));
1011 :
1012 : /* ...then increment (or decrement) next most significant array */
1013 : }
1014 :
1015 : /*
1016 : * The array keys are now exhausted.
1017 : *
1018 : * Restore the array keys to the state they were in immediately before we
1019 : * were called. This ensures that the arrays only ever ratchet in the
1020 : * current scan direction.
1021 : *
1022 : * Without this, scans could overlook matching tuples when the scan
1023 : * direction gets reversed just before btgettuple runs out of items to
1024 : * return, but just after _bt_readpage prepares all the items from the
1025 : * scan's final page in so->currPos. When we're on the final page it is
1026 : * typical for so->currPos to get invalidated once btgettuple finally
1027 : * returns false, which'll effectively invalidate the scan's array keys.
1028 : * That hasn't happened yet, though -- and in general it may never happen.
1029 : */
1030 8026 : _bt_start_array_keys(scan, -dir);
1031 :
1032 8026 : return false;
1033 : }
1034 :
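/*
 * Rollover example (sketch): with a forward scan of the qual
 * "a IN (1, 2) AND b IN (10, 20)", the array keys advance through
 * (1,10) -> (1,20) -> (2,10) -> (2,20).  Incrementing from (1,20) rolls
 * the lower-order "b" array over to 10 and increments "a" to 2.
 * Incrementing from (2,20) fails both arrays: they're restored to their
 * final elements via _bt_start_array_keys(scan, -dir), and we return
 * false to indicate that the arrays are exhausted.
 */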
1035 : /*
1036 : * _bt_tuple_before_array_skeys() -- too early to advance required arrays?
1037 : *
1038 : * We always compare the tuple using the current array keys (which we assume
1039 : * are already set in so->keyData[]). readpagetup indicates if tuple is the
1040 : * scan's current _bt_readpage-wise tuple.
1041 : *
1042 : * readpagetup callers must only call here when _bt_check_compare already set
1043 : * continuescan=false. We help these callers deal with _bt_check_compare's
1044 : * inability to distinguish between the < and > cases (it uses equality
1045 : * operator scan keys, whereas we use 3-way ORDER procs). These callers pass
1046 : * a _bt_check_compare-set sktrig value that indicates which scan key
1047 : * triggered the call (!readpagetup callers just pass us sktrig=0 instead).
1048 : * This information allows us to avoid wastefully checking earlier scan keys
1049 : * that were already deemed to have been satisfied inside _bt_check_compare.
1050 : *
1051 : * Returns false when caller's tuple is >= the current required equality scan
1052 : * keys (or <=, in the case of backwards scans). This happens to readpagetup
1053 : * callers when the scan has reached the point of needing its array keys
1054 : * advanced; caller will need to advance required and non-required arrays at
1055 : * scan key offsets >= sktrig, plus scan keys < sktrig iff sktrig rolls over.
1056 : * (When we return false to readpagetup callers, tuple can only be == current
1057 : * required equality scan keys when caller's sktrig indicates that the arrays
1058 : * need to be advanced due to an unsatisfied required inequality key trigger.)
1059 : *
1060 : * Returns true when caller passes a tuple that is < the current set of
1061 : * equality keys for the most significant non-equal required scan key/column
1062 : * (or > the keys, during backwards scans). This happens to readpagetup
1063 : * callers when tuple is still before the start of matches for the scan's
1064 : * required equality strategy scan keys. (sktrig can't have indicated that an
1065 : * inequality strategy scan key wasn't satisfied in _bt_check_compare when we
1066 : * return true. In fact, we automatically return false when passed such an
1067 : * inequality sktrig by readpagetup callers -- _bt_check_compare's initial
1068 : * continuescan=false doesn't really need to be confirmed here by us.)
1069 : *
1070 : * !readpagetup callers optionally pass us *scanBehind, which tracks whether
1071 : * any missing truncated attributes might have affected array advancement
1072 : * (compared to what would happen if it was shown the first non-pivot tuple on
1073 : * the page to the right of caller's finaltup/high key tuple instead). It's
1074 : * only possible that we'll set *scanBehind to true when caller passes us a
1075 : * pivot tuple (with truncated -inf attributes) that we return false for.
1076 : */
1077 : static bool
1078 327478 : _bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir,
1079 : IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
1080 : bool readpagetup, int sktrig, bool *scanBehind)
1081 : {
1082 327478 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1083 :
1084 : Assert(so->numArrayKeys);
1085 : Assert(so->numberOfKeys);
1086 : Assert(sktrig == 0 || readpagetup);
1087 : Assert(!readpagetup || scanBehind == NULL);
1088 :
1089 327478 : if (scanBehind)
1090 85590 : *scanBehind = false;
1091 :
1092 330460 : for (int ikey = sktrig; ikey < so->numberOfKeys; ikey++)
1093 : {
1094 330014 : ScanKey cur = so->keyData + ikey;
1095 : Datum tupdatum;
1096 : bool tupnull;
1097 : int32 result;
1098 :
1099 : /* readpagetup calls require one ORDER proc comparison (at most) */
1100 : Assert(!readpagetup || ikey == sktrig);
1101 :
1102 : /*
1103 : * Once we reach a non-required scan key, we're completely done.
1104 : *
1105 : * Note: we deliberately don't consider the scan direction here.
1106 : * _bt_advance_array_keys caller requires that we track *scanBehind
1107 : * without concern for scan direction.
1108 : */
1109 330014 : if ((cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) == 0)
1110 : {
1111 : Assert(!readpagetup);
1112 : Assert(ikey > sktrig || ikey == 0);
1113 327032 : return false;
1114 : }
1115 :
1116 330014 : if (cur->sk_attno > tupnatts)
1117 : {
1118 : Assert(!readpagetup);
1119 :
1120 : /*
1121 : * When we reach a high key's truncated attribute, assume that the
1122 : * tuple attribute's value is >= the scan's equality constraint
1123 : * scan keys (but set *scanBehind to let interested callers know
1124 : * that a truncated attribute might have affected our answer).
1125 : */
1126 26 : if (scanBehind)
1127 26 : *scanBehind = true;
1128 :
1129 26 : return false;
1130 : }
1131 :
1132 : /*
1133 : * Deal with inequality strategy scan keys that _bt_check_compare set
1134 : * continuescan=false for
1135 : */
1136 329988 : if (cur->sk_strategy != BTEqualStrategyNumber)
1137 : {
1138 : /*
1139 : * When _bt_check_compare indicated that a required inequality
1140 : * scan key wasn't satisfied, there's no need to verify anything;
1141 : * caller always calls _bt_advance_array_keys with this sktrig.
1142 : */
1143 620 : if (readpagetup)
1144 348 : return false;
1145 :
1146 : /*
1147 : * Otherwise we can't give up, since we must check all required
1148 : * scan keys (required in either direction) in order to correctly
1149 : * track *scanBehind for caller
1150 : */
1151 272 : continue;
1152 : }
1153 :
1154 329368 : tupdatum = index_getattr(tuple, cur->sk_attno, tupdesc, &tupnull);
1155 :
1156 329368 : if (likely(!(cur->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL))))
1157 : {
1158 : /* Scankey has a valid/comparable sk_argument value */
1159 323770 : result = _bt_compare_array_skey(&so->orderProcs[ikey],
1160 : tupdatum, tupnull,
1161 : cur->sk_argument, cur);
1162 :
1163 323770 : if (result == 0)
1164 : {
1165 : /*
1166 : * Interpret result in a way that takes NEXT/PRIOR into
1167 : * account
1168 : */
1169 16656 : if (cur->sk_flags & SK_BT_NEXT)
1170 13916 : result = -1;
1171 2740 : else if (cur->sk_flags & SK_BT_PRIOR)
1172 30 : result = 1;
1173 :
1174 : Assert(result == 0 || (cur->sk_flags & SK_BT_SKIP));
1175 : }
1176 : }
1177 : else
1178 : {
1179 5598 : BTArrayKeyInfo *array = NULL;
1180 :
1181 : /*
1182 : * Current array element/array = scan key value is a sentinel
1183 : * value that represents the lowest (or highest) possible value
1184 : * that's still within the range of the array.
1185 : *
1186 : * Like _bt_first, we only see MINVAL keys during forwards scans
1187 : * (and similarly only see MAXVAL keys during backwards scans).
1188 : * Even if the scan's direction changes, we'll stop at some higher
1189 : * order key before we can ever reach any MAXVAL (or MINVAL) keys.
1190 : * (However, unlike _bt_first we _can_ get to keys marked either
1191 : * NEXT or PRIOR, regardless of the scan's current direction.)
1192 : */
1193 : Assert(ScanDirectionIsForward(dir) ?
1194 : !(cur->sk_flags & SK_BT_MAXVAL) :
1195 : !(cur->sk_flags & SK_BT_MINVAL));
1196 :
1197 : /*
1198 : * There are no valid sk_argument values in MINVAL/MAXVAL keys.
1199 : * Check if tupdatum is within the range of skip array instead.
1200 : */
1201 6124 : for (int arrayidx = 0; arrayidx < so->numArrayKeys; arrayidx++)
1202 : {
1203 6124 : array = &so->arrayKeys[arrayidx];
1204 6124 : if (array->scan_key == ikey)
1205 5598 : break;
1206 : }
1207 :
1208 5598 : _bt_binsrch_skiparray_skey(false, dir, tupdatum, tupnull,
1209 : array, cur, &result);
1210 :
1211 5598 : if (result == 0)
1212 : {
1213 : /*
1214 : * tupdatum satisfies both low_compare and high_compare, so
1215 : * it's time to advance the array keys.
1216 : *
1217 : * Note: It's possible that the skip array will "advance" from
1218 : * its MINVAL (or MAXVAL) representation to an alternative,
1219 : * logically equivalent representation of the same value: a
1220 : * representation where the = key gets a valid datum in its
1221 : * sk_argument. This is only possible when low_compare uses
1222 : * the >= strategy (or high_compare uses the <= strategy).
1223 : */
1224 5586 : return false;
1225 : }
1226 : }
1227 :
1228 : /*
1229 : * Does this comparison indicate that caller must _not_ advance the
1230 : * scan's arrays just yet?
1231 : */
1232 323782 : if ((ScanDirectionIsForward(dir) && result < 0) ||
1233 3252 : (ScanDirectionIsBackward(dir) && result > 0))
1234 59872 : return true;
1235 :
1236 : /*
1237 : * Does this comparison indicate that caller should now advance the
1238 : * scan's arrays? (Must be if we get here during a readpagetup call.)
1239 : */
1240 263910 : if (readpagetup || result != 0)
1241 : {
1242 : Assert(result != 0);
1243 261200 : return false;
1244 : }
1245 :
1246 : /*
1247 : * Inconclusive -- need to check later scan keys, too.
1248 : *
1249 : * This must be a finaltup precheck, or a call made from an assertion.
1250 : */
1251 : Assert(result == 0);
1252 : }
1253 :
1254 : Assert(!readpagetup);
1255 :
1256 446 : return false;
1257 : }
1258 :
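/*
 * Example (sketch): with a forward scan whose current required equality
 * keys are a=2 and b=5, a tuple (1, 9) is still before the array keys
 * (we return true; too early to advance), a tuple (2, 5) matches them
 * exactly (we return false), and a tuple (2, 7) is at/ahead of them (we
 * also return false; caller should advance the arrays).
 */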
1259 : /*
1260 : * _bt_start_prim_scan() -- start scheduled primitive index scan?
1261 : *
1262 : * Returns true if _bt_checkkeys scheduled another primitive index scan, just
1263 : * as the last one ended. Otherwise returns false, indicating that the array
1264 : * keys are now fully exhausted.
1265 : *
1266 : * Only call here during scans with one or more equality type array scan keys,
1267 : * after _bt_first or _bt_next return false.
1268 : */
1269 : bool
1270 88434 : _bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir)
1271 : {
1272 88434 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1273 :
1274 : Assert(so->numArrayKeys);
1275 :
1276 88434 : so->scanBehind = so->oppositeDirCheck = false; /* reset */
1277 :
1278 : /*
1279 : * Array keys are advanced within _bt_checkkeys when the scan reaches the
1280 : * leaf level (more precisely, they're advanced when the scan reaches the
1281 : * end of each distinct set of array elements). This process avoids
1282 : * repeat access to leaf pages (across multiple primitive index scans) by
1283 : * advancing the scan's array keys when it allows the primitive index scan
1284 : * to find nearby matching tuples (or when it eliminates ranges of array
1285 : * key space that can't possibly be satisfied by any index tuple).
1286 : *
1287 : * _bt_checkkeys sets a simple flag variable to schedule another primitive
1288 : * index scan. The flag tells us what to do.
1289 : *
1290 : * We cannot rely on _bt_first always reaching _bt_checkkeys. There are
1291 : * various cases where that won't happen. For example, if the index is
1292 : * completely empty, then _bt_first won't call _bt_readpage/_bt_checkkeys.
1293 : * We also don't expect a call to _bt_checkkeys during searches for a
1294 : * non-existent value that happens to be lower/higher than any existing
1295 : * value in the index.
1296 : *
1297 : * We don't require special handling for these cases -- we don't need to
1298 : * be explicitly instructed to _not_ perform another primitive index scan.
1299 : * It's up to code under the control of _bt_first to always set the flag
1300 : * when another primitive index scan will be required.
1301 : *
1302 : * This works correctly, even with the tricky cases listed above, which
1303 : * all involve access to leaf pages "near the boundaries of the key space"
1304 : * (whether it's from a leftmost/rightmost page, or an imaginary empty
1305 : * leaf root page). If _bt_checkkeys cannot be reached by a primitive
1306 : * index scan for one set of array keys, then it also won't be reached for
1307 : * any later set ("later" in terms of the direction that we scan the index
1308 : * and advance the arrays). The array keys won't have advanced in these
1309 : * cases, but that's the correct behavior (even _bt_advance_array_keys
1310 : * won't always advance the arrays at the point they become "exhausted").
1311 : */
1312 88434 : if (so->needPrimScan)
1313 : {
1314 : /*
1315 : * Flag was set -- must call _bt_first again, which will reset the
1316 : * scan's needPrimScan flag
1317 : */
1318 17530 : return true;
1319 : }
1320 :
1321 : /* The top-level index scan ran out of tuples in this scan direction */
1322 70904 : if (scan->parallel_scan != NULL)
1323 30 : _bt_parallel_done(scan);
1324 :
1325 70904 : return false;
1326 : }
1327 :
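/*
 * Example (sketch): given the qual "a IN (1, 1000000)" over a large
 * index, the first primitive scan descends for a=1 and returns matching
 * tuples.  Once the scan reaches key space far beyond a=1, array
 * advancement sets so->needPrimScan instead of letting the scan grind
 * through the intervening leaf pages; the next call here returns true,
 * and _bt_first descends afresh for a=1000000.
 */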
1328 : /*
1329 : * _bt_advance_array_keys() -- Advance array elements using a tuple
1330 : *
1331 : * The scan always gets a new qual as a consequence of calling here (except
1332 : * when we determine that the top-level scan has run out of matching tuples).
1333 : * All later _bt_check_compare calls also use the same new qual that was first
1334 : * used here (at least until the next call here advances the keys once again).
1335 : * It's convenient to structure _bt_check_compare rechecks of caller's tuple
1336 : * (using the new qual) as one of the steps of advancing the scan's array keys,
1337 : * so this function works as a wrapper around _bt_check_compare.
1338 : *
1339 : * Like _bt_check_compare, we'll set pstate.continuescan on behalf of the
1340 : * caller, and return a boolean indicating if caller's tuple satisfies the
1341 : * scan's new qual. But unlike _bt_check_compare, we set so->needPrimScan
1342 : * when we set continuescan=false, indicating if a new primitive index scan
1343 : * has been scheduled (otherwise, the top-level scan has run out of tuples in
1344 : * the current scan direction).
1345 : *
1346 : * Caller must use _bt_tuple_before_array_skeys to determine if the current
1347 : * place in the scan is >= the current array keys _before_ calling here.
1348 : * We're responsible for ensuring that caller's tuple is <= the newly advanced
1349 : * required array keys once we return. We try to find an exact match, but
1350 : * failing that we'll advance the array keys to whatever set of array elements
1351 : * comes next in the key space for the current scan direction. Required array
1352 : * keys "ratchet forwards" (or backwards). They can only advance as the scan
1353 : * itself advances through the index/key space.
1354 : *
1355 : * (The rules are the same for backwards scans, except that the operators are
1356 : * flipped: just replace the precondition's >= operator with a <=, and the
1357 : * postcondition's <= operator with a >=. In other words, just swap the
1358 : * precondition with the postcondition.)
1359 : *
1360 : * We also deal with "advancing" non-required arrays here (or arrays that are
1361 : * treated as non-required for the duration of a _bt_readpage call). Callers
1362 : * whose sktrig scan key is non-required specify sktrig_required=false. These
1363 : * calls are the only exception to the general rule about always advancing the
1364 : * required array keys (the scan may not even have a required array). These
1365 : * callers should just pass a NULL pstate (since there is never any question
1366 : * of stopping the scan). No call to _bt_tuple_before_array_skeys is required
1367 : * ahead of these calls (it's already clear that any required scan keys must
1368 : * be satisfied by caller's tuple).
1369 : *
1370 : * Note that we deal with non-array required equality strategy scan keys as
1371 : * degenerate single element arrays here. Obviously, they can never really
1372 : * advance in the way that real arrays can, but they must still affect how we
1373 : * advance real array scan keys (exactly like true array equality scan keys).
1374 : * We have to keep around a 3-way ORDER proc for these (using the "=" operator
1375 : * won't do), since in general whether the tuple is < or > _any_ unsatisfied
1376 : * required equality key influences how the scan's real arrays must advance.
1377 : *
1378 : * Note also that we may sometimes need to advance the array keys when the
1379 : * existing required array keys (and other required equality keys) are already
1380 : * an exact match for every corresponding value from caller's tuple. We must
1381 : * do this for inequalities that _bt_check_compare set continuescan=false for.
1382 : * They'll advance the array keys here, just like any other scan key that
1383 : * _bt_check_compare stops on. (This can even happen _after_ we advance the
1384 : * array keys, in which case we'll advance the array keys a second time. That
1385 : * way _bt_checkkeys caller always has its required arrays advance to the
1386 : * maximum possible extent that its tuple will allow.)
1387 : */
1388 : static bool
1389 202326 : _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
1390 : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
1391 : int sktrig, bool sktrig_required)
1392 : {
1393 202326 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1394 202326 : Relation rel = scan->indexRelation;
1395 202326 : ScanDirection dir = so->currPos.dir;
1396 202326 : int arrayidx = 0;
1397 202326 : bool beyond_end_advance = false,
1398 202326 : skip_array_advanced = false,
1399 202326 : has_required_opposite_direction_only = false,
1400 202326 : all_required_satisfied = true,
1401 202326 : all_satisfied = true;
1402 :
1403 : Assert(!so->needPrimScan && !so->scanBehind && !so->oppositeDirCheck);
1404 : Assert(_bt_verify_keys_with_arraykeys(scan));
1405 :
1406 202326 : if (sktrig_required)
1407 : {
1408 : /*
1409 : * Precondition array state assertion
1410 : */
1411 : Assert(!_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc,
1412 : tupnatts, false, 0, NULL));
1413 :
1414 : /*
1415 : * Once we return we'll have a new set of required array keys, so
1416 : * reset state used by "look ahead" optimization
1417 : */
1418 193184 : pstate->rechecks = 0;
1419 193184 : pstate->targetdistance = 0;
1420 : }
1421 9142 : else if (sktrig < so->numberOfKeys - 1 &&
1422 9142 : !(so->keyData[so->numberOfKeys - 1].sk_flags & SK_SEARCHARRAY))
1423 : {
1424 9142 : int least_sign_ikey = so->numberOfKeys - 1;
1425 : bool continuescan;
1426 :
1427 : /*
1428 : * Optimization: perform a precheck of the least significant key
1429 : * during !sktrig_required calls when it isn't already our sktrig
1430 : * (provided the precheck key is not itself an array).
1431 : *
1432 : * When the precheck works out we'll avoid an expensive binary search
1433 : * of sktrig's array (plus any other arrays before least_sign_ikey).
1434 : */
1435 : Assert(so->keyData[sktrig].sk_flags & SK_SEARCHARRAY);
1436 9142 : if (!_bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, false,
1437 : false, &continuescan,
1438 : &least_sign_ikey))
1439 2840 : return false;
1440 : }
1441 :
1442 586330 : for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
1443 : {
1444 392610 : ScanKey cur = so->keyData + ikey;
1445 392610 : BTArrayKeyInfo *array = NULL;
1446 : Datum tupdatum;
1447 392610 : bool required = false,
1448 392610 : required_opposite_direction_only = false,
1449 : tupnull;
1450 : int32 result;
1451 392610 : int set_elem = 0;
1452 :
1453 392610 : if (cur->sk_strategy == BTEqualStrategyNumber)
1454 : {
1455 : /* Manage array state */
1456 344218 : if (cur->sk_flags & SK_SEARCHARRAY)
1457 : {
1458 209824 : array = &so->arrayKeys[arrayidx++];
1459 : Assert(array->scan_key == ikey);
1460 : }
1461 : }
1462 : else
1463 : {
1464 : /*
1465 : * Note whether this is an inequality that's required in the
1466 : * opposite scan direction only
1467 : */
1468 48392 : if (((ScanDirectionIsForward(dir) &&
1469 48392 : (cur->sk_flags & (SK_BT_REQBKWD))) ||
1470 0 : (ScanDirectionIsBackward(dir) &&
1471 0 : (cur->sk_flags & (SK_BT_REQFWD)))))
1472 15848 : has_required_opposite_direction_only =
1473 15848 : required_opposite_direction_only = true;
1474 : }
1475 :
1476 : /* Optimization: skip over known-satisfied scan keys */
1477 392610 : if (ikey < sktrig)
1478 76456 : continue;
1479 :
1480 376260 : if (cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))
1481 : {
1482 376260 : required = true;
1483 :
1484 376260 : if (cur->sk_attno > tupnatts)
1485 : {
1486 : /* Set this just like _bt_tuple_before_array_skeys */
1487 : Assert(sktrig < ikey);
1488 2460 : so->scanBehind = true;
1489 : }
1490 : }
1491 :
1492 : /*
1493 : * Handle a required non-array scan key that the initial call to
1494 : * _bt_check_compare indicated triggered array advancement, if any.
1495 : *
1496 : * The non-array scan key's strategy will be <, <=, or = during a
1497 : * forwards scan (or any one of =, >=, or > during a backwards scan).
1498 : * It follows that the corresponding tuple attribute's value must now
1499 : * be either > or >= the scan key value (for backwards scans it must
1500 : * be either < or <= that value).
1501 : *
1502 : * If this is a required equality strategy scan key, this is just an
1503 : * optimization; _bt_tuple_before_array_skeys already confirmed that
1504 : * this scan key places us ahead of caller's tuple. There's no need
1505 : * to repeat that work now. (The same underlying principle also gets
1506 : * applied by the cur_elem_trig optimization used to speed up searches
1507 : * for the next array element.)
1508 : *
1509 : * If this is a required inequality strategy scan key, we _must_ rely
1510 : * on _bt_check_compare like this; we aren't capable of directly
1511 : * evaluating required inequality strategy scan keys here, on our own.
1512 : */
1513 376260 : if (ikey == sktrig && !array)
1514 : {
1515 : Assert(sktrig_required && required && all_required_satisfied);
1516 :
1517 : /* Use "beyond end" advancement. See below for an explanation. */
1518 7422 : beyond_end_advance = true;
1519 7422 : all_satisfied = all_required_satisfied = false;
1520 :
1521 7422 : continue;
1522 : }
1523 :
1524 : /*
1525 : * Nothing more for us to do with an inequality strategy scan key that
1526 : * wasn't the one that _bt_check_compare stopped on, though.
1527 : *
1528 : * Note: if our later call to _bt_check_compare (to recheck caller's
1529 : * tuple) sets continuescan=false due to finding this same inequality
1530 : * unsatisfied (possible when it's required in the scan direction),
1531 : * we'll deal with it via a recursive "second pass" call.
1532 : */
1533 368838 : else if (cur->sk_strategy != BTEqualStrategyNumber)
1534 47822 : continue;
1535 :
1536 : /*
1537 : * Nothing for us to do with an equality strategy scan key that isn't
1538 : * marked required, either -- unless it's a non-required array
1539 : */
1540 321016 : else if (!required && !array)
1541 0 : continue;
1542 :
1543 : /*
1544 : * Here we perform steps for all array scan keys after a required
1545 : * array scan key whose binary search triggered "beyond end of array
1546 : * element" array advancement due to encountering a tuple attribute
1547 : * value > the closest matching array key (or < for backwards scans).
1548 : */
1549 321016 : if (beyond_end_advance)
1550 : {
1551 1416 : if (array)
1552 594 : _bt_array_set_low_or_high(rel, cur, array,
1553 : ScanDirectionIsBackward(dir));
1554 :
1555 1416 : continue;
1556 : }
1557 :
1558 : /*
1559 : * Here we perform steps for all array scan keys after a required
1560 : * array scan key whose tuple attribute was < the closest matching
1561 : * array key when we dealt with it (or > for backwards scans).
1562 : *
1563 : * This earlier required array key already puts us ahead of caller's
1564 : * tuple in the key space (for the current scan direction). We must
1565 : * make sure that subsequent lower-order array keys do not put us too
1566 : * far ahead (ahead of tuples that have yet to be seen by our caller).
1567 : * For example, when a tuple "(a, b) = (42, 5)" advances the array
1568 : * keys on "a" from 40 to 45, we must also set "b" to whatever the
1569 : * first array element for "b" is. It would be wrong to allow "b" to
1570 : * be set based on the tuple value.
1571 : *
1572 : * Perform the same steps with truncated high key attributes. You can
1573 : * think of this as a "binary search" for the element closest to the
1574 : * value -inf. Again, the arrays must never get ahead of the scan.
1575 : */
1576 319600 : if (!all_required_satisfied || cur->sk_attno > tupnatts)
1577 : {
1578 3446 : if (array)
1579 788 : _bt_array_set_low_or_high(rel, cur, array,
1580 : ScanDirectionIsForward(dir));
1581 :
1582 3446 : continue;
1583 : }
1584 :
1585 : /*
1586 : * Search in scankey's array for the corresponding tuple attribute
1587 : * value from caller's tuple
1588 : */
1589 316154 : tupdatum = index_getattr(tuple, cur->sk_attno, tupdesc, &tupnull);
1590 :
1591 316154 : if (array)
1592 : {
1593 193868 : bool cur_elem_trig = (sktrig_required && ikey == sktrig);
1594 :
1595 : /*
1596 : * "Binary search" by checking if tupdatum/tupnull are within the
1597 : * range of the skip array
1598 : */
1599 193868 : if (array->num_elems == -1)
1600 162726 : _bt_binsrch_skiparray_skey(cur_elem_trig, dir,
1601 : tupdatum, tupnull, array, cur,
1602 : &result);
1603 :
1604 : /*
1605 : * Binary search for the closest match from the SAOP array
1606 : */
1607 : else
1608 31142 : set_elem = _bt_binsrch_array_skey(&so->orderProcs[ikey],
1609 : cur_elem_trig, dir,
1610 : tupdatum, tupnull, array, cur,
1611 : &result);
1612 : }
1613 : else
1614 : {
1615 : Assert(required);
1616 :
1617 : /*
1618 : * This is a required non-array equality strategy scan key, which
1619 : * we'll treat as a degenerate single element array.
1620 : *
1621 : * This scan key's imaginary "array" can't really advance, but it
1622 : * can still roll over like any other array. (Actually, this is
1623 : * no different to real single value arrays, which never advance
1624 : * without rolling over -- they can never truly advance, either.)
1625 : */
1626 122286 : result = _bt_compare_array_skey(&so->orderProcs[ikey],
1627 : tupdatum, tupnull,
1628 : cur->sk_argument, cur);
1629 : }
1630 :
1631 : /*
1632 : * Consider "beyond end of array element" array advancement.
1633 : *
1634 : * When the tuple attribute value is > the closest matching array key
1635 : * (or < in the backwards scan case), we need to ratchet this array
1636 : * forward (backward) by one increment, so that caller's tuple ends up
1637 : * being < final array value instead (or > final array value instead).
1638 : * This process has to work for all of the arrays, not just this one:
1639 : * it must "carry" to higher-order arrays when the set_elem that we
1640 : * just found happens to be the final one for the scan's direction.
1641 : * Incrementing (decrementing) set_elem itself isn't good enough.
1642 : *
1643 : * Our approach is to provisionally use set_elem as if it was an exact
1644 : * match now, then set each later/less significant array to whatever
1645 : * its final element is. Once outside the loop we'll then "increment
1646 : * this array's set_elem" by calling _bt_advance_array_keys_increment.
1647 : * That way the process rolls over to higher order arrays as needed.
1648 : *
1649 : * Under this scheme any required arrays only ever ratchet forwards
1650 : * (or backwards), and always do so to the maximum possible extent
1651 : * that we can know will be safe without seeing the scan's next tuple.
1652 : * We don't need any special handling for required scan keys that lack
1653 : * a real array to advance, nor for redundant scan keys that couldn't
1654 : * be eliminated by _bt_preprocess_keys. It won't matter if some of
1655 : * our "true" array scan keys (or even all of them) are non-required.
1656 : */
1657 316154 : if (sktrig_required && required &&
1658 309838 : ((ScanDirectionIsForward(dir) && result > 0) ||
1659 1716 : (ScanDirectionIsBackward(dir) && result < 0)))
1660 23648 : beyond_end_advance = true;
1661 :
1662 : Assert(all_required_satisfied && all_satisfied);
1663 316154 : if (result != 0)
1664 : {
1665 : /*
1666 : * Track whether caller's tuple satisfies our new post-advancement
1667 : * qual, for required scan keys, as well as for the entire set of
1668 : * interesting scan keys (all required scan keys plus non-required
1669 : * array scan keys are considered interesting).
1670 : */
1671 144626 : all_satisfied = false;
1672 144626 : if (sktrig_required && required)
1673 138860 : all_required_satisfied = false;
1674 : else
1675 : {
1676 : /*
1677 : * There's no need to advance the arrays using the best
1678 : * available match for a non-required array. Give up now.
1679 : * (Though note that sktrig_required calls still have to do
1680 : * all the usual post-advancement steps, including the recheck
1681 : * call to _bt_check_compare.)
1682 : */
1683 : break;
1684 : }
1685 : }
1686 :
1687 : /* Advance array keys, even when we don't have an exact match */
1688 310388 : if (array)
1689 : {
1690 188102 : if (array->num_elems == -1)
1691 : {
1692 : /* Skip array's new element is tupdatum (or MINVAL/MAXVAL) */
1693 156960 : _bt_skiparray_set_element(rel, cur, array, result,
1694 : tupdatum, tupnull);
1695 156960 : skip_array_advanced = true;
1696 : }
1697 31142 : else if (array->cur_elem != set_elem)
1698 : {
1699 : /* SAOP array's new element is set_elem datum */
1700 23268 : array->cur_elem = set_elem;
1701 23268 : cur->sk_argument = array->elem_values[set_elem];
1702 : }
1703 : }
1704 : }
1705 :
1706 : /*
1707 : * Advance the array keys incrementally whenever "beyond end of array
1708 : * element" array advancement happens, so that advancement will carry to
1709 : * higher-order arrays (might exhaust all the scan's arrays instead, which
1710 : * ends the top-level scan).
1711 : */
1712 199486 : if (beyond_end_advance &&
1713 31070 : !_bt_advance_array_keys_increment(scan, dir, &skip_array_advanced))
1714 8026 : goto end_toplevel_scan;
1715 :
1716 : Assert(_bt_verify_keys_with_arraykeys(scan));
1717 :
1718 : /*
1719 : * Maintain a page-level count of the number of times the scan's array
1720 : * keys advanced in a way that affected at least one skip array
1721 : */
1722 191460 : if (sktrig_required && skip_array_advanced)
1723 162954 : pstate->nskipadvances++;
1724 :
1725 : /*
1726 : * Does tuple now satisfy our new qual? Recheck with _bt_check_compare.
1727 : *
1728 : * Calls triggered by an unsatisfied required scan key, whose tuple now
1729 : * satisfies all required scan keys, but not all nonrequired array keys,
1730 : * will still require a recheck call to _bt_check_compare. They'll still
1731 : * need its "second pass" handling of required inequality scan keys.
1732 : * (Might have missed a still-unsatisfied required inequality scan key
1733 : * that caller didn't detect as the sktrig scan key during its initial
1734 : * _bt_check_compare call that used the old/original qual.)
1735 : *
1736 : * Calls triggered by an unsatisfied nonrequired array scan key never need
1737 : * "second pass" handling of required inequalities (nor any other handling
1738 : * of any required scan key). All that matters is whether caller's tuple
1739 : * satisfies the new qual, so it's safe to just skip the _bt_check_compare
1740 : * recheck when we've already determined that it can only return 'false'.
1741 : *
1742 : * Note: In practice most scan keys are marked required by preprocessing,
1743 : * if necessary by generating a preceding skip array. We nevertheless
1744 : * often handle array keys marked required as if they were nonrequired.
1745 : * This behavior is requested by our _bt_check_compare caller, though only
1746 : * when it is passed "forcenonrequired=true" by _bt_checkkeys.
1747 : */
1748 191460 : if ((sktrig_required && all_required_satisfied) ||
1749 144558 : (!sktrig_required && all_satisfied))
1750 : {
1751 47438 : int nsktrig = sktrig + 1;
1752 : bool continuescan;
1753 :
1754 : Assert(all_required_satisfied);
1755 :
1756 : /* Recheck _bt_check_compare on behalf of caller */
1757 47438 : if (_bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, false,
1758 47438 : !sktrig_required, &continuescan,
1759 47438 : &nsktrig) &&
1760 39674 : !so->scanBehind)
1761 : {
1762 : /* This tuple satisfies the new qual */
1763 : Assert(all_satisfied && continuescan);
1764 :
1765 37316 : if (pstate)
1766 36780 : pstate->continuescan = true;
1767 :
1768 37538 : return true;
1769 : }
1770 :
1771 : /*
1772 : * Consider "second pass" handling of required inequalities.
1773 : *
1774 : * It's possible that our _bt_check_compare call indicated that the
1775 : * scan should end due to some unsatisfied inequality that wasn't
1776 : * initially recognized as such by us. Handle this by calling
1777 : * ourselves recursively, this time indicating that the trigger is the
1778 : * inequality that we missed first time around (and using a set of
1779 : * required array/equality keys that are now exact matches for tuple).
1780 : *
1781 : * We make a strong, general guarantee that every _bt_checkkeys call
1782 : * here will advance the array keys to the maximum possible extent
1783 : * that we can know to be safe based on caller's tuple alone. If we
1784 : * didn't perform this step, then that guarantee wouldn't quite hold.
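 : * 
 : * Illustrative sketch (an assumed example): given "a = ANY
 : * ('{1, 3, 5}') AND b <= 10" with "a"'s array on 1, tuple "(3, 12)"
 : * first advances "a" to the exact match 3. The recheck then stops
 : * on "b <= 10", so we recurse with that inequality as sktrig, and
 : * "beyond end" advancement moves "a"'s array on to 5.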
1785 : */
1786 10122 : if (unlikely(!continuescan))
1787 : {
1788 : bool satisfied PG_USED_FOR_ASSERTS_ONLY;
1789 :
1790 : Assert(sktrig_required);
1791 : Assert(so->keyData[nsktrig].sk_strategy != BTEqualStrategyNumber);
1792 :
1793 : /*
1794 : * The tuple must use "beyond end" advancement during the
1795 : * recursive call, so we cannot possibly end up back here when
1796 : * recursing. We'll consume a small, fixed amount of stack space.
1797 : */
1798 : Assert(!beyond_end_advance);
1799 :
1800 : /* Advance the array keys a second time using same tuple */
1801 222 : satisfied = _bt_advance_array_keys(scan, pstate, tuple, tupnatts,
1802 : tupdesc, nsktrig, true);
1803 :
1804 : /* This tuple doesn't satisfy the inequality */
1805 : Assert(!satisfied);
1806 222 : return false;
1807 : }
1808 :
1809 : /*
1810 : * Some non-required scan key (from new qual) still not satisfied.
1811 : *
1812 : * All scan keys required in the current scan direction must still be
1813 : * satisfied, though, so we can trust all_required_satisfied below.
1814 : */
1815 : }
1816 :
1817 : /*
1818 : * When we were called just to deal with "advancing" non-required arrays,
1819 : * this is as far as we can go (cannot stop the scan for these callers)
1820 : */
1821 153922 : if (!sktrig_required)
1822 : {
1823 : /* Caller's tuple doesn't match any qual */
1824 5766 : return false;
1825 : }
1826 :
1827 : /*
1828 : * Postcondition array state assertion (for still-unsatisfied tuples).
1829 : *
1830 : * By here we have established that the scan's required arrays (scan must
1831 : * have at least one required array) advanced, without becoming exhausted.
1832 : *
1833 : * Caller's tuple is now < the newly advanced array keys (or > when this
1834 : * is a backwards scan), except in the case where we only got this far due
1835 : * to an unsatisfied non-required scan key. Verify that with an assert.
1836 : *
1837 : * Note: we don't just quit at this point when all required scan keys were
1838 : * found to be satisfied because we need to consider edge-cases involving
1839 : * scan keys required in the opposite direction only; those aren't tracked
1840 : * by all_required_satisfied.
1841 : */
1842 : Assert(_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts,
1843 : false, 0, NULL) ==
1844 : !all_required_satisfied);
1845 :
1846 : /*
1847 : * We generally permit primitive index scans to continue onto the next
1848 : * sibling page when the page's finaltup satisfies all required scan keys
1849 : * at the point where we're between pages.
1850 : *
1851 : * If caller's tuple is also the page's finaltup, and we see that required
1852 : * scan keys still aren't satisfied, start a new primitive index scan.
1853 : */
1854 148156 : if (!all_required_satisfied && pstate->finaltup == tuple)
1855 516 : goto new_prim_scan;
1856 :
1857 : /*
1858 : * Proactively check finaltup (don't wait until finaltup is reached by the
1859 : * scan) when it might well turn out to not be satisfied later on.
1860 : *
1861 : * Note: if so->scanBehind hasn't already been set for finaltup by us,
1862 : * it'll be set during this call to _bt_tuple_before_array_skeys. Either
1863 : * way, it'll be set correctly (for the whole page) after this point.
1864 : */
1865 230502 : if (!all_required_satisfied && pstate->finaltup &&
1866 165724 : _bt_tuple_before_array_skeys(scan, dir, pstate->finaltup, tupdesc,
1867 165724 : BTreeTupleGetNAtts(pstate->finaltup, rel),
1868 : false, 0, &so->scanBehind))
1869 17464 : goto new_prim_scan;
1870 :
1871 : /*
1872 : * When we encounter a truncated finaltup high key attribute, we're
1873 : * optimistic about the chances of its corresponding required scan key
1874 : * being satisfied when we go on to recheck it against tuples from this
1875 : * page's right sibling leaf page. We consider truncated attributes to be
1876 : * satisfied by required scan keys, which allows the primitive index scan
1877 : * to continue to the next leaf page. We must set so->scanBehind to true
1878 : * to remember that the last page's finaltup had "satisfied" required scan
1879 : * keys for one or more truncated attribute values (scan keys required in
1880 : * _either_ scan direction).
1881 : *
1882 : * There is a chance that _bt_readpage (which checks so->scanBehind) will
1883 : * find that even the sibling leaf page's finaltup is < the new array
1884 : * keys. When that happens, our optimistic policy will have incurred a
1885 : * single extra leaf page access that could have been avoided.
1886 : *
1887 : * A pessimistic policy would give backward scans a gratuitous advantage
1888 : * over forward scans. We'd punish forward scans for applying more
1889 : * accurate information from the high key, rather than just using the
1890 : * final non-pivot tuple as finaltup, in the style of backward scans.
1891 : * Being pessimistic would also give some scans with non-required arrays a
1892 : * perverse advantage over similar scans that use required arrays instead.
1893 : *
1894 : * This is similar to our scan-level heuristics, below. They also set
1895 : * scanBehind to speculatively continue the primscan onto the next page.
1896 : */
1897 130176 : if (so->scanBehind)
1898 : {
1899 : /* Truncated high key -- _bt_scanbehind_checkkeys recheck scheduled */
1900 : }
1901 :
1902 : /*
1903 : * Handle inequalities marked required in the opposite scan direction.
1904 : * They can also signal that we should start a new primitive index scan.
1905 : *
1906 : * It's possible that the scan is now positioned where "matching" tuples
1907 : * begin, and that caller's tuple satisfies all scan keys required in the
1908 : * current scan direction. But if caller's tuple still doesn't satisfy
1909 : * other scan keys that are required in the opposite scan direction only
1910 : * (e.g., a required >= strategy scan key when scan direction is forward),
1911 : * it's still possible that there are many leaf pages before the page that
1912 : * _bt_first could skip straight to. Groveling through all those pages
1913 : * will always give correct answers, but it can be very inefficient. We
1914 : * must avoid needlessly scanning extra pages.
1915 : *
1916 : * Separately, it's possible that _bt_check_compare set continuescan=false
1917 : * for a scan key that's required in the opposite direction only. This is
1918 : * a special case, that happens only when _bt_check_compare sees that the
1919 : * inequality encountered a NULL value. This signals the end of non-NULL
1920 : * values in the current scan direction, which is reason enough to end the
1921 : * (primitive) scan. If this happens at the start of a large group of
1922 : * NULL values, then we shouldn't expect to be called again until after
1923 : * the scan has already read indefinitely-many leaf pages full of tuples
1924 : * with NULL suffix values. (_bt_first is expected to skip over the group
1925 : * of NULLs by applying a similar "deduce NOT NULL" rule of its own, which
1926 : * involves consing up an explicit SK_SEARCHNOTNULL key.)
1927 : *
1928 : * Apply a test against finaltup to detect and recover from the problem:
1929 : * if even finaltup doesn't satisfy such an inequality, we just skip by
1930 : * starting a new primitive index scan. When we skip, we know for sure
1931 : * that all of the tuples on the current page following caller's tuple are
1932 : * also before the _bt_first-wise start of tuples for our new qual. That
1933 : * at least suggests many more skippable pages beyond the current page.
1934 : * (When so->scanBehind and so->oppositeDirCheck are set, this'll happen
1935 : * when we test the next page's finaltup/high key instead.)
1936 : */
1937 127792 : else if (has_required_opposite_direction_only && pstate->finaltup &&
1938 4326 : unlikely(!_bt_oppodir_checkkeys(scan, dir, pstate->finaltup)))
1939 0 : goto new_prim_scan;
1940 :
1941 127792 : continue_scan:
1942 :
1943 : /*
1944 : * Stick with the ongoing primitive index scan for now.
1945 : *
1946 : * It's possible that later tuples will also turn out to have values that
1947 : * are still < the now-current array keys (or > the current array keys).
1948 : * Our caller will handle this by performing what amounts to a linear
1949 : * search of the page, implemented by calling _bt_check_compare and then
1950 : * _bt_tuple_before_array_skeys for each tuple.
1951 : *
1952 : * This approach has various advantages over a binary search of the page.
1953 : * Repeated binary searches of the page (one binary search for every array
1954 : * advancement) won't outperform a continuous linear search. While there
1955 : * are workloads that a naive linear search won't handle well, our caller
1956 : * has a "look ahead" fallback mechanism to deal with that problem.
1957 : */
1958 131036 : pstate->continuescan = true; /* Override _bt_check_compare */
1959 131036 : so->needPrimScan = false; /* _bt_readpage has more tuples to check */
1960 :
1961 131036 : if (so->scanBehind)
1962 : {
1963 : /*
1964 : * Remember if recheck needs to call _bt_oppodir_checkkeys for next
1965 : * page's finaltup (see above comments about "Handle inequalities
1966 : * marked required in the opposite scan direction" for why).
1967 : */
1968 3244 : so->oppositeDirCheck = has_required_opposite_direction_only;
1969 :
1970 : /*
1971 : * skip by setting "look ahead" mechanism's offnum for forwards scans
1972 : * (backwards scans check scanBehind flag directly instead)
1973 : */
1974 3244 : if (ScanDirectionIsForward(dir))
1975 3226 : pstate->skip = pstate->maxoff + 1;
1976 : }
1977 :
1978 : /* Caller's tuple doesn't match the new qual */
1979 131036 : return false;
1980 :
1981 17980 : new_prim_scan:
1982 :
1983 : Assert(pstate->finaltup); /* not on rightmost/leftmost page */
1984 :
1985 : /*
1986 : * Looks like another primitive index scan is required. But consider
1987 : * continuing the current primscan based on scan-level heuristics.
1988 : *
1989 : * Continue the ongoing primitive scan (and schedule a recheck for when
1990 : * the scan arrives on the next sibling leaf page) when it has already
1991 : * read at least one leaf page before the one we're reading now. This
1992 : * makes primscan scheduling more efficient when scanning subsets of an
1993 : * index with many distinct attribute values matching many array elements.
1994 : * It encourages fewer, larger primitive scans where that makes sense.
1995 : * This will in turn encourage _bt_readpage to apply the pstate.startikey
1996 : * optimization more often.
1997 : *
1998 : * Also continue the ongoing primitive index scan when it is still on the
1999 : * first page if there have been more than NSKIPADVANCES_THRESHOLD calls
2000 : * here that each advanced at least one of the scan's skip arrays
2001 : * (deliberately ignore advancements that only affected SAOP arrays here).
2002 : * A page that cycles through this many skip array elements is quite
2003 : * likely to neighbor similar pages that we'll also need to read.
2004 : *
2005 : * Note: These heuristics aren't as aggressive as you might think. We're
2006 : * conservative about allowing a primitive scan to step from the first
2007 : * leaf page it reads to the page's sibling page (we only allow it on
2008 : * first pages whose finaltup strongly suggests that it'll work out, as
2009 : * well as first pages that have a large number of skip array advances).
2010 : * Clearing this first page finaltup hurdle is a strong signal in itself.
2011 : *
2012 : * Note: The NSKIPADVANCES_THRESHOLD heuristic exists only to avoid
2013 : * pathological cases. Specifically, cases where a skip scan should just
2014 : * behave like a traditional full index scan, but ends up "skipping" again
2015 : * and again, descending to the prior leaf page's direct sibling leaf page
2016 : * each time. This misbehavior would otherwise be possible during scans
2017 : * that never quite manage to "clear the first page finaltup hurdle".
2018 : */
2019 17980 : if (!pstate->firstpage || pstate->nskipadvances > NSKIPADVANCES_THRESHOLD)
2020 : {
2021 : /* Schedule a recheck once on the next (or previous) page */
2022 860 : so->scanBehind = true;
2023 :
2024 : /* Continue the current primitive scan after all */
2025 860 : goto continue_scan;
2026 : }
2027 :
2028 : /*
2029 : * End this primitive index scan, but schedule another.
2030 : *
2031 : * Note: We make a soft assumption that the current scan direction will
2032 : * also be used within _bt_next, when it is asked to step off this page.
2033 : * It is up to _bt_next to cancel this scheduled primitive index scan
2034 : * whenever it steps to a page in the direction opposite currPos.dir.
2035 : */
2036 17120 : pstate->continuescan = false; /* Tell _bt_readpage we're done... */
2037 17120 : so->needPrimScan = true; /* ...but call _bt_first again */
2038 :
2039 17120 : if (scan->parallel_scan)
2040 36 : _bt_parallel_primscan_schedule(scan, so->currPos.currPage);
2041 :
2042 : /* Caller's tuple doesn't match the new qual */
2043 17120 : return false;
2044 :
2045 8026 : end_toplevel_scan:
2046 :
2047 : /*
2048 : * End the current primitive index scan, but don't schedule another.
2049 : *
2050 : * This ends the entire top-level scan in the current scan direction.
2051 : *
2052 : * Note: The scan's arrays (including any non-required arrays) are now in
2053 : * their final positions for the current scan direction. If the scan
2054 : * direction happens to change, then the arrays will already be in their
2055 : * first positions for what will then be the current scan direction.
2056 : */
2057 8026 : pstate->continuescan = false; /* Tell _bt_readpage we're done... */
2058 8026 : so->needPrimScan = false; /* ...and don't call _bt_first again */
2059 :
2060 : /* Caller's tuple doesn't match any qual */
2061 8026 : return false;
2062 : }
2063 :
2064 : #ifdef USE_ASSERT_CHECKING
2065 : /*
2066 : * Verify that the scan's "so->keyData[]" scan keys are in agreement with
2067 : * its array key state
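 : * 
 : * (Debugging aid: only ever called from within assertions, which is
 : * why problems are reported by returning false rather than by
 : * failing a lower-level assertion here.)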
2068 : */
2069 : static bool
2070 : _bt_verify_keys_with_arraykeys(IndexScanDesc scan)
2071 : {
2072 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2073 : int last_sk_attno = InvalidAttrNumber,
2074 : arrayidx = 0;
2075 : bool nonrequiredseen = false;
2076 :
2077 : if (!so->qual_ok)
2078 : return false;
2079 :
2080 : for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
2081 : {
2082 : ScanKey cur = so->keyData + ikey;
2083 : BTArrayKeyInfo *array;
2084 :
2085 : if (cur->sk_strategy != BTEqualStrategyNumber ||
2086 : !(cur->sk_flags & SK_SEARCHARRAY))
2087 : continue;
2088 :
2089 : array = &so->arrayKeys[arrayidx++];
2090 : if (array->scan_key != ikey)
2091 : return false;
2092 :
2093 : if (array->num_elems == 0 || array->num_elems < -1)
2094 : return false;
2095 :
2096 : if (array->num_elems != -1 &&
2097 : cur->sk_argument != array->elem_values[array->cur_elem])
2098 : return false;
2099 : if (cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))
2100 : {
2101 : if (last_sk_attno > cur->sk_attno)
2102 : return false;
2103 : if (nonrequiredseen)
2104 : return false;
2105 : }
2106 : else
2107 : nonrequiredseen = true;
2108 :
2109 : last_sk_attno = cur->sk_attno;
2110 : }
2111 :
2112 : if (arrayidx != so->numArrayKeys)
2113 : return false;
2114 :
2115 : return true;
2116 : }
2117 : #endif
2118 :
2119 : /*
2120 : * Test whether an indextuple satisfies all the scankey conditions.
2121 : *
2122 : * Return true if so, false if not. If the tuple fails to pass the qual,
2123 : * we also determine whether there's any need to continue the scan beyond
2124 : * this tuple, and set pstate.continuescan accordingly. See comments for
2125 : * _bt_preprocess_keys() about how this is done.
2126 : *
2127 : * Forward scan callers can pass a high key tuple in the hopes of having
2128 : * us set *continuescan to false, and avoiding an unnecessary visit to
2129 : * the page to the right.
2130 : *
2131 : * Advances the scan's array keys when necessary for arrayKeys=true callers.
2132 : * Scans without any array keys must always pass arrayKeys=false.
2133 : *
2134 : * Also stops and starts primitive index scans for arrayKeys=true callers.
2135 : * Scans with array keys are required to set up page state that helps us with
2136 : * this. The page's finaltup tuple (the page high key for a forward scan, or
2137 : * the page's first non-pivot tuple for a backward scan) must be set in
2138 : * pstate.finaltup ahead of the first call here for the page. Set this to
2139 : * NULL for rightmost page (or the leftmost page for backwards scans).
2140 : *
2141 : * scan: index scan descriptor (containing a search-type scankey)
2142 : * pstate: page level input and output parameters
2143 : * arrayKeys: should we advance the scan's array keys if necessary?
2144 : * tuple: index tuple to test
2145 : * tupnatts: number of attributes in tuple (high key may be truncated)
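 : * 
 : * Minimal sketch of the expected calling convention (illustrative
 : * only; names and structure loosely follow _bt_readpage):
 : * 
 : *	pstate.finaltup = page's finaltup, or NULL (rightmost/leftmost);
 : *	for each non-pivot tuple on the page:
 : *		if (_bt_checkkeys(scan, &pstate, so->numArrayKeys != 0,
 : *						  tuple, BTreeTupleGetNAtts(tuple, rel)))
 : *			tuple is a match, so save it;
 : *		else if (!pstate.continuescan)
 : *			stop reading the page, no later tuple can match;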
2146 : */
2147 : bool
2148 60882530 : _bt_checkkeys(IndexScanDesc scan, BTReadPageState *pstate, bool arrayKeys,
2149 : IndexTuple tuple, int tupnatts)
2150 : {
2151 60882530 : TupleDesc tupdesc = RelationGetDescr(scan->indexRelation);
2152 60882530 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2153 60882530 : ScanDirection dir = so->currPos.dir;
2154 60882530 : int ikey = pstate->startikey;
2155 : bool res;
2156 :
2157 : Assert(BTreeTupleGetNAtts(tuple, scan->indexRelation) == tupnatts);
2158 : Assert(!so->needPrimScan && !so->scanBehind && !so->oppositeDirCheck);
2159 : Assert(arrayKeys || so->numArrayKeys == 0);
2160 :
2161 60882530 : res = _bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, arrayKeys,
2162 60882530 : pstate->forcenonrequired, &pstate->continuescan,
2163 : &ikey);
2164 :
2165 : /*
2166 : * If _bt_check_compare relied on the pstate.startikey optimization, call
2167 : * again (in assert-enabled builds) to verify it didn't affect our answer.
2168 : *
2169 : * Note: we can't do this when pstate.forcenonrequired, since any arrays
2170 : * before pstate.startikey won't have advanced on this page at all.
2171 : */
2172 : Assert(!pstate->forcenonrequired || arrayKeys);
2173 : #ifdef USE_ASSERT_CHECKING
2174 : if (pstate->startikey > 0 && !pstate->forcenonrequired)
2175 : {
2176 : bool dres,
2177 : dcontinuescan;
2178 : int dikey = 0;
2179 :
2180 : /* Pass arrayKeys=false to avoid array side-effects */
2181 : dres = _bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, false,
2182 : pstate->forcenonrequired, &dcontinuescan,
2183 : &dikey);
2184 : Assert(res == dres);
2185 : Assert(pstate->continuescan == dcontinuescan);
2186 :
2187 : /*
2188 : * Should also get the same ikey result. We need a slightly weaker
2189 : * assertion during arrayKeys calls, since they might be using an
2190 : * array that couldn't be marked required during preprocessing.
2191 : */
2192 : Assert(arrayKeys || ikey == dikey);
2193 : Assert(ikey <= dikey);
2194 : }
2195 : #endif
2196 :
2197 : /*
2198 : * Only one _bt_check_compare call is required in the common case where
2199 : * there are no equality strategy array scan keys. Otherwise we can only
2200 : * accept _bt_check_compare's answer unreservedly when it didn't set
2201 : * pstate.continuescan=false.
2202 : */
2203 60882530 : if (!arrayKeys || pstate->continuescan)
2204 60650860 : return res;
2205 :
2206 : /*
2207 : * _bt_check_compare call set continuescan=false in the presence of
2208 : * equality type array keys. This could mean that the tuple is just past
2209 : * the end of matches for the current array keys.
2210 : *
2211 : * It's also possible that the scan is still _before_ the _start_ of
2212 : * tuples matching the current set of array keys. Check for that first.
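 : * 
 : * For example (illustrative): with "a = ANY ('{10, 20}')" currently
 : * on 20, a tuple with "a = 15" is still before the start of matches
 : * (so we keep scanning the page), whereas a tuple with "a = 25" is
 : * past the end of matches, requiring that the array be advanced
 : * (here, exhausted).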
2213 : */
2214 : Assert(!pstate->forcenonrequired);
2215 231670 : if (_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts, true,
2216 : ikey, NULL))
2217 : {
2218 : /* Override _bt_check_compare, continue primitive scan */
2219 38708 : pstate->continuescan = true;
2220 :
2221 : /*
2222 : * We will end up here repeatedly given a group of tuples > the
2223 : * previous array keys and < the now-current keys (for a backwards
2224 : * scan it's just the same, though the operators swap positions).
2225 : *
2226 : * We must avoid allowing this linear search process to scan very many
2227 : * tuples from well before the start of tuples matching the current
2228 : * array keys (or from well before the point where we'll once again
2229 : * have to advance the scan's array keys).
2230 : *
2231 : * We keep the overhead under control by speculatively "looking ahead"
2232 : * to later still-unscanned items from this same leaf page. We'll
2233 : * only attempt this once the number of tuples that the linear search
2234 : * process has examined starts to get out of hand.
2235 : */
2236 38708 : pstate->rechecks++;
2237 38708 : if (pstate->rechecks >= LOOK_AHEAD_REQUIRED_RECHECKS)
2238 : {
2239 : /* See if we should skip ahead within the current leaf page */
2240 10596 : _bt_checkkeys_look_ahead(scan, pstate, tupnatts, tupdesc);
2241 :
2242 : /*
2243 : * Might have set pstate.skip to a later page offset. When that
2244 : * happens then _bt_readpage caller will inexpensively skip ahead
2245 : * to a later tuple from the same page (the one just after the
2246 : * tuple we successfully "looked ahead" to).
2247 : */
2248 : }
2249 :
2250 : /* This indextuple doesn't match the current qual, in any case */
2251 38708 : return false;
2252 : }
2253 :
2254 : /*
2255 : * Caller's tuple is >= the current set of array keys and other equality
2256 : * constraint scan keys (or <= if this is a backwards scan). It's now
2257 : * clear that we _must_ advance any required array keys in lockstep with
2258 : * the scan.
2259 : */
2260 192962 : return _bt_advance_array_keys(scan, pstate, tuple, tupnatts, tupdesc,
2261 : ikey, true);
2262 : }
2263 :
2264 : /*
2265 : * Test whether caller's finaltup tuple is still before the start of matches
2266 : * for the current array keys.
2267 : *
2268 : * Called at the start of reading a page during a scan with array keys, though
2269 : * only when the so->scanBehind flag was set on the scan's prior page.
2270 : *
2271 : * Returns false if the tuple is still before the start of matches. When that
2272 : * happens, caller should cut its losses and start a new primitive index scan.
2273 : * Otherwise returns true.
2274 : */
2275 : bool
2276 2728 : _bt_scanbehind_checkkeys(IndexScanDesc scan, ScanDirection dir,
2277 : IndexTuple finaltup)
2278 : {
2279 2728 : Relation rel = scan->indexRelation;
2280 2728 : TupleDesc tupdesc = RelationGetDescr(rel);
2281 2728 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2282 2728 : int nfinaltupatts = BTreeTupleGetNAtts(finaltup, rel);
2283 : bool scanBehind;
2284 :
2285 : Assert(so->numArrayKeys);
2286 :
2287 2728 : if (_bt_tuple_before_array_skeys(scan, dir, finaltup, tupdesc,
2288 : nfinaltupatts, false, 0, &scanBehind))
2289 410 : return false;
2290 :
2291 : /*
2292 : * If scanBehind was set, all of the untruncated attribute values from
2293 : * finaltup that correspond to an array match the array's current element,
2294 : * but there are other keys associated with truncated suffix attributes.
2295 : * Array advancement must have incremented the scan's arrays on the
2296 : * previous page, resulting in a set of array keys that happen to be an
2297 : * exact match for the current page high key's untruncated prefix values.
2298 : *
2299 : * This page definitely doesn't contain tuples that the scan will need to
2300 : * return. The next page may or may not contain relevant tuples. Handle
2301 : * this by cutting our losses and starting a new primscan.
2302 : */
2303 2318 : if (scanBehind)
2304 0 : return false;
2305 :
2306 2318 : if (!so->oppositeDirCheck)
2307 2124 : return true;
2308 :
2309 194 : return _bt_oppodir_checkkeys(scan, dir, finaltup);
2310 : }
2311 :
2312 : /*
2313 : * Test whether an indextuple fails to satisfy an inequality required in the
2314 : * opposite direction only.
2315 : *
2316 : * Caller's finaltup tuple is the page high key (for forwards scans), or the
2317 : * first non-pivot tuple (for backwards scans). Called during scans with
2318 : * required array keys and required opposite-direction inequalities.
2319 : *
2320 : * Returns false if an inequality scan key required in the opposite direction
2321 : * only isn't satisfied (and any earlier required scan keys are satisfied).
2322 : * Otherwise returns true.
2323 : *
2324 : * An unsatisfied inequality required in the opposite direction only might
2325 : * well enable skipping over many leaf pages, provided another _bt_first call
2326 : * takes place. This type of unsatisfied inequality won't usually cause
2327 : * _bt_checkkeys to stop the scan to consider array advancement/starting a new
2328 : * primitive index scan.
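 : * 
 : * For example (illustrative): given "a = ANY ('{1, 3, 5}') AND
 : * b >= 10" during a forward scan, "b >= 10" is required in the
 : * backward direction only. If even finaltup has "b < 10" (alongside
 : * an "a" that matches the current array keys), we return false,
 : * since a new _bt_first call could skip straight to "b >= 10"
 : * tuples.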
2329 : */
2330 : static bool
2331 4520 : _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir,
2332 : IndexTuple finaltup)
2333 : {
2334 4520 : Relation rel = scan->indexRelation;
2335 4520 : TupleDesc tupdesc = RelationGetDescr(rel);
2336 4520 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2337 4520 : int nfinaltupatts = BTreeTupleGetNAtts(finaltup, rel);
2338 : bool continuescan;
2339 4520 : ScanDirection flipped = -dir;
2340 4520 : int ikey = 0;
2341 :
2342 : Assert(so->numArrayKeys);
2343 :
2344 4520 : _bt_check_compare(scan, flipped, finaltup, nfinaltupatts, tupdesc, false,
2345 : false, &continuescan,
2346 : &ikey);
2347 :
2348 4520 : if (!continuescan && so->keyData[ikey].sk_strategy != BTEqualStrategyNumber)
2349 0 : return false;
2350 :
2351 4520 : return true;
2352 : }
2353 :
2354 : /*
2355 : * Determines an offset to the first scan key (an so->keyData[]-wise offset)
2356 : * that is _not_ guaranteed to be satisfied by every tuple from pstate.page,
2357 : * which is set in pstate.startikey for _bt_checkkeys calls for the page.
2358 : * This allows caller to save cycles on comparisons of a prefix of keys while
2359 : * reading pstate.page.
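 : * 
 : * For example (illustrative): given "WHERE a = 5 AND b < 100" on a
 : * page whose first and last non-pivot tuples both have "a = 5",
 : * every tuple in between must have "a = 5" too, so startikey can be
 : * set to the "b" key's offset, sparing each per-tuple _bt_checkkeys
 : * call from re-evaluating the "a" key.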
2360 : *
2361 : * Also determines if later calls to _bt_checkkeys (for pstate.page) should be
2362 : * forced to treat all required scan keys >= pstate.startikey as nonrequired
2363 : * (that is, if they're to be treated as if any SK_BT_REQFWD/SK_BT_REQBKWD
2364 : * markings that were set by preprocessing were not set at all, for the
2365 : * duration of _bt_checkkeys calls prior to the call for pstate.finaltup).
2366 : * This is indicated to caller by setting pstate.forcenonrequired.
2367 : *
2368 : * Call here at the start of reading a leaf page beyond the first one for the
2369 : * primitive index scan. We consider all non-pivot tuples, so it doesn't make
2370 : * sense to call here when only a subset of those tuples can ever be read.
2371 : * This is also a good idea on performance grounds; not calling here when on
2372 : * the first page (first for the current primitive scan) avoids wasting cycles
2373 : * during selective point queries. They typically don't stand to gain as much
2374 : * when we can set pstate.startikey, and are likely to notice the overhead of
2375 : * calling here. (Also, allowing pstate.forcenonrequired to be set on a
2376 : * primscan's first page would mislead _bt_advance_array_keys, which expects
2377 : * pstate.nskipadvances to be representative of every first page's key space.)
2378 : *
2379 : * Caller must call _bt_start_array_keys and reset startikey/forcenonrequired
2380 : * ahead of the finaltup _bt_checkkeys call when we set forcenonrequired=true.
2381 : * This will give _bt_checkkeys the opportunity to call _bt_advance_array_keys
2382 : * with sktrig_required=true, restoring the invariant that the scan's required
2383 : * arrays always track the scan's progress through the index's key space.
2384 : * Caller won't need to do this on the rightmost/leftmost page in the index
2385 : * (where pstate.finaltup isn't ever set), since forcenonrequired will never
2386 : * be set here in the first place.
2387 : */
2388 : void
2389 35900 : _bt_set_startikey(IndexScanDesc scan, BTReadPageState *pstate)
2390 : {
2391 35900 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2392 35900 : Relation rel = scan->indexRelation;
2393 35900 : TupleDesc tupdesc = RelationGetDescr(rel);
2394 : ItemId iid;
2395 : IndexTuple firsttup,
2396 : lasttup;
2397 35900 : int startikey = 0,
2398 35900 : arrayidx = 0,
2399 : firstchangingattnum;
2400 35900 : bool start_past_saop_eq = false;
2401 :
2402 : Assert(!so->scanBehind);
2403 : Assert(pstate->minoff < pstate->maxoff);
2404 : Assert(!pstate->firstpage);
2405 : Assert(pstate->startikey == 0);
2406 : Assert(!so->numArrayKeys || pstate->finaltup ||
2407 : P_RIGHTMOST(BTPageGetOpaque(pstate->page)) ||
2408 : P_LEFTMOST(BTPageGetOpaque(pstate->page)));
2409 :
2410 35900 : if (so->numberOfKeys == 0)
2411 12720 : return;
2412 :
2413 : /* minoff is an offset to the lowest non-pivot tuple on the page */
2414 23180 : iid = PageGetItemId(pstate->page, pstate->minoff);
2415 23180 : firsttup = (IndexTuple) PageGetItem(pstate->page, iid);
2416 :
2417 : /* maxoff is an offset to the highest non-pivot tuple on the page */
2418 23180 : iid = PageGetItemId(pstate->page, pstate->maxoff);
2419 23180 : lasttup = (IndexTuple) PageGetItem(pstate->page, iid);
2420 :
2421 : /* Determine the first attribute whose values change on caller's page */
2422 23180 : firstchangingattnum = _bt_keep_natts_fast(rel, firsttup, lasttup);
2423 :
2424 32776 : for (; startikey < so->numberOfKeys; startikey++)
2425 : {
2426 26202 : ScanKey key = so->keyData + startikey;
2427 : BTArrayKeyInfo *array;
2428 : Datum firstdatum,
2429 : lastdatum;
2430 : bool firstnull,
2431 : lastnull;
2432 : int32 result;
2433 :
2434 : /*
2435 : * Determine if it's safe to set pstate.startikey to an offset to a
2436 : * key that comes after this key, by examining this key
2437 : */
2438 26202 : if (!(key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)))
2439 : {
2440 : /* Scan key isn't marked required (corner case) */
2441 16606 : break; /* unsafe */
2442 : }
2443 26202 : if (key->sk_flags & SK_ROW_HEADER)
2444 : {
2445 : /* RowCompare inequalities currently aren't supported */
2446 0 : break; /* "unsafe" */
2447 : }
2448 26202 : if (key->sk_strategy != BTEqualStrategyNumber)
2449 : {
2450 : /*
2451 : * Scalar inequality key.
2452 : *
2453 : * It's definitely safe for _bt_checkkeys to avoid assessing this
2454 : * inequality when the page's first and last non-pivot tuples both
2455 : * satisfy the inequality (since the same must also be true of all
2456 : * the tuples in between these two).
2457 : *
2458 : * Unlike the "=" case, it doesn't matter if this attribute has
2459 : * more than one distinct value (though it _is_ necessary for any
2460 : * and all _prior_ attributes to contain no more than one distinct
2461 : * value amongst all of the tuples from pstate.page).
2462 : */
2463 4722 : if (key->sk_attno > firstchangingattnum) /* >, not >= */
2464 432 : break; /* unsafe, preceding attr has multiple
2465 : * distinct values */
2466 :
2467 4290 : firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc, &firstnull);
2468 4290 : lastdatum = index_getattr(lasttup, key->sk_attno, tupdesc, &lastnull);
2469 :
2470 4290 : if (key->sk_flags & SK_ISNULL)
2471 : {
2472 : /* IS NOT NULL key */
2473 : Assert(key->sk_flags & SK_SEARCHNOTNULL);
2474 :
2475 110 : if (firstnull || lastnull)
2476 : break; /* unsafe */
2477 :
2478 : /* Safe, IS NOT NULL key satisfied by every tuple */
2479 9346 : continue;
2480 : }
2481 :
2482 : /* Test firsttup */
2483 4180 : if (firstnull ||
2484 4180 : !DatumGetBool(FunctionCall2Coll(&key->sk_func,
2485 : key->sk_collation, firstdatum,
2486 : key->sk_argument)))
2487 : break; /* unsafe */
2488 :
2489 : /* Test lasttup */
2490 4178 : if (lastnull ||
2491 4178 : !DatumGetBool(FunctionCall2Coll(&key->sk_func,
2492 : key->sk_collation, lastdatum,
2493 : key->sk_argument)))
2494 : break; /* unsafe */
2495 :
2496 : /* Safe, scalar inequality satisfied by every tuple */
2497 4072 : continue;
2498 : }
2499 :
2500 : /* Some = key (could be a scalar = key, could be an array = key) */
2501 : Assert(key->sk_strategy == BTEqualStrategyNumber);
2502 :
2503 21480 : if (!(key->sk_flags & SK_SEARCHARRAY))
2504 : {
2505 : /*
2506 : * Scalar = key (possibly an IS NULL key).
2507 : *
2508 : * It is unsafe to set pstate.startikey to an ikey beyond this
2509 : * key, unless the = key is satisfied by every possible tuple on
2510 : * the page (possible only when attribute has just one distinct
2511 : * value among all tuples on the page).
2512 : */
2513 18066 : if (key->sk_attno >= firstchangingattnum)
2514 15420 : break; /* unsafe, multiple distinct attr values */
2515 :
2516 2646 : firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc,
2517 : &firstnull);
2518 2646 : if (key->sk_flags & SK_ISNULL)
2519 : {
2520 : /* IS NULL key */
2521 : Assert(key->sk_flags & SK_SEARCHNULL);
2522 :
2523 0 : if (!firstnull)
2524 0 : break; /* unsafe */
2525 :
2526 : /* Safe, IS NULL key satisfied by every tuple */
2527 0 : continue;
2528 : }
2529 2646 : if (firstnull ||
2530 2646 : !DatumGetBool(FunctionCall2Coll(&key->sk_func,
2531 : key->sk_collation, firstdatum,
2532 : key->sk_argument)))
2533 : break; /* unsafe */
2534 :
2535 : /* Safe, scalar = key satisfied by every tuple */
2536 2646 : continue;
2537 : }
2538 :
2539 : /* = array key (could be a SAOP array, could be a skip array) */
2540 3414 : array = &so->arrayKeys[arrayidx++];
2541 : Assert(array->scan_key == startikey);
2542 3414 : if (array->num_elems != -1)
2543 : {
2544 : /*
2545 : * SAOP array = key.
2546 : *
2547 : * Handle this like we handle scalar = keys (though binary search
2548 : * for a matching element, to avoid relying on key's sk_argument).
2549 : */
2550 580 : if (key->sk_attno >= firstchangingattnum)
2551 580 : break; /* unsafe, multiple distinct attr values */
2552 :
2553 0 : firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc,
2554 : &firstnull);
2555 0 : _bt_binsrch_array_skey(&so->orderProcs[startikey],
2556 : false, NoMovementScanDirection,
2557 : firstdatum, firstnull, array, key,
2558 : &result);
2559 0 : if (result != 0)
2560 0 : break; /* unsafe */
2561 :
2562 : /* Safe, SAOP = key satisfied by every tuple */
2563 0 : start_past_saop_eq = true;
2564 0 : continue;
2565 : }
2566 :
2567 : /*
2568 : * Skip array = key
2569 : */
2570 : Assert(key->sk_flags & SK_BT_SKIP);
2571 2834 : if (array->null_elem)
2572 : {
2573 : /*
2574 : * Non-range skip array = key.
2575 : *
2576 : * Safe, non-range skip array "satisfied" by every tuple on page
2577 : * (safe even when "key->sk_attno > firstchangingattnum").
2578 : */
2579 2518 : continue;
2580 : }
2581 :
2582 : /*
2583 : * Range skip array = key.
2584 : *
2585 : * Handle this like we handle scalar inequality keys (but avoid using
2586 : * key's sk_argument directly, as in the SAOP array case).
2587 : */
2588 316 : if (key->sk_attno > firstchangingattnum) /* >, not >= */
2589 48 : break; /* unsafe, preceding attr has multiple
2590 : * distinct values */
2591 :
2592 268 : firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc, &firstnull);
2593 268 : lastdatum = index_getattr(lasttup, key->sk_attno, tupdesc, &lastnull);
2594 :
2595 : /* Test firsttup */
2596 268 : _bt_binsrch_skiparray_skey(false, ForwardScanDirection,
2597 : firstdatum, firstnull, array, key,
2598 : &result);
2599 268 : if (result != 0)
2600 0 : break; /* unsafe */
2601 :
2602 : /* Test lasttup */
2603 268 : _bt_binsrch_skiparray_skey(false, ForwardScanDirection,
2604 : lastdatum, lastnull, array, key,
2605 : &result);
2606 268 : if (result != 0)
2607 18 : break; /* unsafe */
2608 :
2609 : /* Safe, range skip array satisfied by every tuple on page */
2610 : }
2611 :
2612 : /*
2613 : * Use of forcenonrequired is typically undesirable, since it'll force
2614 : * _bt_readpage caller to read every tuple on the page -- even though, in
2615 : * general, it might well be possible to end the scan on an earlier tuple.
2616 : * However, caller must use forcenonrequired when start_past_saop_eq=true,
2617 : * since the usual required array behavior might fail to roll over to the
2618 : * SAOP array.
2619 : *
2620 : * We always prefer forcenonrequired=true during scans with skip arrays
2621 : * (except on the first page of each primitive index scan), though -- even
2622 : * when "startikey == 0". That way, _bt_advance_array_keys's low-order
2623 : * key precheck optimization can always be used (unless on the first page
2624 : * of the scan). It seems slightly preferable to check more tuples when
2625 : * that allows us to do significantly less skip array maintenance.
2626 : */
2627 23180 : pstate->forcenonrequired = (start_past_saop_eq || so->skipScan);
2628 23180 : pstate->startikey = startikey;
2629 :
2630 : /*
2631 : * _bt_readpage caller is required to call _bt_checkkeys against page's
2632 : * finaltup with forcenonrequired=false whenever we initially set
2633 : * forcenonrequired=true. That way the scan's arrays will reliably track
2634 : * its progress through the index's key space.
2635 : *
2636 : * We don't expect this when _bt_readpage caller has no finaltup due to
2637 : * its page being the rightmost (or the leftmost, during backwards scans).
2638 : * When we see that _bt_readpage has no finaltup, back out of everything.
2639 : */
2640 : Assert(!pstate->forcenonrequired || so->numArrayKeys);
2641 23180 : if (pstate->forcenonrequired && !pstate->finaltup)
2642 : {
2643 470 : pstate->forcenonrequired = false;
2644 470 : pstate->startikey = 0;
2645 : }
2646 : }
2647 :
2648 : /*
2649 : * Test whether an indextuple satisfies current scan condition.
2650 : *
2651 : * Return true if so, false if not. If not, also sets *continuescan to false
2652 : * when it's also not possible for any later tuples to pass the current qual
2653 : * (with the scan's current set of array keys, in the current scan direction),
2654 : * in addition to setting *ikey to the so->keyData[] subscript/offset for the
2655 : * unsatisfied scan key (needed when caller must consider advancing the scan's
2656 : * array keys).
2657 : *
2658 : * This is a subroutine for _bt_checkkeys. We provisionally assume that
2659 : * reaching the end of the current set of required keys (in particular the
2660 : * current required array keys) ends the ongoing (primitive) index scan.
2661 : * Callers without array keys should just end the scan right away when they
2662 : * find that continuescan has been set to false here by us. Things are more
2663 : * complicated for callers with array keys.
2664 : *
2665 : * Callers with array keys must first consider advancing the arrays when
2666 : * continuescan has been set to false here by us. They must then consider if
2667 : * it really does make sense to end the current (primitive) index scan, in
2668 : * light of everything that is known at that point. (In general when we set
2669 : * continuescan=false for these callers it must be treated as provisional.)
2670 : *
2671 : * We deal with advancing unsatisfied non-required arrays directly, though.
2672 : * This is safe, since by definition non-required keys can't end the scan.
2673 : * This is just how we determine if non-required arrays are just unsatisfied
2674 : * by the current array key, or if they're truly unsatisfied (that is, if
2675 : * they're unsatisfied by every possible array key).
2676 : *
2677 : * Pass advancenonrequired=false to avoid all array related side effects.
2678 : * This allows _bt_advance_array_keys caller to avoid infinite recursion.
2679 : *
2680 : * Pass forcenonrequired=true to instruct us to treat all keys as nonrequired.
2681 : * This is used to make it safe to temporarily stop properly maintaining the
2682 : * scan's required arrays. _bt_checkkeys caller (_bt_readpage, actually)
2683 : * determines a prefix of keys that must satisfy every possible corresponding
2684 : * index attribute value from its page, which is passed to us via *ikey arg
2685 : * (this is the first key that might be unsatisfied by tuples on the page).
2686 : * Obviously, we won't maintain any array keys from before *ikey, so it's
2687 : * quite possible for such arrays to "fall behind" the index's keyspace.
2688 : * Caller will need to "catch up" by passing forcenonrequired=false (alongside
2689 : * an *ikey=0) once the page's finaltup is reached.
2690 : *
2691 : * Note: it's safe to pass an *ikey > 0 with forcenonrequired=false, but only
2692 : * when caller determines that it won't affect array maintenance.
2693 : */
2694 : static bool
2695 60943630 : _bt_check_compare(IndexScanDesc scan, ScanDirection dir,
2696 : IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
2697 : bool advancenonrequired, bool forcenonrequired,
2698 : bool *continuescan, int *ikey)
2699 : {
2700 60943630 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2701 :
2702 60943630 : *continuescan = true; /* default assumption */
2703 :
2704 117075866 : for (; *ikey < so->numberOfKeys; (*ikey)++)
2705 : {
2706 68852970 : ScanKey key = so->keyData + *ikey;
2707 : Datum datum;
2708 : bool isNull;
2709 68852970 : bool requiredSameDir = false,
2710 68852970 : requiredOppositeDirOnly = false;
2711 :
2712 : /*
2713 : * Check if the key is required in the current scan direction, in the
2714 : * opposite scan direction _only_, or in neither direction (except
2715 : * when we're forced to treat all scan keys as nonrequired)
2716 : */
2717 68852970 : if (forcenonrequired)
2718 : {
2719 : /* treating scan's keys as non-required */
2720 : }
2721 68383796 : else if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsForward(dir)) ||
2722 14864944 : ((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsBackward(dir)))
2723 53543658 : requiredSameDir = true;
2724 14840138 : else if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsBackward(dir)) ||
2725 5783626 : ((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsForward(dir)))
2726 14840138 : requiredOppositeDirOnly = true;
2727 :
2728 68852970 : if (key->sk_attno > tupnatts)
2729 : {
2730 : /*
2731 : * This attribute is truncated (must be high key). The value for
2732 : * this attribute in the first non-pivot tuple on the page to the
2733 : * right could be any possible value. Assume that truncated
2734 : * attribute passes the qual.
2735 : */
2736 : Assert(BTreeTupleIsPivot(tuple));
2737 19311000 : continue;
2738 : }
2739 :
2740 : /*
2741 : * A skip array scan key uses one of several sentinel values. We just
2742 : * fall back on _bt_tuple_before_array_skeys when we see such a value.
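 : * 
 : * (The sentinels describe a position in the attribute's key space
 : * -- minus/plus infinity, or the next/prior value -- rather than a
 : * real datum, so key->sk_argument cannot simply be compared against
 : * the tuple here.)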
2743 : */
2744 68850516 : if (key->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL |
2745 : SK_BT_NEXT | SK_BT_PRIOR))
2746 : {
2747 : Assert(key->sk_flags & SK_SEARCHARRAY);
2748 : Assert(key->sk_flags & SK_BT_SKIP);
2749 : Assert(requiredSameDir || forcenonrequired);
2750 :
2751 : /*
2752 : * Cannot fall back on _bt_tuple_before_array_skeys when we're
2753 : * treating the scan's keys as nonrequired, though. Just handle
2754 : * this like any other non-required equality-type array key.
2755 : */
2756 36090 : if (forcenonrequired)
2757 12720734 : return _bt_advance_array_keys(scan, NULL, tuple, tupnatts,
2758 : tupdesc, *ikey, false);
2759 :
2760 34062 : *continuescan = false;
2761 34062 : return false;
2762 : }
2763 :
2764 : /* row-comparison keys need special processing */
2765 68814426 : if (key->sk_flags & SK_ROW_HEADER)
2766 : {
2767 2454 : if (_bt_check_rowcompare(key, tuple, tupnatts, tupdesc, dir,
2768 : forcenonrequired, continuescan))
2769 2388 : continue;
2770 66 : return false;
2771 : }
2772 :
2773 68811972 : datum = index_getattr(tuple,
2774 68811972 : key->sk_attno,
2775 : tupdesc,
2776 : &isNull);
2777 :
2778 68811972 : if (key->sk_flags & SK_ISNULL)
2779 : {
2780 : /* Handle IS NULL/NOT NULL tests */
2781 19323930 : if (key->sk_flags & SK_SEARCHNULL)
2782 : {
2783 18128 : if (isNull)
2784 428 : continue; /* tuple satisfies this qual */
2785 : }
2786 : else
2787 : {
2788 : Assert(key->sk_flags & SK_SEARCHNOTNULL);
2789 : Assert(!(key->sk_flags & SK_BT_SKIP));
2790 19305802 : if (!isNull)
2791 19305730 : continue; /* tuple satisfies this qual */
2792 : }
2793 :
2794 : /*
2795 : * Tuple fails this qual. If it's a required qual for the current
2796 : * scan direction, then we can conclude no further tuples will
2797 : * pass, either.
2798 : */
2799 17772 : if (requiredSameDir)
2800 204 : *continuescan = false;
2801 17568 : else if (unlikely(key->sk_flags & SK_BT_SKIP))
2802 : {
2803 : /*
2804 : * If we're treating scan keys as nonrequired, and encounter a
2805 : * skip array scan key whose current element is NULL, then it
2806 : * must be a non-range skip array. It must be satisfied, so
2807 : * there's no need to call _bt_advance_array_keys to check.
2808 : */
2809 : Assert(forcenonrequired && *ikey > 0);
2810 0 : continue;
2811 : }
2812 :
2813 : /*
2814 : * This indextuple doesn't match the qual.
2815 : */
2816 17772 : return false;
2817 : }
2818 :
2819 49488042 : if (isNull)
2820 : {
2821 : /*
2822 : * Scalar scan key isn't satisfied by NULL tuple value.
2823 : *
2824 : * If we're treating scan keys as nonrequired, and key is for a
2825 : * skip array, then we must attempt to advance the array to NULL
2826 : * (if we're successful then the tuple might match the qual).
2827 : */
2828 228 : if (unlikely(forcenonrequired && key->sk_flags & SK_BT_SKIP))
2829 0 : return _bt_advance_array_keys(scan, NULL, tuple, tupnatts,
2830 : tupdesc, *ikey, false);
2831 :
2832 228 : if (key->sk_flags & SK_BT_NULLS_FIRST)
2833 : {
2834 : /*
2835 : * Since NULLs are sorted before non-NULLs, we know we have
2836 : * reached the lower limit of the range of values for this
2837 : * index attr. On a backward scan, we can stop if this qual
2838 : * is one of the "must match" subset. We can stop regardless
2839 : * of whether the qual is > or <, so long as it's required,
2840 : * because it's not possible for any future tuples to pass. On
2841 : * a forward scan, however, we must keep going, because we may
2842 : * have initially positioned to the start of the index.
2843 : * (_bt_advance_array_keys also relies on this behavior during
2844 : * forward scans.)
2845 : */
2846 0 : if ((requiredSameDir || requiredOppositeDirOnly) &&
2847 : ScanDirectionIsBackward(dir))
2848 0 : *continuescan = false;
2849 : }
2850 : else
2851 : {
2852 : /*
2853 : * Since NULLs are sorted after non-NULLs, we know we have
2854 : * reached the upper limit of the range of values for this
2855 : * index attr. On a forward scan, we can stop if this qual is
2856 : * one of the "must match" subset. We can stop regardless of
2857 : * whether the qual is > or <, so long as it's required,
2858 : * because it's not possible for any future tuples to pass. On
2859 : * a backward scan, however, we must keep going, because we
2860 : * may have initially positioned to the end of the index.
2861 : * (_bt_advance_array_keys also relies on this behavior during
2862 : * backward scans.)
2863 : */
2864 228 : if ((requiredSameDir || requiredOppositeDirOnly) &&
2865 : ScanDirectionIsForward(dir))
2866 222 : *continuescan = false;
2867 : }
2868 :
2869 : /*
2870 : * This indextuple doesn't match the qual.
2871 : */
2872 228 : return false;
2873 : }
2874 :
2875 49487814 : if (!DatumGetBool(FunctionCall2Coll(&key->sk_func, key->sk_collation,
2876 : datum, key->sk_argument)))
2877 : {
2878 : /*
2879 : * Tuple fails this qual. If it's a required qual for the current
2880 : * scan direction, then we can conclude no further tuples will
2881 : * pass, either.
2882 : *
2883 : * Note: because we stop the scan as soon as any required equality
2884 : * qual fails, it is critical that equality quals be used for the
2885 : * initial positioning in _bt_first() when they are available. See
2886 : * comments in _bt_first().
2887 : */
2888 12666578 : if (requiredSameDir)
2889 12259118 : *continuescan = false;
2890 :
2891 : /*
2892 : * If this is a non-required equality-type array key, the tuple
2893 : * needs to be checked against every possible array key. Handle
2894 : * this by "advancing" the scan key's array to a matching value
2895 : * (if we're successful then the tuple might match the qual).
2896 : */
2897 407460 : else if (advancenonrequired &&
2898 399918 : key->sk_strategy == BTEqualStrategyNumber &&
2899 310536 : (key->sk_flags & SK_SEARCHARRAY))
2900 7114 : return _bt_advance_array_keys(scan, NULL, tuple, tupnatts,
2901 : tupdesc, *ikey, false);
2902 :
2903 : /*
2904 : * This indextuple doesn't match the qual.
2905 : */
2906 12659464 : return false;
2907 : }
2908 : }
2909 :
2910 : /* If we get here, the tuple passes all index quals. */
2911 48222896 : return true;
2912 : }
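     :
     : /*
     :  * An illustrative example (hypothetical qual, not taken from the source):
     :  * in a forward scan of an ascending index with "WHERE a > 5 AND a < 9",
     :  * the "a < 9" key is required in the forward direction (SK_BT_REQFWD).
     :  * The first tuple that fails it proves that no later tuple can pass, so
     :  * we clear *continuescan.  The "a > 5" key is required in the backward
     :  * direction only (SK_BT_REQBKWD): _bt_first already positioned the scan
     :  * past its boundary, so a tuple that fails it merely fails the qual,
     :  * without ending the scan.
     :  */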
2913 :
2914 : /*
2915 : * Test whether an indextuple satisfies a row-comparison scan condition.
2916 : *
2917 : * Return true if so, false if not. If not, also clear *continuescan if
2918 : * it's not possible for any future tuples in the current scan direction
2919 : * to pass the qual.
2920 : *
2921 : * This is a subroutine for _bt_checkkeys/_bt_check_compare.
2922 : */
2923 : static bool
2924 2454 : _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
2925 : TupleDesc tupdesc, ScanDirection dir,
2926 : bool forcenonrequired, bool *continuescan)
2927 : {
2928 2454 : ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
2929 2454 : int32 cmpresult = 0;
2930 : bool result;
2931 :
2932 : /* First subkey should be same as the header says */
2933 : Assert(subkey->sk_attno == skey->sk_attno);
2934 :
2935 : /* Loop over columns of the row condition */
2936 : for (;;)
2937 240 : {
2938 : Datum datum;
2939 : bool isNull;
2940 :
2941 : Assert(subkey->sk_flags & SK_ROW_MEMBER);
2942 :
2943 : /* When a NULL row member is compared, the row never matches */
2944 2694 : if (subkey->sk_flags & SK_ISNULL)
2945 : {
2946 : /*
2947 : * Unlike the simple-scankey case, this isn't a disallowed case
2948 : * (except when it's the first row element that has the NULL arg).
2949 : * But it can never match. If all the earlier row comparison
2950 : * columns are required for the scan direction, we can stop the
2951 : * scan, because there can't be another tuple that will succeed.
2952 : */
2953 : Assert(subkey != (ScanKey) DatumGetPointer(skey->sk_argument));
2954 12 : subkey--;
2955 12 : if (forcenonrequired)
2956 : {
2957 : /* treating scan's keys as non-required */
2958 : }
2959 12 : else if ((subkey->sk_flags & SK_BT_REQFWD) &&
2960 : ScanDirectionIsForward(dir))
2961 6 : *continuescan = false;
2962 6 : else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
2963 : ScanDirectionIsBackward(dir))
2964 6 : *continuescan = false;
2965 66 : return false;
2966 : }
2967 :
2968 2682 : if (subkey->sk_attno > tupnatts)
2969 : {
2970 : /*
2971 : * This attribute is truncated (must be high key). The value for
2972 : * this attribute in the first non-pivot tuple on the page to the
2973 : * right could be any possible value. Assume that truncated
2974 : * attribute passes the qual.
2975 : */
2976 : Assert(BTreeTupleIsPivot(tuple));
2977 6 : return true;
2978 : }
2979 :
2980 2676 : datum = index_getattr(tuple,
2981 2676 : subkey->sk_attno,
2982 : tupdesc,
2983 : &isNull);
2984 :
2985 2676 : if (isNull)
2986 : {
2987 : int reqflags;
2988 :
2989 48 : if (forcenonrequired)
2990 : {
2991 : /* treating scan's keys as non-required */
2992 : }
2993 48 : else if (subkey->sk_flags & SK_BT_NULLS_FIRST)
2994 : {
2995 : /*
2996 : * Since NULLs are sorted before non-NULLs, we know we have
2997 : * reached the lower limit of the range of values for this
2998 : * index attr. On a backward scan, we can stop if this qual
2999 : * is one of the "must match" subset. However, on a forwards
3000 : * scan, we must keep going, because we may have initially
3001 : * positioned to the start of the index.
3002 : *
3003 : * All required NULLS FIRST > row members can use NULL tuple
3004 : * values to end backwards scans, just like with other values.
3005 : * A qual "WHERE (a, b, c) > (9, 42, 'foo')" can terminate a
3006 : * backwards scan upon reaching the index's rightmost "a = 9"
3007 : * tuple whose "b" column contains a NULL (if not sooner).
3008 : * Since "b" is NULLS FIRST, we can treat its NULLs as "<" 42.
3009 : */
3010 0 : reqflags = SK_BT_REQBKWD;
3011 :
3012 : /*
3013 : * When a most significant required NULLS FIRST < row compare
3014 : * member sees NULL tuple values during a backwards scan, it
3015 : * signals the end of matches for the whole row compare/scan.
3016 : * A qual "WHERE (a, b, c) < (9, 42, 'foo')" will terminate a
3017 : * backwards scan upon reaching the rightmost tuple whose "a"
3018 : * column has a NULL. The "a" NULL value is "<" 9, and yet
3019 : * our < row compare will still end the scan. (This isn't
3020 : * safe with later/lower-order row members. Notice that it
3021 : * can only happen with an "a" NULL some time after the scan
3022 : * completely stops needing to use its "b" and "c" members.)
3023 : */
3024 0 : if (subkey == (ScanKey) DatumGetPointer(skey->sk_argument))
3025 0 : reqflags |= SK_BT_REQFWD; /* safe, first row member */
3026 :
3027 0 : if ((subkey->sk_flags & reqflags) &&
3028 : ScanDirectionIsBackward(dir))
3029 0 : *continuescan = false;
3030 : }
3031 : else
3032 : {
3033 : /*
3034 : * Since NULLs are sorted after non-NULLs, we know we have
3035 : * reached the upper limit of the range of values for this
3036 : * index attr. On a forward scan, we can stop if this qual is
3037 : * one of the "must match" subset. However, on a backward
3038 : * scan, we must keep going, because we may have initially
3039 : * positioned to the end of the index.
3040 : *
3041 : * All required NULLS LAST < row members can use NULL tuple
3042 : * values to end forwards scans, just like with other values.
3043 : * A qual "WHERE (a, b, c) < (9, 42, 'foo')" can terminate a
3044 : * forwards scan upon reaching the index's leftmost "a = 9"
3045 : * tuple whose "b" column contains a NULL (if not sooner).
3046 : * Since "b" is NULLS LAST, we can treat its NULLs as ">" 42.
3047 : */
3048 48 : reqflags = SK_BT_REQFWD;
3049 :
3050 : /*
3051 : * When a most significant required NULLS LAST > row compare
3052 : * member sees NULL tuple values during a forwards scan, it
3053 : * signals the end of matches for the whole row compare/scan.
3054 : * A qual "WHERE (a, b, c) > (9, 42, 'foo')" will terminate a
3055 : * forwards scan upon reaching the leftmost tuple whose "a"
3056 : * column has a NULL. The "a" NULL value is ">" 9, and yet
3057 : * our > row compare will end the scan. (This isn't safe with
3058 : * later/lower-order row members. Notice that it can only
3059 : * happen with an "a" NULL some time after the scan completely
3060 : * stops needing to use its "b" and "c" members.)
3061 : */
3062 48 : if (subkey == (ScanKey) DatumGetPointer(skey->sk_argument))
3063 0 : reqflags |= SK_BT_REQBKWD; /* safe, first row member */
3064 :
3065 48 : if ((subkey->sk_flags & reqflags) &&
3066 : ScanDirectionIsForward(dir))
3067 0 : *continuescan = false;
3068 : }
3069 :
3070 : /*
3071 : * In any case, this indextuple doesn't match the qual.
3072 : */
3073 48 : return false;
3074 : }
3075 :
3076 : /* Perform the test --- three-way comparison not bool operator */
3077 2628 : cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
3078 : subkey->sk_collation,
3079 : datum,
3080 : subkey->sk_argument));
3081 :
3082 2628 : if (subkey->sk_flags & SK_BT_DESC)
3083 0 : INVERT_COMPARE_RESULT(cmpresult);
3084 :
3085 : /* Done comparing if unequal, else advance to next column */
3086 2628 : if (cmpresult != 0)
3087 2388 : break;
3088 :
3089 240 : if (subkey->sk_flags & SK_ROW_END)
3090 0 : break;
3091 240 : subkey++;
3092 : }
3093 :
3094 : /*
3095 : * At this point cmpresult indicates the overall result of the row
3096 : * comparison, and subkey points to the deciding column (or the last
3097 : * column if the result is "=").
3098 : */
3099 2388 : switch (subkey->sk_strategy)
3100 : {
3101 : /* EQ and NE cases aren't allowed here */
3102 186 : case BTLessStrategyNumber:
3103 186 : result = (cmpresult < 0);
3104 186 : break;
3105 1584 : case BTLessEqualStrategyNumber:
3106 1584 : result = (cmpresult <= 0);
3107 1584 : break;
3108 246 : case BTGreaterEqualStrategyNumber:
3109 246 : result = (cmpresult >= 0);
3110 246 : break;
3111 372 : case BTGreaterStrategyNumber:
3112 372 : result = (cmpresult > 0);
3113 372 : break;
3114 0 : default:
3115 0 : elog(ERROR, "unexpected strategy number %d", subkey->sk_strategy);
3116 : result = 0; /* keep compiler quiet */
3117 : break;
3118 : }
3119 :
3120 2388 : if (!result && !forcenonrequired)
3121 : {
3122 : /*
3123 : * Tuple fails this qual. If it's a required qual for the current
3124 : * scan direction, then we can conclude no further tuples will pass,
3125 : * either. Note we have to look at the deciding column, not
3126 : * necessarily the first or last column of the row condition.
3127 : */
3128 6 : if ((subkey->sk_flags & SK_BT_REQFWD) &&
3129 : ScanDirectionIsForward(dir))
3130 6 : *continuescan = false;
3131 0 : else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
3132 : ScanDirectionIsBackward(dir))
3133 0 : *continuescan = false;
3134 : }
3135 :
3136 2388 : return result;
3137 : }
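     :
     : /*
     :  * A minimal standalone sketch of the rule implemented above: the first
     :  * unequal column decides the whole row compare, and its three-way result
     :  * is then tested against the row's strategy.  The int columns and the
     :  * cmp_int() helper are hypothetical stand-ins for the opclass support
     :  * function; DESC columns and NULLs are ignored for brevity.  Example:
     :  * (9, 43, 0) > (9, 42, 100) is true, because the second column decides.
     :  */
     : static inline int
     : cmp_int(int x, int y)
     : {
     : 	return (x > y) - (x < y);
     : }
     :
     : static bool
     : row_compare_gt(const int *tup, const int *bound, int ncols)
     : {
     : 	int			cmpresult = 0;
     :
     : 	for (int i = 0; i < ncols; i++)
     : 	{
     : 		cmpresult = cmp_int(tup[i], bound[i]);
     : 		if (cmpresult != 0)
     : 			break;			/* this column decides the row compare */
     : 	}
     :
     : 	/* BTGreaterStrategyNumber semantics */
     : 	return cmpresult > 0;
     : }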
3138 :
3139 : /*
3140 : * Determine if a scan with array keys should skip over uninteresting tuples.
3141 : *
3142 : * This is a subroutine for _bt_checkkeys. Called when _bt_readpage's linear
3143 : * search process (started after it finishes reading an initial group of
3144 : * matching tuples, used to locate the start of the next group of tuples
3145 : * matching the next set of required array keys) has already scanned an
3146 : * excessive number of tuples whose key space is "between arrays".
3147 : *
3148 : * When we perform look ahead successfully, we'll set pstate.skip, which
3149 : * instructs _bt_readpage to skip ahead to that tuple next (could be past the
3150 : * end of the scan's leaf page). Pages where the optimization is effective
3151 : * will generally still need to skip several times. Each call here performs
3152 : * only a single "look ahead" comparison of a later tuple, whose distance from
3153 : * the current tuple's offset number is determined by applying heuristics.
3154 : */
3155 : static void
3156 10596 : _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
3157 : int tupnatts, TupleDesc tupdesc)
3158 : {
3159 10596 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
3160 10596 : ScanDirection dir = so->currPos.dir;
3161 : OffsetNumber aheadoffnum;
3162 : IndexTuple ahead;
3163 :
3164 : Assert(!pstate->forcenonrequired);
3165 :
3166 : /* Avoid looking ahead when comparing the page high key */
3167 10596 : if (pstate->offnum < pstate->minoff)
3168 0 : return;
3169 :
3170 : /*
3171 : * Don't look ahead when there aren't enough tuples remaining on the page
3172 : * (in the current scan direction) for it to be worth our while
3173 : */
3174 10596 : if (ScanDirectionIsForward(dir) &&
3175 10518 : pstate->offnum >= pstate->maxoff - LOOK_AHEAD_DEFAULT_DISTANCE)
3176 354 : return;
3177 10242 : else if (ScanDirectionIsBackward(dir) &&
3178 78 : pstate->offnum <= pstate->minoff + LOOK_AHEAD_DEFAULT_DISTANCE)
3179 24 : return;
3180 :
3181 : /*
3182 : * The look ahead distance starts small, and ramps up as each call here
3183 : * allows _bt_readpage to skip over more tuples
3184 : */
3185 10218 : if (!pstate->targetdistance)
3186 6266 : pstate->targetdistance = LOOK_AHEAD_DEFAULT_DISTANCE;
3187 3952 : else if (pstate->targetdistance < MaxIndexTuplesPerPage / 2)
3188 3952 : pstate->targetdistance *= 2;
3189 :
3190 : /* Don't read past the end (or before the start) of the page, though */
3191 10218 : if (ScanDirectionIsForward(dir))
3192 10164 : aheadoffnum = Min((int) pstate->maxoff,
3193 : (int) pstate->offnum + pstate->targetdistance);
3194 : else
3195 54 : aheadoffnum = Max((int) pstate->minoff,
3196 : (int) pstate->offnum - pstate->targetdistance);
3197 :
3198 10218 : ahead = (IndexTuple) PageGetItem(pstate->page,
3199 10218 : PageGetItemId(pstate->page, aheadoffnum));
3200 10218 : if (_bt_tuple_before_array_skeys(scan, dir, ahead, tupdesc, tupnatts,
3201 : false, 0, NULL))
3202 : {
3203 : /*
3204 : * Success -- instruct _bt_readpage to skip ahead to very next tuple
3205 : * after the one we determined was still before the current array keys
3206 : */
3207 3290 : if (ScanDirectionIsForward(dir))
3208 3254 : pstate->skip = aheadoffnum + 1;
3209 : else
3210 36 : pstate->skip = aheadoffnum - 1;
3211 : }
3212 : else
3213 : {
3214 : /*
3215 : * Failure -- "ahead" tuple is too far ahead (we were too aggressive).
3216 : *
3217 : * Reset the number of rechecks, and aggressively reduce the target
3218 : * distance (we're much more aggressive here than we were when the
3219 : * distance was initially ramped up).
3220 : */
3221 6928 : pstate->rechecks = 0;
3222 6928 : pstate->targetdistance = Max(pstate->targetdistance / 8, 1);
3223 : }
3224 : }
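     :
     : /*
     :  * To make the ramp concrete (using LOOK_AHEAD_DEFAULT_DISTANCE of 5, per
     :  * the definition at the top of this file): targetdistance moves through
     :  * 5, 10, 20, 40, ... on successive successful calls, doubling until it
     :  * crosses MaxIndexTuplesPerPage / 2, while a single failed look-ahead
     :  * cuts it to Max(targetdistance / 8, 1) and resets pstate->rechecks.
     :  * The optimization therefore backs off much faster than it ramps up.
     :  */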
3225 :
3226 : /*
3227 : * _bt_killitems - set LP_DEAD state for items an indexscan caller has
3228 : * told us were killed
3229 : *
3230 : * scan->opaque, referenced locally through so, contains information about the
3231 : * current page and killed tuples thereon (generally, this should only be
3232 : * called if so->numKilled > 0).
3233 : *
3234 : * Caller should not have a lock on the so->currPos page, but must hold a
3235 : * buffer pin when !so->dropPin. When we return, it still won't be locked.
3236 : * It'll continue to hold whatever pins were held before calling here.
3237 : *
3238 : * We match items by heap TID before assuming they are the right ones to set
3239 : * LP_DEAD. If the scan is one that holds a buffer pin on the target page
3240 : * continuously from initially reading the items until applying this function
3241 : * (if it is a !so->dropPin scan), VACUUM cannot have deleted any items on the
3242 : * page, so the page's TIDs can't have been recycled by now. There's no risk
3243 : * that we'll confuse a new index tuple that happens to use a recycled TID
3244 : * with a now-removed tuple with the same TID (that used to be on this same
3245 : * page). We can't rely on that during scans that drop buffer pins eagerly
3246 : * (so->dropPin scans), though, so we must condition setting LP_DEAD bits on
3247 : * the page LSN having not changed since back when _bt_readpage saw the page.
3248 : * We totally give up on setting LP_DEAD bits when the page LSN changed.
3249 : *
3250 : * We give up much less often during !so->dropPin scans, but it still happens.
3251 : * We cope with cases where items have moved right due to insertions. If an
3252 : * item has moved off the current page due to a split, we'll fail to find it
3253 : * and just give up on it.
3254 : */
3255 : void
3256 172654 : _bt_killitems(IndexScanDesc scan)
3257 : {
3258 172654 : Relation rel = scan->indexRelation;
3259 172654 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
3260 : Page page;
3261 : BTPageOpaque opaque;
3262 : OffsetNumber minoff;
3263 : OffsetNumber maxoff;
3264 172654 : int numKilled = so->numKilled;
3265 172654 : bool killedsomething = false;
3266 : Buffer buf;
3267 :
3268 : Assert(numKilled > 0);
3269 : Assert(BTScanPosIsValid(so->currPos));
3270 : Assert(scan->heapRelation != NULL); /* can't be a bitmap index scan */
3271 :
3272 : /* Always invalidate so->killedItems[] before leaving so->currPos */
3273 172654 : so->numKilled = 0;
3274 :
3275 172654 : if (!so->dropPin)
3276 : {
3277 : /*
3278 : * We have held the pin on this page since we read the index tuples,
3279 : * so all we need to do is lock it. The pin will have prevented
3280 : * concurrent VACUUMs from recycling any of the TIDs on the page.
3281 : */
3282 : Assert(BTScanPosIsPinned(so->currPos));
3283 38678 : buf = so->currPos.buf;
3284 38678 : _bt_lockbuf(rel, buf, BT_READ);
3285 : }
3286 : else
3287 : {
3288 : XLogRecPtr latestlsn;
3289 :
3290 : Assert(!BTScanPosIsPinned(so->currPos));
3291 : Assert(RelationNeedsWAL(rel));
3292 133976 : buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);
3293 :
3294 133976 : latestlsn = BufferGetLSNAtomic(buf);
3295 : Assert(!XLogRecPtrIsInvalid(so->currPos.lsn));
3296 : Assert(so->currPos.lsn <= latestlsn);
3297 133976 : if (so->currPos.lsn != latestlsn)
3298 : {
3299 : /* Modified, give up on hinting */
3300 122 : _bt_relbuf(rel, buf);
3301 122 : return;
3302 : }
3303 :
3304 : /* Unmodified, hinting is safe */
3305 : }
3306 :
3307 172532 : page = BufferGetPage(buf);
3308 172532 : opaque = BTPageGetOpaque(page);
3309 172532 : minoff = P_FIRSTDATAKEY(opaque);
3310 172532 : maxoff = PageGetMaxOffsetNumber(page);
3311 :
3312 526308 : for (int i = 0; i < numKilled; i++)
3313 : {
3314 353776 : int itemIndex = so->killedItems[i];
3315 353776 : BTScanPosItem *kitem = &so->currPos.items[itemIndex];
3316 353776 : OffsetNumber offnum = kitem->indexOffset;
3317 :
3318 : Assert(itemIndex >= so->currPos.firstItem &&
3319 : itemIndex <= so->currPos.lastItem);
3320 353776 : if (offnum < minoff)
3321 0 : continue; /* pure paranoia */
3322 9548174 : while (offnum <= maxoff)
3323 : {
3324 9478122 : ItemId iid = PageGetItemId(page, offnum);
3325 9478122 : IndexTuple ituple = (IndexTuple) PageGetItem(page, iid);
3326 9478122 : bool killtuple = false;
3327 :
3328 9478122 : if (BTreeTupleIsPosting(ituple))
3329 : {
3330 2954872 : int pi = i + 1;
3331 2954872 : int nposting = BTreeTupleGetNPosting(ituple);
3332 : int j;
3333 :
3334 : /*
3335 : * We rely on the convention that heap TIDs in the scanpos
3336 : * items array are stored in ascending heap TID order for a
3337 : * group of TIDs that originally came from a posting list
3338 : * tuple. This convention even applies during backwards
3339 : * scans, where returning the TIDs in descending order might
3340 : * seem more natural. This is about effectiveness, not
3341 : * correctness.
3342 : *
3343 : * Note that the page may have been modified in almost any way
3344 : * since we first read it (in the !so->dropPin case), so it's
3345 : * possible that this posting list tuple wasn't a posting list
3346 : * tuple when we first encountered its heap TIDs.
3347 : */
3348 3026192 : for (j = 0; j < nposting; j++)
3349 : {
3350 3023928 : ItemPointer item = BTreeTupleGetPostingN(ituple, j);
3351 :
3352 3023928 : if (!ItemPointerEquals(item, &kitem->heapTid))
3353 2952608 : break; /* out of posting list loop */
3354 :
3355 : /*
3356 : * kitem must have matching offnum when heap TIDs match,
3357 : * though only in the common case where the page can't
3358 : * have been concurrently modified
3359 : */
3360 : Assert(kitem->indexOffset == offnum || !so->dropPin);
3361 :
3362 : /*
3363 : * Read-ahead to later kitems here.
3364 : *
3365 : * We rely on the assumption that not advancing kitem here
3366 : * will prevent us from considering the posting list tuple
3367 : * fully dead by not matching its next heap TID in next
3368 : * loop iteration.
3369 : *
3370 : * If, on the other hand, this is the final heap TID in
3371 : * the posting list tuple, then tuple gets killed
3372 : * regardless (i.e. we handle the case where the last
3373 : * kitem is also the last heap TID in the last index tuple
3374 : * correctly -- posting tuple still gets killed).
3375 : */
3376 71320 : if (pi < numKilled)
3377 34694 : kitem = &so->currPos.items[so->killedItems[pi++]];
3378 : }
3379 :
3380 : /*
3381 : * Don't bother advancing the outermost loop's int iterator to
3382 : * avoid processing killed items that relate to the same
3383 : * offnum/posting list tuple. This micro-optimization hardly
3384 : * seems worth it. (Further iterations of the outermost loop
3385 : * will fail to match on this same posting list's first heap
3386 : * TID instead, so we'll advance to the next offnum/index
3387 : * tuple pretty quickly.)
3388 : */
3389 2954872 : if (j == nposting)
3390 2264 : killtuple = true;
3391 : }
3392 6523250 : else if (ItemPointerEquals(&ituple->t_tid, &kitem->heapTid))
3393 282540 : killtuple = true;
3394 :
3395 : /*
3396 : * Mark index item as dead, if it isn't already. Since this
3397 : * happens while holding a buffer lock possibly in shared mode,
3398 : * it's possible that multiple processes attempt to do this
3399 : * simultaneously, leading to multiple full-page images being sent
3400 : * to WAL (if wal_log_hints or data checksums are enabled), which
3401 : * is undesirable.
3402 : */
3403 9478122 : if (killtuple && !ItemIdIsDead(iid))
3404 : {
3405 : /* found the item/all posting list items */
3406 283724 : ItemIdMarkDead(iid);
3407 283724 : killedsomething = true;
3408 283724 : break; /* out of inner search loop */
3409 : }
3410 9194398 : offnum = OffsetNumberNext(offnum);
3411 : }
3412 : }
3413 :
3414 : /*
3415 : * Since this can be redone later if needed, mark as dirty hint.
3416 : *
3417 : * Whenever we mark anything LP_DEAD, we also set the page's
3418 : * BTP_HAS_GARBAGE flag, which is likewise just a hint. (Note that we
3419 : * only rely on the page-level flag in !heapkeyspace indexes.)
3420 : */
3421 172532 : if (killedsomething)
3422 : {
3423 134468 : opaque->btpo_flags |= BTP_HAS_GARBAGE;
3424 134468 : MarkBufferDirtyHint(buf, true);
3425 : }
3426 :
3427 172532 : if (!so->dropPin)
3428 38678 : _bt_unlockbuf(rel, buf);
3429 : else
3430 133854 : _bt_relbuf(rel, buf);
3431 : }
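     :
     : /*
     :  * For context, index scans queue up killed items long before reaching
     :  * _bt_killitems.  The caller-side pattern looks roughly like this
     :  * (cf. btgettuple; a sketch, not a verbatim copy):
     :  *
     :  *		if (so->killedItems == NULL)
     :  *			so->killedItems = (int *)
     :  *				palloc(MaxTIDsPerBTreePage * sizeof(int));
     :  *		if (so->numKilled < MaxTIDsPerBTreePage)
     :  *			so->killedItems[so->numKilled++] = so->currPos.itemIndex;
     :  */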
3432 :
3433 :
3434 : /*
3435 : * The following routines manage a shared-memory area in which we track
3436 : * assignment of "vacuum cycle IDs" to currently-active btree vacuuming
3437 : * operations. There is a single counter which increments each time we
3438 : * start a vacuum to assign it a cycle ID. Since multiple vacuums could
3439 : * be active concurrently, we have to track the cycle ID for each active
3440 : * vacuum; this requires at most MaxBackends entries (usually far fewer).
3441 : * We assume at most one vacuum can be active for a given index.
3442 : *
3443 : * Access to the shared memory area is controlled by BtreeVacuumLock.
3444 : * In principle we could use a separate lmgr locktag for each index,
3445 : * but a single LWLock is much cheaper, and given the short time that
3446 : * the lock is ever held, the concurrency hit should be minimal.
3447 : */
3448 :
3449 : typedef struct BTOneVacInfo
3450 : {
3451 : LockRelId relid; /* global identifier of an index */
3452 : BTCycleId cycleid; /* cycle ID for its active VACUUM */
3453 : } BTOneVacInfo;
3454 :
3455 : typedef struct BTVacInfo
3456 : {
3457 : BTCycleId cycle_ctr; /* cycle ID most recently assigned */
3458 : int num_vacuums; /* number of currently active VACUUMs */
3459 : int max_vacuums; /* allocated length of vacuums[] array */
3460 : BTOneVacInfo vacuums[FLEXIBLE_ARRAY_MEMBER];
3461 : } BTVacInfo;
3462 :
3463 : static BTVacInfo *btvacinfo;
3464 :
3465 :
3466 : /*
3467 : * _bt_vacuum_cycleid --- get the active vacuum cycle ID for an index,
3468 : * or zero if there is no active VACUUM
3469 : *
3470 : * Note: for correct interlocking, the caller must already hold pin and
3471 : * exclusive lock on each buffer it will store the cycle ID into. This
3472 : * ensures that even if a VACUUM starts immediately afterwards, it cannot
3473 : * process those pages until the page split is complete.
3474 : */
3475 : BTCycleId
3476 22604 : _bt_vacuum_cycleid(Relation rel)
3477 : {
3478 22604 : BTCycleId result = 0;
3479 : int i;
3480 :
3481 : /* Share lock is enough since this is a read-only operation */
3482 22604 : LWLockAcquire(BtreeVacuumLock, LW_SHARED);
3483 :
3484 22638 : for (i = 0; i < btvacinfo->num_vacuums; i++)
3485 : {
3486 38 : BTOneVacInfo *vac = &btvacinfo->vacuums[i];
3487 :
3488 38 : if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
3489 4 : vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
3490 : {
3491 4 : result = vac->cycleid;
3492 4 : break;
3493 : }
3494 : }
3495 :
3496 22604 : LWLockRelease(BtreeVacuumLock);
3497 22604 : return result;
3498 : }
3499 :
3500 : /*
3501 : * _bt_start_vacuum --- assign a cycle ID to a just-starting VACUUM operation
3502 : *
3503 : * Note: the caller must guarantee that it will eventually call
3504 : * _bt_end_vacuum, else we'll permanently leak an array slot. To ensure
3505 : * that this happens even in elog(FATAL) scenarios, the appropriate coding
3506 : * is not just a PG_TRY, but
3507 : * PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel))
3508 : */
3509 : BTCycleId
3510 2914 : _bt_start_vacuum(Relation rel)
3511 : {
3512 : BTCycleId result;
3513 : int i;
3514 : BTOneVacInfo *vac;
3515 :
3516 2914 : LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
3517 :
3518 : /*
3519 : * Assign the next cycle ID, being careful to avoid zero as well as the
3520 : * reserved high values.
3521 : */
3522 2914 : result = ++(btvacinfo->cycle_ctr);
3523 2914 : if (result == 0 || result > MAX_BT_CYCLE_ID)
3524 0 : result = btvacinfo->cycle_ctr = 1;
3525 :
3526 : /* Let's just make sure there's no entry already for this index */
3527 2920 : for (i = 0; i < btvacinfo->num_vacuums; i++)
3528 : {
3529 6 : vac = &btvacinfo->vacuums[i];
3530 6 : if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
3531 0 : vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
3532 : {
3533 : /*
3534 : * Unlike most places in the backend, we have to explicitly
3535 : * release our LWLock before throwing an error. This is because
3536 : * we expect _bt_end_vacuum() to be called before transaction
3537 : * abort cleanup can run to release LWLocks.
3538 : */
3539 0 : LWLockRelease(BtreeVacuumLock);
3540 0 : elog(ERROR, "multiple active vacuums for index \"%s\"",
3541 : RelationGetRelationName(rel));
3542 : }
3543 : }
3544 :
3545 : /* OK, add an entry */
3546 2914 : if (btvacinfo->num_vacuums >= btvacinfo->max_vacuums)
3547 : {
3548 0 : LWLockRelease(BtreeVacuumLock);
3549 0 : elog(ERROR, "out of btvacinfo slots");
3550 : }
3551 2914 : vac = &btvacinfo->vacuums[btvacinfo->num_vacuums];
3552 2914 : vac->relid = rel->rd_lockInfo.lockRelId;
3553 2914 : vac->cycleid = result;
3554 2914 : btvacinfo->num_vacuums++;
3555 :
3556 2914 : LWLockRelease(BtreeVacuumLock);
3557 2914 : return result;
3558 : }
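     :
     : /*
     :  * A sketch of the caller pattern that the comment above prescribes,
     :  * modelled on btbulkdelete.  The function name is hypothetical, and the
     :  * PG_ENSURE_ERROR_CLEANUP macros assume that storage/ipc.h is included.
     :  */
     : static void
     : example_vacuum_with_cycleid(Relation rel)
     : {
     : 	BTCycleId	cycleid;
     :
     : 	/* The ENSURE stuff ensures we clean up shared memory on failure */
     : 	PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
     : 	{
     : 		cycleid = _bt_start_vacuum(rel);
     :
     : 		/* ... a vacuum scan would stamp split pages with cycleid here ... */
     : 		(void) cycleid;
     : 	}
     : 	PG_END_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
     : 	_bt_end_vacuum(rel);
     : }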
3559 :
3560 : /*
3561 : * _bt_end_vacuum --- mark a btree VACUUM operation as done
3562 : *
3563 : * Note: this is deliberately coded not to complain if no entry is found;
3564 : * this allows the caller to put PG_TRY around the start_vacuum operation.
3565 : */
3566 : void
3567 2914 : _bt_end_vacuum(Relation rel)
3568 : {
3569 : int i;
3570 :
3571 2914 : LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
3572 :
3573 : /* Find the array entry */
3574 2920 : for (i = 0; i < btvacinfo->num_vacuums; i++)
3575 : {
3576 2920 : BTOneVacInfo *vac = &btvacinfo->vacuums[i];
3577 :
3578 2920 : if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
3579 2914 : vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
3580 : {
3581 : /* Remove it by shifting down the last entry */
3582 2914 : *vac = btvacinfo->vacuums[btvacinfo->num_vacuums - 1];
3583 2914 : btvacinfo->num_vacuums--;
3584 2914 : break;
3585 : }
3586 : }
3587 :
3588 2914 : LWLockRelease(BtreeVacuumLock);
3589 2914 : }
3590 :
3591 : /*
3592 : * _bt_end_vacuum wrapped as an on_shmem_exit callback function
3593 : */
3594 : void
3595 0 : _bt_end_vacuum_callback(int code, Datum arg)
3596 : {
3597 0 : _bt_end_vacuum((Relation) DatumGetPointer(arg));
3598 0 : }
3599 :
3600 : /*
3601 : * BTreeShmemSize --- report amount of shared memory space needed
3602 : */
3603 : Size
3604 6078 : BTreeShmemSize(void)
3605 : {
3606 : Size size;
3607 :
3608 6078 : size = offsetof(BTVacInfo, vacuums);
3609 6078 : size = add_size(size, mul_size(MaxBackends, sizeof(BTOneVacInfo)));
3610 6078 : return size;
3611 : }
3612 :
3613 : /*
3614 : * BTreeShmemInit --- initialize this module's shared memory
3615 : */
3616 : void
3617 2126 : BTreeShmemInit(void)
3618 : {
3619 : bool found;
3620 :
3621 2126 : btvacinfo = (BTVacInfo *) ShmemInitStruct("BTree Vacuum State",
3622 : BTreeShmemSize(),
3623 : &found);
3624 :
3625 2126 : if (!IsUnderPostmaster)
3626 : {
3627 : /* Initialize shared memory area */
3628 : Assert(!found);
3629 :
3630 : /*
3631 : * It doesn't really matter what the cycle counter starts at, but
3632 : * having it always start the same doesn't seem good. Seed with
3633 : * low-order bits of time() instead.
3634 : */
3635 2126 : btvacinfo->cycle_ctr = (BTCycleId) time(NULL);
3636 :
3637 2126 : btvacinfo->num_vacuums = 0;
3638 2126 : btvacinfo->max_vacuums = MaxBackends;
3639 : }
3640 : else
3641 : Assert(found);
3642 2126 : }
3643 :
3644 : bytea *
3645 354 : btoptions(Datum reloptions, bool validate)
3646 : {
3647 : static const relopt_parse_elt tab[] = {
3648 : {"fillfactor", RELOPT_TYPE_INT, offsetof(BTOptions, fillfactor)},
3649 : {"vacuum_cleanup_index_scale_factor", RELOPT_TYPE_REAL,
3650 : offsetof(BTOptions, vacuum_cleanup_index_scale_factor)},
3651 : {"deduplicate_items", RELOPT_TYPE_BOOL,
3652 : offsetof(BTOptions, deduplicate_items)}
3653 : };
3654 :
3655 354 : return (bytea *) build_reloptions(reloptions, validate,
3656 : RELOPT_KIND_BTREE,
3657 : sizeof(BTOptions),
3658 : tab, lengthof(tab));
3659 : }
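     :
     : /*
     :  * These reloptions surface in SQL as, e.g. (hypothetical index and table
     :  * names):
     :  *
     :  *		CREATE INDEX idx ON tab (col)
     :  *			WITH (fillfactor = 70, deduplicate_items = off);
     :  *
     :  * vacuum_cleanup_index_scale_factor is still parsed, but only for
     :  * backward compatibility with dumps taken from older releases.
     :  */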
3660 :
3661 : /*
3662 : * btproperty() -- Check boolean properties of indexes.
3663 : *
3664 : * This is optional, but handling AMPROP_RETURNABLE here saves opening the rel
3665 : * to call btcanreturn.
3666 : */
3667 : bool
3668 756 : btproperty(Oid index_oid, int attno,
3669 : IndexAMProperty prop, const char *propname,
3670 : bool *res, bool *isnull)
3671 : {
3672 756 : switch (prop)
3673 : {
3674 42 : case AMPROP_RETURNABLE:
3675 : /* answer only for columns, not AM or whole index */
3676 42 : if (attno == 0)
3677 12 : return false;
3678 : /* otherwise, btree can always return data */
3679 30 : *res = true;
3680 30 : return true;
3681 :
3682 714 : default:
3683 714 : return false; /* punt to generic code */
3684 : }
3685 : }
3686 :
3687 : /*
3688 : * btbuildphasename() -- Return name of index build phase.
3689 : */
3690 : char *
3691 0 : btbuildphasename(int64 phasenum)
3692 : {
3693 0 : switch (phasenum)
3694 : {
3695 0 : case PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE:
3696 0 : return "initializing";
3697 0 : case PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN:
3698 0 : return "scanning table";
3699 0 : case PROGRESS_BTREE_PHASE_PERFORMSORT_1:
3700 0 : return "sorting live tuples";
3701 0 : case PROGRESS_BTREE_PHASE_PERFORMSORT_2:
3702 0 : return "sorting dead tuples";
3703 0 : case PROGRESS_BTREE_PHASE_LEAF_LOAD:
3704 0 : return "loading tuples in tree";
3705 0 : default:
3706 0 : return NULL;
3707 : }
3708 : }
3709 :
3710 : /*
3711 : * _bt_truncate() -- create tuple without unneeded suffix attributes.
3712 : *
3713 : * Returns truncated pivot index tuple allocated in caller's memory context,
3714 : * with key attributes copied from caller's firstright argument. If rel is
3715 : * an INCLUDE index, non-key attributes will definitely be truncated away,
3716 : * since they're not part of the key space. More aggressive suffix
3717 : * truncation can take place when it's clear that the returned tuple does not
3718 : * need one or more suffix key attributes. We only need to keep firstright
3719 : * attributes up to and including the first non-lastleft-equal attribute.
3720 : * Caller's insertion scankey is used to compare the tuples; the scankey's
3721 : * argument values are not considered here.
3722 : *
3723 : * Note that returned tuple's t_tid offset will hold the number of attributes
3724 : * present, so the original item pointer offset is not represented. Caller
3725 : * should only change truncated tuple's downlink. Note also that truncated
3726 : * key attributes are treated as containing "minus infinity" values by
3727 : * _bt_compare().
3728 : *
3729 : * In the worst case (when a heap TID must be appended to distinguish lastleft
3730 : * from firstright), the size of the returned tuple is the size of firstright
3731 : * plus the size of an additional MAXALIGN()'d item pointer. This guarantee
3732 : * is important, since callers need to stay under the 1/3 of a page
3733 : * restriction on tuple size. If this routine is ever taught to truncate
3734 : * within an attribute/datum, it will need to avoid returning an enlarged
3735 : * tuple to caller when truncation + TOAST compression ends up enlarging the
3736 : * final datum.
3737 : */
3738 : IndexTuple
3739 63586 : _bt_truncate(Relation rel, IndexTuple lastleft, IndexTuple firstright,
3740 : BTScanInsert itup_key)
3741 : {
3742 63586 : TupleDesc itupdesc = RelationGetDescr(rel);
3743 63586 : int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
3744 : int keepnatts;
3745 : IndexTuple pivot;
3746 : IndexTuple tidpivot;
3747 : ItemPointer pivotheaptid;
3748 : Size newsize;
3749 :
3750 : /*
3751 : * We should only ever truncate non-pivot tuples from leaf pages. It's
3752 : * never okay to truncate when splitting an internal page.
3753 : */
3754 : Assert(!BTreeTupleIsPivot(lastleft) && !BTreeTupleIsPivot(firstright));
3755 :
3756 : /* Determine how many attributes must be kept in truncated tuple */
3757 63586 : keepnatts = _bt_keep_natts(rel, lastleft, firstright, itup_key);
3758 :
3759 : #ifdef DEBUG_NO_TRUNCATE
3760 : /* Force truncation to be ineffective for testing purposes */
3761 : keepnatts = nkeyatts + 1;
3762 : #endif
3763 :
3764 63586 : pivot = index_truncate_tuple(itupdesc, firstright,
3765 : Min(keepnatts, nkeyatts));
3766 :
3767 63586 : if (BTreeTupleIsPosting(pivot))
3768 : {
3769 : /*
3770 : * index_truncate_tuple() just returns a straight copy of firstright
3771 : * when it has no attributes to truncate. When that happens, we may
3772 : * need to truncate away a posting list here instead.
3773 : */
3774 : Assert(keepnatts == nkeyatts || keepnatts == nkeyatts + 1);
3775 : Assert(IndexRelationGetNumberOfAttributes(rel) == nkeyatts);
3776 1304 : pivot->t_info &= ~INDEX_SIZE_MASK;
3777 1304 : pivot->t_info |= MAXALIGN(BTreeTupleGetPostingOffset(firstright));
3778 : }
3779 :
3780 : /*
3781 : * If there is a distinguishing key attribute within pivot tuple, we're
3782 : * done
3783 : */
3784 63586 : if (keepnatts <= nkeyatts)
3785 : {
3786 62440 : BTreeTupleSetNAtts(pivot, keepnatts, false);
3787 62440 : return pivot;
3788 : }
3789 :
3790 : /*
3791 : * We have to store a heap TID in the new pivot tuple, since no non-TID
3792 : * key attribute value in firstright distinguishes the right side of the
3793 : * split from the left side. nbtree conceptualizes this case as an
3794 : * inability to truncate away any key attributes, since heap TID is
3795 : * treated as just another key attribute (despite lacking a pg_attribute
3796 : * entry).
3797 : *
3798 : * Use enlarged space that holds a copy of pivot. We need the extra space
3799 : * to store a heap TID at the end (using the special pivot tuple
3800 : * representation). Note that the original pivot already has firstright's
3801 : * possible posting list/non-key attribute values removed at this point.
3802 : */
3803 1146 : newsize = MAXALIGN(IndexTupleSize(pivot)) + MAXALIGN(sizeof(ItemPointerData));
3804 1146 : tidpivot = palloc0(newsize);
3805 1146 : memcpy(tidpivot, pivot, MAXALIGN(IndexTupleSize(pivot)));
3806 : /* Cannot leak memory here */
3807 1146 : pfree(pivot);
3808 :
3809 : /*
3810 : * Store all of firstright's key attribute values plus a tiebreaker heap
3811 : * TID value in enlarged pivot tuple
3812 : */
3813 1146 : tidpivot->t_info &= ~INDEX_SIZE_MASK;
3814 1146 : tidpivot->t_info |= newsize;
3815 1146 : BTreeTupleSetNAtts(tidpivot, nkeyatts, true);
3816 1146 : pivotheaptid = BTreeTupleGetHeapTID(tidpivot);
3817 :
3818 : /*
3819 : * Lehman & Yao use lastleft as the leaf high key in all cases, but don't
3820 : * consider suffix truncation. It seems like a good idea to follow that
3821 : * example in cases where no truncation takes place -- use lastleft's heap
3822 : * TID. (This is also the closest value to negative infinity that's
3823 : * legally usable.)
3824 : */
3825 1146 : ItemPointerCopy(BTreeTupleGetMaxHeapTID(lastleft), pivotheaptid);
3826 :
3827 : /*
3828 : * We're done. Assert() that heap TID invariants hold before returning.
3829 : *
3830 : * Lehman and Yao require that the downlink to the right page, which is to
3831 : * be inserted into the parent page in the second phase of a page split be
3832 : * a strict lower bound on items on the right page, and a non-strict upper
3833 : * bound for items on the left page. Assert that heap TIDs follow these
3834 : * invariants, since a heap TID value is apparently needed as a
3835 : * tiebreaker.
3836 : */
3837 : #ifndef DEBUG_NO_TRUNCATE
3838 : Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(lastleft),
3839 : BTreeTupleGetHeapTID(firstright)) < 0);
3840 : Assert(ItemPointerCompare(pivotheaptid,
3841 : BTreeTupleGetHeapTID(lastleft)) >= 0);
3842 : Assert(ItemPointerCompare(pivotheaptid,
3843 : BTreeTupleGetHeapTID(firstright)) < 0);
3844 : #else
3845 :
3846 : /*
3847 : * Those invariants aren't guaranteed to hold for lastleft + firstright
3848 : * heap TID attribute values when they're considered here only because
3849 : * DEBUG_NO_TRUNCATE is defined (a heap TID is probably not actually
3850 : * needed as a tiebreaker). DEBUG_NO_TRUNCATE must therefore use a heap
3851 : * TID value that always works as a strict lower bound for items to the
3852 : * right. In particular, it must avoid using firstright's leading key
3853 : * attribute values along with lastleft's heap TID value when lastleft's
3854 : * TID happens to be greater than firstright's TID.
3855 : */
3856 : ItemPointerCopy(BTreeTupleGetHeapTID(firstright), pivotheaptid);
3857 :
3858 : /*
3859 : * Pivot heap TID should never be fully equal to firstright. Note that
3860 : * the pivot heap TID will still end up equal to lastleft's heap TID when
3861 : * that's the only usable value.
3862 : */
3863 : ItemPointerSetOffsetNumber(pivotheaptid,
3864 : OffsetNumberPrev(ItemPointerGetOffsetNumber(pivotheaptid)));
3865 : Assert(ItemPointerCompare(pivotheaptid,
3866 : BTreeTupleGetHeapTID(firstright)) < 0);
3867 : #endif
3868 :
3869 1146 : return tidpivot;
3870 : }
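     :
     : /*
     :  * A worked example with hypothetical values: when a two-column index
     :  * splits between lastleft = ("Aardvark", 123) and firstright =
     :  * ("Zebra", 123), _bt_keep_natts returns 1, and the new pivot is simply
     :  * ("Zebra") -- the second attribute (and any heap TID) is truncated
     :  * away.  Only when every key column of lastleft equals firstright's
     :  * does keepnatts become nkeyatts + 1, forcing the enlarged heap-TID
     :  * pivot built above.
     :  */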
3871 :
3872 : /*
3873 : * _bt_keep_natts - how many key attributes to keep when truncating.
3874 : *
3875 : * Caller provides two tuples that enclose a split point. Caller's insertion
3876 : * scankey is used to compare the tuples; the scankey's argument values are
3877 : * not considered here.
3878 : *
3879 : * This can return a number of attributes that is one greater than the
3880 : * number of key attributes for the index relation. This indicates that the
3881 : * caller must use a heap TID as a unique-ifier in the new pivot tuple.
3882 : */
3883 : static int
3884 63586 : _bt_keep_natts(Relation rel, IndexTuple lastleft, IndexTuple firstright,
3885 : BTScanInsert itup_key)
3886 : {
3887 63586 : int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
3888 63586 : TupleDesc itupdesc = RelationGetDescr(rel);
3889 : int keepnatts;
3890 : ScanKey scankey;
3891 :
3892 : /*
3893 : * _bt_compare() treats truncated key attributes as having the value minus
3894 : * infinity, which would break searches within !heapkeyspace indexes. We
3895 : * must still truncate away non-key attribute values, though.
3896 : */
3897 63586 : if (!itup_key->heapkeyspace)
3898 0 : return nkeyatts;
3899 :
3900 63586 : scankey = itup_key->scankeys;
3901 63586 : keepnatts = 1;
3902 76980 : for (int attnum = 1; attnum <= nkeyatts; attnum++, scankey++)
3903 : {
3904 : Datum datum1,
3905 : datum2;
3906 : bool isNull1,
3907 : isNull2;
3908 :
3909 75834 : datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
3910 75834 : datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
3911 :
3912 75834 : if (isNull1 != isNull2)
3913 62440 : break;
3914 :
3915 151636 : if (!isNull1 &&
3916 75802 : DatumGetInt32(FunctionCall2Coll(&scankey->sk_func,
3917 : scankey->sk_collation,
3918 : datum1,
3919 : datum2)) != 0)
3920 62440 : break;
3921 :
3922 13394 : keepnatts++;
3923 : }
3924 :
3925 : /*
3926 : * Assert that _bt_keep_natts_fast() agrees with us in passing. This is
3927 : * expected in an allequalimage index.
3928 : */
3929 : Assert(!itup_key->allequalimage ||
3930 : keepnatts == _bt_keep_natts_fast(rel, lastleft, firstright));
3931 :
3932 63586 : return keepnatts;
3933 : }
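     :
     : /*
     :  * For instance (hypothetical index on (city, street)): with lastleft =
     :  * ('Boston', 'Main St') and firstright = ('Boston', 'Oak St'), the
     :  * first attribute compares equal and the second does not, so we return
     :  * 2 -- the pivot must carry both attributes to separate the two halves.
     :  */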
3934 :
3935 : /*
3936 : * _bt_keep_natts_fast - fast bitwise variant of _bt_keep_natts.
3937 : *
3938 : * This is exported so that a candidate split point can have its effect on
3939 : * suffix truncation inexpensively evaluated ahead of time when finding a
3940 : * split location. A naive bitwise approach to datum comparisons is used to
3941 : * save cycles.
3942 : *
3943 : * The approach taken here usually provides the same answer as _bt_keep_natts
3944 : * will (for the same pair of tuples from a heapkeyspace index), since the
3945 : * majority of btree opclasses can never indicate that two datums are equal
3946 : * unless they're bitwise equal after detoasting. When an index only has
3947 : * "equal image" columns, routine is guaranteed to give the same result as
3948 : * _bt_keep_natts would.
3949 : *
3950 : * Callers can rely on the fact that attributes considered equal here are
3951 : * definitely also equal according to _bt_keep_natts, even when the index uses
3952 : * an opclass or collation that is not "allequalimage"/deduplication-safe.
3953 : * This weaker guarantee is good enough for nbtsplitloc.c caller, since false
3954 : * negatives generally only have the effect of making leaf page splits use a
3955 : * more balanced split point.
3956 : */
3957 : int
3958 13658302 : _bt_keep_natts_fast(Relation rel, IndexTuple lastleft, IndexTuple firstright)
3959 : {
3960 13658302 : TupleDesc itupdesc = RelationGetDescr(rel);
3961 13658302 : int keysz = IndexRelationGetNumberOfKeyAttributes(rel);
3962 : int keepnatts;
3963 :
3964 13658302 : keepnatts = 1;
3965 22863874 : for (int attnum = 1; attnum <= keysz; attnum++)
3966 : {
3967 : Datum datum1,
3968 : datum2;
3969 : bool isNull1,
3970 : isNull2;
3971 : CompactAttribute *att;
3972 :
3973 20427726 : datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
3974 20427726 : datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
3975 20427726 : att = TupleDescCompactAttr(itupdesc, attnum - 1);
3976 :
3977 20427726 : if (isNull1 != isNull2)
3978 11222154 : break;
3979 :
3980 20427520 : if (!isNull1 &&
3981 20380444 : !datum_image_eq(datum1, datum2, att->attbyval, att->attlen))
3982 11221948 : break;
3983 :
3984 9205572 : keepnatts++;
3985 : }
3986 :
3987 13658302 : return keepnatts;
3988 : }
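     :
     : /*
     :  * An example of a tolerated false negative (hypothetical numeric
     :  * column): 1.0 and 1.00 compare as equal, but their differing display
     :  * scales make them bitwise-unequal, so datum_image_eq() reports them as
     :  * distinct, and this routine stops one attribute short of what
     :  * _bt_keep_natts would allow.  Actual truncation always uses
     :  * _bt_keep_natts, so the false negative merely nudges nbtsplitloc.c
     :  * toward a different (more balanced) split point.
     :  */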
3989 :
3990 : /*
3991 : * _bt_check_natts() -- Verify tuple has expected number of attributes.
3992 : *
3993 : * Returns value indicating if the expected number of attributes were found
3994 : * for a particular offset on page. This can be used as a general purpose
3995 : * sanity check.
3996 : *
3997 : * Testing a tuple directly with BTreeTupleGetNAtts() should generally be
3998 : * preferred to calling here. That's usually more convenient, and is always
3999 : * more explicit. Call here instead when offnum's tuple may be a negative
4000 : * infinity tuple that uses the pre-v11 on-disk representation, or when a
4001 : * low-context check is appropriate. This routine is as strict as possible about
4002 : * what is expected on each version of btree.
4003 : */
4004 : bool
4005 4055512 : _bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
4006 : {
4007 4055512 : int16 natts = IndexRelationGetNumberOfAttributes(rel);
4008 4055512 : int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
4009 4055512 : BTPageOpaque opaque = BTPageGetOpaque(page);
4010 : IndexTuple itup;
4011 : int tupnatts;
4012 :
4013 : /*
4014 : * We cannot reliably test a deleted or half-dead page, since they have
4015 : * dummy high keys
4016 : */
4017 4055512 : if (P_IGNORE(opaque))
4018 0 : return true;
4019 :
4020 : Assert(offnum >= FirstOffsetNumber &&
4021 : offnum <= PageGetMaxOffsetNumber(page));
4022 :
4023 4055512 : itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
4024 4055512 : tupnatts = BTreeTupleGetNAtts(itup, rel);
4025 :
4026 : /* !heapkeyspace indexes do not support deduplication */
4027 4055512 : if (!heapkeyspace && BTreeTupleIsPosting(itup))
4028 0 : return false;
4029 :
4030 : /* Posting list tuples should never have "pivot heap TID" bit set */
4031 4055512 : if (BTreeTupleIsPosting(itup) &&
4032 21810 : (ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
4033 : BT_PIVOT_HEAP_TID_ATTR) != 0)
4034 0 : return false;
4035 :
4036 : /* INCLUDE indexes do not support deduplication */
4037 4055512 : if (natts != nkeyatts && BTreeTupleIsPosting(itup))
4038 0 : return false;
4039 :
4040 4055512 : if (P_ISLEAF(opaque))
4041 : {
4042 4041160 : if (offnum >= P_FIRSTDATAKEY(opaque))
4043 : {
4044 : /*
4045 : * Non-pivot tuple should never be explicitly marked as a pivot
4046 : * tuple
4047 : */
4048 4027928 : if (BTreeTupleIsPivot(itup))
4049 0 : return false;
4050 :
4051 : /*
4052 : * Leaf tuples that are not the page high key (non-pivot tuples)
4053 : * should never be truncated. (Note that tupnatts must have been
4054 : * inferred, even with a posting list tuple, because only pivot
4055 : * tuples store tupnatts directly.)
4056 : */
4057 4027928 : return tupnatts == natts;
4058 : }
4059 : else
4060 : {
4061 : /*
4062 : * Rightmost page doesn't contain a page high key, so tuple was
4063 : * checked above as ordinary leaf tuple
4064 : */
4065 : Assert(!P_RIGHTMOST(opaque));
4066 :
4067 : /*
4068 : * !heapkeyspace high key tuple contains only key attributes. Note
4069 : * that tupnatts will only have been explicitly represented in
4070 : * !heapkeyspace indexes that happen to have non-key attributes.
4071 : */
4072 13232 : if (!heapkeyspace)
4073 0 : return tupnatts == nkeyatts;
4074 :
4075 : /* Use generic heapkeyspace pivot tuple handling */
4076 : }
4077 : }
4078 : else /* !P_ISLEAF(opaque) */
4079 : {
4080 14352 : if (offnum == P_FIRSTDATAKEY(opaque))
4081 : {
4082 : /*
4083 : * The first tuple on any internal page (possibly the first after
4084 : * its high key) is its negative infinity tuple. Negative
4085 : * infinity tuples are always truncated to zero attributes. They
4086 : * are a particular kind of pivot tuple.
4087 : */
4088 1114 : if (heapkeyspace)
4089 1114 : return tupnatts == 0;
4090 :
4091 : /*
4092 : * The number of attributes won't be explicitly represented if the
4093 : * negative infinity tuple was generated during a page split that
4094 : * occurred with a version of Postgres before v11. There must be
4095 : * a problem when there is an explicit representation that is
4096 : * non-zero, or when there is no explicit representation and the
4097 : * tuple is evidently not a pre-pg_upgrade tuple.
4098 : *
4099 : * Prior to v11, downlinks always had P_HIKEY as their offset.
4100 : * Accept that as an alternative indication of a valid
4101 : * !heapkeyspace negative infinity tuple.
4102 : */
4103 0 : return tupnatts == 0 ||
4104 0 : ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY;
4105 : }
4106 : else
4107 : {
4108 : /*
4109 : * !heapkeyspace downlink tuple with separator key contains only
4110 : * key attributes. Note that tupnatts will only have been
4111 : * explicitly represented in !heapkeyspace indexes that happen to
4112 : * have non-key attributes.
4113 : */
4114 13238 : if (!heapkeyspace)
4115 0 : return tupnatts == nkeyatts;
4116 :
4117 : /* Use generic heapkeyspace pivot tuple handling */
4118 : }
4119 : }
4120 :
4121 : /* Handle heapkeyspace pivot tuples (excluding minus infinity items) */
4122 : Assert(heapkeyspace);
4123 :
4124 : /*
4125 : * Explicit representation of the number of attributes is mandatory with
4126 : * heapkeyspace index pivot tuples, regardless of whether or not there are
4127 : * non-key attributes.
4128 : */
4129 26470 : if (!BTreeTupleIsPivot(itup))
4130 0 : return false;
4131 :
4132 : /* Pivot tuple should not use posting list representation (redundant) */
4133 26470 : if (BTreeTupleIsPosting(itup))
4134 0 : return false;
4135 :
4136 : /*
4137 : * Heap TID is a tiebreaker key attribute, so it cannot be untruncated
4138 : * when any other key attribute is truncated
4139 : */
4140 26470 : if (BTreeTupleGetHeapTID(itup) != NULL && tupnatts != nkeyatts)
4141 0 : return false;
4142 :
4143 : /*
4144 : * Pivot tuple must have at least one untruncated key attribute (minus
4145 : * infinity pivot tuples are the only exception). Pivot tuples can never
4146 : * represent that there is a value present for a key attribute that
4147 : * exceeds pg_index.indnkeyatts for the index.
4148 : */
4149 26470 : return tupnatts > 0 && tupnatts <= nkeyatts;
4150 : }
4151 :
4152 : /*
4154 : * _bt_check_third_page() -- check whether tuple fits on a btree page at all.
4155 : *
4156 : * We actually need to be able to fit three items on every page, so restrict
4157 : * any one item to 1/3 the per-page available space. Note that itemsz should
4158 : * not include the ItemId overhead.
4159 : *
4160 : * It might be useful to apply TOAST methods rather than throw an error here.
4161 : * Using out of line storage would break assumptions made by suffix truncation
4162 : * and by contrib/amcheck, though.
4163 : */
4164 : void
4165 264 : _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace,
4166 : Page page, IndexTuple newtup)
4167 : {
4168 : Size itemsz;
4169 : BTPageOpaque opaque;
4170 :
4171 264 : itemsz = MAXALIGN(IndexTupleSize(newtup));
4172 :
4173 : /* Double check item size against limit */
4174 264 : if (itemsz <= BTMaxItemSize)
4175 0 : return;
4176 :
4177 : /*
4178 : * Tuple is probably too large to fit on page, but it's possible that the
4179 : * index uses version 2 or version 3, or that page is an internal page, in
4180 : * which case a slightly higher limit applies.
4181 : */
4182 264 : if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid)
4183 264 : return;
4184 :
4185 : /*
4186 : * Internal page insertions cannot fail here, because that would mean that
4187 : * an earlier leaf level insertion that should have failed didn't
4188 : */
4189 0 : opaque = BTPageGetOpaque(page);
4190 0 : if (!P_ISLEAF(opaque))
4191 0 : elog(ERROR, "cannot insert oversized tuple of size %zu on internal page of index \"%s\"",
4192 : itemsz, RelationGetRelationName(rel));
4193 :
4194 0 : ereport(ERROR,
4195 : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
4196 : errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"",
4197 : itemsz,
4198 : needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION,
4199 : needheaptidspace ? BTMaxItemSize : BTMaxItemSizeNoHeapTid,
4200 : RelationGetRelationName(rel)),
4201 : errdetail("Index row references tuple (%u,%u) in relation \"%s\".",
4202 : ItemPointerGetBlockNumber(BTreeTupleGetHeapTID(newtup)),
4203 : ItemPointerGetOffsetNumber(BTreeTupleGetHeapTID(newtup)),
4204 : RelationGetRelationName(heap)),
4205 : errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
4206 : "Consider a function index of an MD5 hash of the value, "
4207 : "or use full text indexing."),
4208 : errtableconstraint(heap, RelationGetRelationName(rel))));
4209 : }
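     :
     : /*
     :  * The errhint above amounts to this workaround (hypothetical table and
     :  * column names):
     :  *
     :  *		CREATE INDEX ON documents (md5(body));
     :  *
     :  * Equality searches must then be written against md5(body) -- e.g.
     :  * "WHERE md5(body) = md5($1)" -- for the index to be usable.
     :  */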
4210 :
4211 : /*
4212 : * Are all attributes in rel "equality is image equality" attributes?
4213 : *
4214 : * We use each attribute's BTEQUALIMAGE_PROC opclass procedure. If any
4215 : * opclass either lacks a BTEQUALIMAGE_PROC procedure or returns false, we
4216 : * return false; otherwise we return true.
4217 : *
4218 : * Returned boolean value is stored in index metapage during index builds.
4219 : * Deduplication can only be used when we return true.
4220 : */
4221 : bool
4222 59534 : _bt_allequalimage(Relation rel, bool debugmessage)
4223 : {
4224 59534 : bool allequalimage = true;
4225 :
4226 : /* INCLUDE indexes can never support deduplication */
4227 59534 : if (IndexRelationGetNumberOfAttributes(rel) !=
4228 59534 : IndexRelationGetNumberOfKeyAttributes(rel))
4229 292 : return false;
4230 :
4231 156518 : for (int i = 0; i < IndexRelationGetNumberOfKeyAttributes(rel); i++)
4232 : {
4233 97814 : Oid opfamily = rel->rd_opfamily[i];
4234 97814 : Oid opcintype = rel->rd_opcintype[i];
4235 97814 : Oid collation = rel->rd_indcollation[i];
4236 : Oid equalimageproc;
4237 :
4238 97814 : equalimageproc = get_opfamily_proc(opfamily, opcintype, opcintype,
4239 : BTEQUALIMAGE_PROC);
4240 :
4241 : /*
4242 : * If there is no BTEQUALIMAGE_PROC then deduplication is assumed to
4243 : * be unsafe. Otherwise, actually call proc and see what it says.
4244 : */
4245 97814 : if (!OidIsValid(equalimageproc) ||
4246 97320 : !DatumGetBool(OidFunctionCall1Coll(equalimageproc, collation,
4247 : ObjectIdGetDatum(opcintype))))
4248 : {
4249 538 : allequalimage = false;
4250 538 : break;
4251 : }
4252 : }
4253 :
4254 59242 : if (debugmessage)
4255 : {
4256 51212 : if (allequalimage)
4257 50674 : elog(DEBUG1, "index \"%s\" can safely use deduplication",
4258 : RelationGetRelationName(rel));
4259 : else
4260 538 : elog(DEBUG1, "index \"%s\" cannot use deduplication",
4261 : RelationGetRelationName(rel));
4262 : }
4263 :
4264 59242 : return allequalimage;
4265 : }
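     :
     : /*
     :  * Concrete examples: int4, and text under a deterministic collation,
     :  * are safe, since equality implies bitwise equality after detoasting.
     :  * numeric never is (consider 1.0 versus 1.00), and text under a
     :  * nondeterministic collation is not either, since such collations can
     :  * treat bitwise-distinct strings as equal.
     :  */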