Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nbtsearch.c
4 : * Search code for postgres btrees.
5 : *
6 : *
7 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/nbtree/nbtsearch.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 :
16 : #include "postgres.h"
17 :
18 : #include "access/nbtree.h"
19 : #include "access/relscan.h"
20 : #include "access/xact.h"
21 : #include "miscadmin.h"
22 : #include "pgstat.h"
23 : #include "storage/predicate.h"
24 : #include "utils/lsyscache.h"
25 : #include "utils/rel.h"
26 :
27 :
28 : static void _bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp);
29 : static Buffer _bt_moveright(Relation rel, Relation heaprel, BTScanInsert key,
30 : Buffer buf, bool forupdate, BTStack stack,
31 : int access);
32 : static OffsetNumber _bt_binsrch(Relation rel, BTScanInsert key, Buffer buf);
33 : static int _bt_binsrch_posting(BTScanInsert key, Page page,
34 : OffsetNumber offnum);
35 : static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir,
36 : OffsetNumber offnum, bool firstPage);
37 : static void _bt_saveitem(BTScanOpaque so, int itemIndex,
38 : OffsetNumber offnum, IndexTuple itup);
39 : static int _bt_setuppostingitems(BTScanOpaque so, int itemIndex,
40 : OffsetNumber offnum, ItemPointer heapTid,
41 : IndexTuple itup);
42 : static inline void _bt_savepostingitem(BTScanOpaque so, int itemIndex,
43 : OffsetNumber offnum,
44 : ItemPointer heapTid, int tupleOffset);
45 : static inline void _bt_returnitem(IndexScanDesc scan, BTScanOpaque so);
46 : static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir);
47 : static bool _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum,
48 : ScanDirection dir);
49 : static bool _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno,
50 : BlockNumber lastcurrblkno, ScanDirection dir,
51 : bool seized);
52 : static Buffer _bt_lock_and_validate_left(Relation rel, BlockNumber *blkno,
53 : BlockNumber lastcurrblkno);
54 : static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
55 :
56 :
57 : /*
58 : * _bt_drop_lock_and_maybe_pin()
59 : *
60 : * Unlock the buffer, and if it is safe to release the pin, do that too.
61 : * This will prevent vacuum from stalling in a blocked state trying to read a
62 : * page when a cursor is sitting on it.
63 : *
64 : * See nbtree/README section on making concurrent TID recycling safe.
65 : */
66 : static void
67 9192582 : _bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp)
68 : {
69 9192582 : _bt_unlockbuf(scan->indexRelation, sp->buf);
70 :
71 9192582 : if (IsMVCCSnapshot(scan->xs_snapshot) &&
72 8893068 : RelationNeedsWAL(scan->indexRelation) &&
73 8888150 : !scan->xs_want_itup)
74 : {
75 8781306 : ReleaseBuffer(sp->buf);
76 8781306 : sp->buf = InvalidBuffer;
77 : }
78 9192582 : }
79 :
80 : /*
81 : * _bt_search() -- Search the tree for a particular scankey,
82 : * or more precisely for the first leaf page it could be on.
83 : *
84 : * The passed scankey is an insertion-type scankey (see nbtree/README),
85 : * but it can omit the rightmost column(s) of the index.
86 : *
87 : * Return value is a stack of parent-page pointers (i.e. there is no entry for
88 : * the leaf level/page). *bufP is set to the address of the leaf-page buffer,
89 : * which is locked and pinned. No locks are held on the parent pages,
90 : * however!
91 : *
92 : * The returned buffer is locked according to access parameter. Additionally,
93 : * access = BT_WRITE will allow an empty root page to be created and returned.
94 : * When access = BT_READ, an empty index will result in *bufP being set to
95 : * InvalidBuffer. Also, in BT_WRITE mode, any incomplete splits encountered
96 : * during the search will be finished.
97 : *
98 : * heaprel must be provided by callers that pass access = BT_WRITE, since we
99 : * might need to allocate a new root page for caller -- see _bt_allocbuf.
100 : */
101 : BTStack
102 20097758 : _bt_search(Relation rel, Relation heaprel, BTScanInsert key, Buffer *bufP,
103 : int access)
104 : {
105 20097758 : BTStack stack_in = NULL;
106 20097758 : int page_access = BT_READ;
107 :
108 : /* heaprel must be set whenever _bt_allocbuf is reachable */
109 : Assert(access == BT_READ || access == BT_WRITE);
110 : Assert(access == BT_READ || heaprel != NULL);
111 :
112 : /* Get the root page to start with */
113 20097758 : *bufP = _bt_getroot(rel, heaprel, access);
114 :
115 : /* If index is empty and access = BT_READ, no root page is created. */
116 20097758 : if (!BufferIsValid(*bufP))
117 524930 : return (BTStack) NULL;
118 :
119 : /* Loop iterates once per level descended in the tree */
120 : for (;;)
121 16242062 : {
122 : Page page;
123 : BTPageOpaque opaque;
124 : OffsetNumber offnum;
125 : ItemId itemid;
126 : IndexTuple itup;
127 : BlockNumber child;
128 : BTStack new_stack;
129 :
130 : /*
131 : * Race -- the page we just grabbed may have split since we read its
132 : * downlink in its parent page (or the metapage). If it has, we may
133 : * need to move right to its new sibling. Do that.
134 : *
135 : * In write-mode, allow _bt_moveright to finish any incomplete splits
136 : * along the way. Strictly speaking, we'd only need to finish an
137 : * incomplete split on the leaf page we're about to insert to, not on
138 : * any of the upper levels (internal pages with incomplete splits are
139 : * also taken care of in _bt_getstackbuf). But this is a good
140 : * opportunity to finish splits of internal pages too.
141 : */
142 35814890 : *bufP = _bt_moveright(rel, heaprel, key, *bufP, (access == BT_WRITE),
143 : stack_in, page_access);
144 :
145 : /* if this is a leaf page, we're done */
146 35814890 : page = BufferGetPage(*bufP);
147 35814890 : opaque = BTPageGetOpaque(page);
148 35814890 : if (P_ISLEAF(opaque))
149 19572828 : break;
150 :
151 : /*
152 : * Find the appropriate pivot tuple on this page. Its downlink points
153 : * to the child page that we're about to descend to.
154 : */
155 16242062 : offnum = _bt_binsrch(rel, key, *bufP);
156 16242062 : itemid = PageGetItemId(page, offnum);
157 16242062 : itup = (IndexTuple) PageGetItem(page, itemid);
158 : Assert(BTreeTupleIsPivot(itup) || !key->heapkeyspace);
159 16242062 : child = BTreeTupleGetDownLink(itup);
160 :
161 : /*
162 : * We need to save the location of the pivot tuple we chose in a new
163 : * stack entry for this page/level. If caller ends up splitting a
164 : * page one level down, it usually ends up inserting a new pivot
165 : * tuple/downlink immediately after the location recorded here.
166 : */
167 16242062 : new_stack = (BTStack) palloc(sizeof(BTStackData));
168 16242062 : new_stack->bts_blkno = BufferGetBlockNumber(*bufP);
169 16242062 : new_stack->bts_offset = offnum;
170 16242062 : new_stack->bts_parent = stack_in;
171 :
172 : /*
173 : * Page level 1 is the lowest non-leaf level, just above the leaves.
174 : * So, if we're on level 1 and were asked to lock the leaf page in
175 : * write mode, lock the next page in write mode: it must be a leaf.
176 : */
177 16242062 : if (opaque->btpo_level == 1 && access == BT_WRITE)
178 5848656 : page_access = BT_WRITE;
179 :
180 : /* drop the read lock on the page, then acquire one on its child */
181 16242062 : *bufP = _bt_relandgetbuf(rel, *bufP, child, page_access);
182 :
183 : /* okay, all set to move down a level */
184 16242062 : stack_in = new_stack;
185 : }
186 :
187 : /*
188 : * If we're asked to lock leaf in write mode, but didn't manage to, then
189 : * relock. This should only happen when the root page is a leaf page (and
190 : * the only page in the index other than the metapage).
191 : */
192 19572828 : if (access == BT_WRITE && page_access == BT_READ)
193 : {
194 : /* trade in our read lock for a write lock */
195 815096 : _bt_unlockbuf(rel, *bufP);
196 815096 : _bt_lockbuf(rel, *bufP, BT_WRITE);
197 :
198 : /*
199 : * Race -- the leaf page may have split after we dropped the read lock
200 : * but before we acquired a write lock. If it has, we may need to
201 : * move right to its new sibling. Do that.
202 : */
203 815096 : *bufP = _bt_moveright(rel, heaprel, key, *bufP, true, stack_in, BT_WRITE);
204 : }
205 :
206 19572828 : return stack_in;
207 : }
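
/*
 * A toy, self-contained sketch of the descent step above (hypothetical
 * names only; the real descent uses _bt_binsrch and buffer locks).  Each
 * internal level picks the last pivot whose separator key is <= the scan
 * key (the nextkey=true rule; nextkey=false uses strict <), with slot 0
 * acting as "minus infinity" per Lehman & Yao, and records a
 * (blkno, offset) stack entry so a later page split can re-find the spot
 * where a new downlink belongs.
 */
#include <assert.h>
#include <stdio.h>

#define TOY_NPIVOTS 4

/* separator keys; the child at slot i covers keys in [sep[i], sep[i+1]) */
static const int sep[TOY_NPIVOTS] = {0 /* unused: treated as -inf */, 10, 20, 30};

typedef struct ToyStackEntry
{
    int blkno;      /* internal page descended through */
    int offset;     /* pivot slot whose downlink was followed */
} ToyStackEntry;

static int
toy_choose_downlink(int scankey, int blkno, ToyStackEntry *entry)
{
    int slot = 0;   /* slot 0 compares as minus infinity */

    for (int i = 1; i < TOY_NPIVOTS; i++)
    {
        if (sep[i] <= scankey)
            slot = i;
    }
    entry->blkno = blkno;
    entry->offset = slot;
    return slot;    /* caller descends to the child at this slot */
}

int
main(void)
{
    ToyStackEntry entry;

    assert(toy_choose_downlink(5, 1, &entry) == 0);   /* left of all pivots */
    assert(toy_choose_downlink(25, 1, &entry) == 2);  /* 20 <= 25 < 30 */
    assert(toy_choose_downlink(99, 1, &entry) == 3);
    printf("followed downlink at slot %d of block %d\n",
           entry.offset, entry.blkno);
    return 0;
}
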
208 :
209 : /*
210 : * _bt_moveright() -- move right in the btree if necessary.
211 : *
212 : * When we follow a pointer to reach a page, it is possible that
213 : * the page has changed in the meanwhile. If this happens, we're
214 : * guaranteed that the page has "split right" -- that is, that any
215 : * data that appeared on the page originally is either on the page
216 : * or strictly to the right of it.
217 : *
218 : * This routine decides whether or not we need to move right in the
219 : * tree by examining the high key entry on the page. If that entry is
220 : * strictly less than the scankey, or <= the scankey in the
221 : * key.nextkey=true case, then we followed the wrong link and we need
222 : * to move right.
223 : *
224 : * The passed insertion-type scankey can omit the rightmost column(s) of the
225 : * index. (see nbtree/README)
226 : *
227 : * When key.nextkey is false (the usual case), we are looking for the first
228 : * item >= key. When key.nextkey is true, we are looking for the first item
229 : * strictly greater than key.
230 : *
231 : * If forupdate is true, we will attempt to finish any incomplete splits
232 : * that we encounter. This is required when locking a target page for an
233 : * insertion, because we don't allow inserting on a page before the split is
234 : * completed. 'heaprel' and 'stack' are only used if forupdate is true.
235 : *
236 : * On entry, we have the buffer pinned and a lock of the type specified by
237 : * 'access'. If we move right, we release the buffer and lock and acquire
238 : * the same on the right sibling. Return value is the buffer we stop at.
239 : */
240 : static Buffer
241 36629986 : _bt_moveright(Relation rel,
242 : Relation heaprel,
243 : BTScanInsert key,
244 : Buffer buf,
245 : bool forupdate,
246 : BTStack stack,
247 : int access)
248 : {
249 : Page page;
250 : BTPageOpaque opaque;
251 : int32 cmpval;
252 :
253 : Assert(!forupdate || heaprel != NULL);
254 :
255 : /*
256 : * When nextkey = false (normal case): if the scan key that brought us to
257 : * this page is > the high key stored on the page, then the page has split
258 : * and we need to move right. (pg_upgrade'd !heapkeyspace indexes could
259 : * have some duplicates to the right as well as the left, but that's
260 : * something that's only ever dealt with on the leaf level, after
261 : * _bt_search has found an initial leaf page.)
262 : *
263 : * When nextkey = true: move right if the scan key is >= page's high key.
264 : * (Note that key.scantid cannot be set in this case.)
265 : *
266 : * The page could even have split more than once, so scan as far as
267 : * needed.
268 : *
269 : * We also have to move right if we followed a link that brought us to a
270 : * dead page.
271 : */
272 36629986 : cmpval = key->nextkey ? 0 : 1;
273 :
274 : for (;;)
275 : {
276 36631550 : page = BufferGetPage(buf);
277 36631550 : opaque = BTPageGetOpaque(page);
278 :
279 36631550 : if (P_RIGHTMOST(opaque))
280 27942174 : break;
281 :
282 : /*
283 : * Finish any incomplete splits we encounter along the way.
284 : */
285 8689376 : if (forupdate && P_INCOMPLETE_SPLIT(opaque))
286 : {
287 0 : BlockNumber blkno = BufferGetBlockNumber(buf);
288 :
289 : /* upgrade our lock if necessary */
290 0 : if (access == BT_READ)
291 : {
292 0 : _bt_unlockbuf(rel, buf);
293 0 : _bt_lockbuf(rel, buf, BT_WRITE);
294 : }
295 :
296 0 : if (P_INCOMPLETE_SPLIT(opaque))
297 0 : _bt_finish_split(rel, heaprel, buf, stack);
298 : else
299 0 : _bt_relbuf(rel, buf);
300 :
301 : /* re-acquire the lock in the right mode, and re-check */
302 0 : buf = _bt_getbuf(rel, blkno, access);
303 0 : continue;
304 : }
305 :
306 8689376 : if (P_IGNORE(opaque) || _bt_compare(rel, key, page, P_HIKEY) >= cmpval)
307 : {
308 : /* step right one page */
309 1564 : buf = _bt_relandgetbuf(rel, buf, opaque->btpo_next, access);
310 1564 : continue;
311 : }
312 : else
313 : break;
314 : }
315 :
316 36629986 : if (P_IGNORE(opaque))
317 0 : elog(ERROR, "fell off the end of index \"%s\"",
318 : RelationGetRelationName(rel));
319 :
320 36629986 : return buf;
321 : }
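
/*
 * A toy sketch (illustrative only, not part of nbtsearch.c) of the
 * move-right test above: with cmpval = nextkey ? 0 : 1, we step to the
 * right sibling for as long as the scan key compares >= cmpval against
 * the page's high key -- i.e. key > high key when nextkey=false, or
 * key >= high key when nextkey=true.  The page chain and names below
 * are hypothetical.
 */
#include <assert.h>
#include <stdbool.h>

#define TOY_NPAGES 4
#define TOY_RIGHTMOST (-1)      /* marks a page with no high key */

/* page i's right sibling is page i + 1 in this toy */
static const int highkey[TOY_NPAGES] = {10, 20, 30, TOY_RIGHTMOST};

static int
toy_compare(int key, int elem)
{
    return (key > elem) - (key < elem);
}

static int
toy_moveright(int page, int key, bool nextkey)
{
    int cmpval = nextkey ? 0 : 1;

    while (highkey[page] != TOY_RIGHTMOST &&
           toy_compare(key, highkey[page]) >= cmpval)
        page++;                 /* step right one page */
    return page;
}

int
main(void)
{
    assert(toy_moveright(0, 25, false) == 2);   /* 25 > 10 and 25 > 20 */
    assert(toy_moveright(0, 20, false) == 1);   /* 20 > 10 only */
    assert(toy_moveright(0, 20, true) == 2);    /* 20 >= 20 moves too */
    return 0;
}
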
322 :
323 : /*
324 : * _bt_binsrch() -- Do a binary search for a key on a particular page.
325 : *
326 : * On an internal (non-leaf) page, _bt_binsrch() returns the OffsetNumber
327 : * of the last key < given scankey, or last key <= given scankey if nextkey
328 : * is true. (Since _bt_compare treats the first data key of such a page as
329 : * minus infinity, there will be at least one key < scankey, so the result
330 : * always points at one of the keys on the page.)
331 : *
332 : * On a leaf page, _bt_binsrch() returns the final result of the initial
333 : * positioning process that started with _bt_first's call to _bt_search.
334 : * We're returning a non-pivot tuple offset, so things are a little different.
335 : * It is possible that we'll return an offset that's either past the last
336 : * non-pivot slot, or (in the case of a backward scan) before the first slot.
337 : *
338 : * This procedure is not responsible for walking right, it just examines
339 : * the given page. _bt_binsrch() has no lock or refcount side effects
340 : * on the buffer.
341 : */
342 : static OffsetNumber
343 28743542 : _bt_binsrch(Relation rel,
344 : BTScanInsert key,
345 : Buffer buf)
346 : {
347 : Page page;
348 : BTPageOpaque opaque;
349 : OffsetNumber low,
350 : high;
351 : int32 result,
352 : cmpval;
353 :
354 28743542 : page = BufferGetPage(buf);
355 28743542 : opaque = BTPageGetOpaque(page);
356 :
357 : /* Requesting nextkey semantics while using scantid seems nonsensical */
358 : Assert(!key->nextkey || key->scantid == NULL);
359 : /* scantid-set callers must use _bt_binsrch_insert() on leaf pages */
360 : Assert(!P_ISLEAF(opaque) || key->scantid == NULL);
361 :
362 28743542 : low = P_FIRSTDATAKEY(opaque);
363 28743542 : high = PageGetMaxOffsetNumber(page);
364 :
365 : /*
366 : * If there are no keys on the page, return the first available slot. Note
367 : * this covers two cases: the page is really empty (no keys), or it
368 : * contains only a high key. The latter case is possible after vacuuming.
369 : * This can never happen on an internal page, however, since they are
370 : * never empty (an internal page must have at least one child).
371 : */
372 28743542 : if (unlikely(high < low))
373 3022 : return low;
374 :
375 : /*
376 : * Binary search to find the first key on the page >= scan key, or first
377 : * key > scankey when nextkey is true.
378 : *
379 : * For nextkey=false (cmpval=1), the loop invariant is: all slots before
380 : * 'low' are < scan key, all slots at or after 'high' are >= scan key.
381 : *
382 : * For nextkey=true (cmpval=0), the loop invariant is: all slots before
383 : * 'low' are <= scan key, all slots at or after 'high' are > scan key.
384 : *
385 : * We can fall out when high == low.
386 : */
387 28740520 : high++; /* establish the loop invariant for high */
388 :
389 28740520 : cmpval = key->nextkey ? 0 : 1; /* select comparison value */
390 :
391 187250480 : while (high > low)
392 : {
393 158509960 : OffsetNumber mid = low + ((high - low) / 2);
394 :
395 : /* We have low <= mid < high, so mid points at a real slot */
396 :
397 158509960 : result = _bt_compare(rel, key, page, mid);
398 :
399 158509960 : if (result >= cmpval)
400 99998056 : low = mid + 1;
401 : else
402 58511904 : high = mid;
403 : }
404 :
405 : /*
406 : * At this point we have high == low.
407 : *
408 : * On a leaf page we always return the first non-pivot tuple >= scan key
409 : * (resp. > scan key) for forward scan callers. For backward scans, it's
410 : * always the _last_ non-pivot tuple < scan key (resp. <= scan key).
411 : */
412 28740520 : if (P_ISLEAF(opaque))
413 : {
414 : /*
415 : * In the backward scan case we're supposed to locate the last
416 : * matching tuple on the leaf level -- not the first matching tuple
417 : * (the last tuple will be the first one returned by the scan).
418 : *
419 : * At this point we've located the first non-pivot tuple immediately
420 : * after the last matching tuple (which might just be maxoff + 1).
421 : * Compensate by stepping back.
422 : */
423 12498458 : if (key->backward)
424 43280 : return OffsetNumberPrev(low);
425 :
426 12455178 : return low;
427 : }
428 :
429 : /*
430 : * On a non-leaf page, return the last key < scan key (resp. <= scan key).
431 : * There must be one if _bt_compare() is playing by the rules.
432 : *
433 : * _bt_compare() will seldom see any exactly-matching pivot tuples, since
434 : * a truncated -inf heap TID is usually enough to prevent it altogether.
435 : * Even omitted scan key entries are treated as > truncated attributes.
436 : *
437 : * However, during backward scans _bt_compare() interprets omitted scan
438 : * key attributes as == corresponding truncated -inf attributes instead.
439 : * This works just like < would work here. Under this scheme, < strategy
440 : * backward scans will always directly descend to the correct leaf page.
441 : * In particular, they will never incur an "extra" leaf page access with a
442 : * scan key that happens to contain the same prefix of values as some
443 : * pivot tuple's untruncated prefix. VACUUM relies on this guarantee when
444 : * it uses a leaf page high key to "re-find" a page undergoing deletion.
445 : */
446 : Assert(low > P_FIRSTDATAKEY(opaque));
447 :
448 16242062 : return OffsetNumberPrev(low);
449 : }
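
/*
 * A minimal standalone sketch (illustrative names, not PostgreSQL code)
 * of the binary search invariant used by _bt_binsrch() above: encoding
 * nextkey as cmpval so that a single loop finds either the first slot
 * >= key (nextkey=false, cmpval=1) or the first slot > key
 * (nextkey=true, cmpval=0).
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* three-way comparison of key against an element, like _bt_compare() */
static int
toy_compare(int key, int elem)
{
    return (key > elem) - (key < elem);
}

static int
toy_binsrch(const int *arr, int nelems, int key, bool nextkey)
{
    int low = 0;
    int high = nelems;              /* one past the last slot */
    int cmpval = nextkey ? 0 : 1;

    /*
     * Invariant: slots before 'low' compare < key (or <= key when
     * nextkey), slots at or after 'high' compare >= key (or > key).
     */
    while (high > low)
    {
        int mid = low + ((high - low) / 2);
        int result = toy_compare(key, arr[mid]);

        if (result >= cmpval)
            low = mid + 1;
        else
            high = mid;
    }
    return low;                     /* == high; first slot in upper group */
}

int
main(void)
{
    int arr[] = {10, 20, 20, 30};

    assert(toy_binsrch(arr, 4, 20, false) == 1);    /* first slot >= 20 */
    assert(toy_binsrch(arr, 4, 20, true) == 3);     /* first slot > 20 */
    printf("lower/upper bound sketch OK\n");
    return 0;
}
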
450 :
451 : /*
452 : *
453 : * _bt_binsrch_insert() -- Cacheable, incremental leaf page binary search.
454 : *
455 : * Like _bt_binsrch(), but with support for caching the binary search
456 : * bounds. Only used during insertion, and only on the leaf page that it
457 : * looks like caller will insert tuple on. Exclusive-locked and pinned
458 : * leaf page is contained within insertstate.
459 : *
460 : * Caches the bounds fields in insertstate so that a subsequent call can
461 : * reuse the low and strict high bounds of original binary search. Callers
462 : * that use these fields directly must be prepared for the case where low
463 : * and/or stricthigh are not on the same page (one or both exceed maxoff
464 : * for the page). The case where there are no items on the page (high <
465 : * low) makes bounds invalid.
466 : *
467 : * Caller is responsible for invalidating bounds when it modifies the page
468 : * before calling here a second time, and for dealing with posting list
469 : * tuple matches (callers can use insertstate's postingoff field to
470 : * determine which existing heap TID will need to be replaced by a posting
471 : * list split).
472 : */
473 : OffsetNumber
474 11954650 : _bt_binsrch_insert(Relation rel, BTInsertState insertstate)
475 : {
476 11954650 : BTScanInsert key = insertstate->itup_key;
477 : Page page;
478 : BTPageOpaque opaque;
479 : OffsetNumber low,
480 : high,
481 : stricthigh;
482 : int32 result,
483 : cmpval;
484 :
485 11954650 : page = BufferGetPage(insertstate->buf);
486 11954650 : opaque = BTPageGetOpaque(page);
487 :
488 : Assert(P_ISLEAF(opaque));
489 : Assert(!key->nextkey);
490 : Assert(insertstate->postingoff == 0);
491 :
492 11954650 : if (!insertstate->bounds_valid)
493 : {
494 : /* Start new binary search */
495 7158164 : low = P_FIRSTDATAKEY(opaque);
496 7158164 : high = PageGetMaxOffsetNumber(page);
497 : }
498 : else
499 : {
500 : /* Restore result of previous binary search against same page */
501 4796486 : low = insertstate->low;
502 4796486 : high = insertstate->stricthigh;
503 : }
504 :
505 : /* If there are no keys on the page, return the first available slot */
506 11954650 : if (unlikely(high < low))
507 : {
508 : /* Caller can't reuse bounds */
509 21604 : insertstate->low = InvalidOffsetNumber;
510 21604 : insertstate->stricthigh = InvalidOffsetNumber;
511 21604 : insertstate->bounds_valid = false;
512 21604 : return low;
513 : }
514 :
515 : /*
516 : * Binary search to find the first key on the page >= scan key. (nextkey
517 : * is always false when inserting).
518 : *
519 : * The loop invariant is: all slots before 'low' are < scan key, all slots
520 : * at or after 'high' are >= scan key. 'stricthigh' is > scan key, and is
521 : * maintained to save additional search effort for caller.
522 : *
523 : * We can fall out when high == low.
524 : */
525 11933046 : if (!insertstate->bounds_valid)
526 7136560 : high++; /* establish the loop invariant for high */
527 11933046 : stricthigh = high; /* high initially strictly higher */
528 :
529 11933046 : cmpval = 1; /* !nextkey comparison value */
530 :
531 64207546 : while (high > low)
532 : {
533 52274500 : OffsetNumber mid = low + ((high - low) / 2);
534 :
535 : /* We have low <= mid < high, so mid points at a real slot */
536 :
537 52274500 : result = _bt_compare(rel, key, page, mid);
538 :
539 52274500 : if (result >= cmpval)
540 40181404 : low = mid + 1;
541 : else
542 : {
543 12093096 : high = mid;
544 12093096 : if (result != 0)
545 11033402 : stricthigh = high;
546 : }
547 :
548 : /*
549 : * If tuple at offset located by binary search is a posting list whose
550 : * TID range overlaps with caller's scantid, perform posting list
551 : * binary search to set postingoff for caller. Caller must split the
552 : * posting list when postingoff is set. This should happen
553 : * infrequently.
554 : */
555 52274500 : if (unlikely(result == 0 && key->scantid != NULL))
556 : {
557 : /*
558 : * postingoff should never be set more than once per leaf page
559 : * binary search. That would mean that there are duplicate table
560 : * TIDs in the index, which is never okay. Check for that here.
561 : */
562 419892 : if (insertstate->postingoff != 0)
563 0 : ereport(ERROR,
564 : (errcode(ERRCODE_INDEX_CORRUPTED),
565 : errmsg_internal("table tid from new index tuple (%u,%u) cannot find insert offset between offsets %u and %u of block %u in index \"%s\"",
566 : ItemPointerGetBlockNumber(key->scantid),
567 : ItemPointerGetOffsetNumber(key->scantid),
568 : low, stricthigh,
569 : BufferGetBlockNumber(insertstate->buf),
570 : RelationGetRelationName(rel))));
571 :
572 419892 : insertstate->postingoff = _bt_binsrch_posting(key, page, mid);
573 : }
574 : }
575 :
576 : /*
577 : * On a leaf page, a binary search always returns the first key >= scan
578 : * key (at least in !nextkey case), which could be the last slot + 1. This
579 : * is also the lower bound of cached search.
580 : *
581 : * stricthigh may also be the last slot + 1, which prevents caller from
582 : * using bounds directly, but is still useful to us if we're called a
583 : * second time with cached bounds (cached low will be < stricthigh when
584 : * that happens).
585 : */
586 11933046 : insertstate->low = low;
587 11933046 : insertstate->stricthigh = stricthigh;
588 11933046 : insertstate->bounds_valid = true;
589 :
590 11933046 : return low;
591 : }
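
/*
 * A standalone sketch (hypothetical names, not PostgreSQL code) of the
 * bounds-caching idea in _bt_binsrch_insert() above: the first search
 * over a page records [low, stricthigh), and a repeat search with the
 * same key restarts from those bounds instead of the whole page.  The
 * comparison counter just makes the saved effort visible.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct ToyInsertState
{
    bool bounds_valid;
    int  low;
    int  stricthigh;
} ToyInsertState;

static int ncompares = 0;

static int
toy_compare(int key, int elem)
{
    ncompares++;
    return (key > elem) - (key < elem);
}

/* find the first slot >= key, caching bounds for possible reuse */
static int
toy_binsrch_insert(const int *arr, int nelems, int key, ToyInsertState *st)
{
    int low, high, stricthigh;

    if (!st->bounds_valid)
    {
        low = 0;
        high = nelems;              /* establish the loop invariant */
    }
    else
    {
        low = st->low;              /* restore previous search */
        high = st->stricthigh;
    }
    stricthigh = high;

    while (high > low)
    {
        int mid = low + ((high - low) / 2);
        int result = toy_compare(key, arr[mid]);

        if (result >= 1)            /* cmpval == 1: !nextkey */
            low = mid + 1;
        else
        {
            high = mid;
            if (result != 0)
                stricthigh = high;  /* first slot known to be > key */
        }
    }

    st->low = low;
    st->stricthigh = stricthigh;
    st->bounds_valid = true;
    return low;
}

int
main(void)
{
    int arr[] = {10, 20, 20, 20, 30, 40, 50, 60};
    ToyInsertState st = {false, 0, 0};

    int off1 = toy_binsrch_insert(arr, 8, 20, &st);
    int first_pass = ncompares;
    int off2 = toy_binsrch_insert(arr, 8, 20, &st);

    assert(off1 == 1 && off2 == 1);
    printf("pass 1: %d compares, pass 2: %d compares\n",
           first_pass, ncompares - first_pass);
    return 0;
}
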
592 :
593 : /*----------
594 : * _bt_binsrch_posting() -- posting list binary search.
595 : *
596 : * Helper routine for _bt_binsrch_insert().
597 : *
598 : * Returns offset into posting list where caller's scantid belongs.
599 : *----------
600 : */
601 : static int
602 419892 : _bt_binsrch_posting(BTScanInsert key, Page page, OffsetNumber offnum)
603 : {
604 : IndexTuple itup;
605 : ItemId itemid;
606 : int low,
607 : high,
608 : mid,
609 : res;
610 :
611 : /*
612 : * If this isn't a posting tuple, then the index must be corrupt (if it is
613 : * an ordinary non-pivot tuple then there must be an existing tuple with a
614 : * heap TID that equals inserter's new heap TID/scantid). Defensively
615 : * check that tuple is a posting list tuple whose posting list range
616 : * includes caller's scantid.
617 : *
618 : * (This is also needed because contrib/amcheck's rootdescend option needs
619 : * to be able to relocate a non-pivot tuple using _bt_binsrch_insert().)
620 : */
621 419892 : itemid = PageGetItemId(page, offnum);
622 419892 : itup = (IndexTuple) PageGetItem(page, itemid);
623 419892 : if (!BTreeTupleIsPosting(itup))
624 402196 : return 0;
625 :
626 : Assert(key->heapkeyspace && key->allequalimage);
627 :
628 : /*
629 : * In the event that posting list tuple has LP_DEAD bit set, indicate this
630 : * to _bt_binsrch_insert() caller by returning -1, a sentinel value. A
631 : * second call to _bt_binsrch_insert() can take place when its caller has
632 : * removed the dead item.
633 : */
634 17696 : if (ItemIdIsDead(itemid))
635 4 : return -1;
636 :
637 : /* "high" is past end of posting list for loop invariant */
638 17692 : low = 0;
639 17692 : high = BTreeTupleGetNPosting(itup);
640 : Assert(high >= 2);
641 :
642 141310 : while (high > low)
643 : {
644 123618 : mid = low + ((high - low) / 2);
645 123618 : res = ItemPointerCompare(key->scantid,
646 : BTreeTupleGetPostingN(itup, mid));
647 :
648 123618 : if (res > 0)
649 65344 : low = mid + 1;
650 58274 : else if (res < 0)
651 58274 : high = mid;
652 : else
653 0 : return mid;
654 : }
655 :
656 : /* Exact match not found */
657 17692 : return low;
658 : }
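
/*
 * A standalone sketch (toy types, not PostgreSQL code) of the search
 * shape used by _bt_binsrch_posting() above: an exact match returns its
 * slot immediately, otherwise the insertion offset for the caller's TID
 * falls out of the usual lower-bound loop.  Plain ints stand in for
 * ItemPointers here.
 */
#include <assert.h>

static int
toy_posting_binsrch(const int *tids, int ntids, int scantid)
{
    int low = 0;
    int high = ntids;       /* one past the end, for the loop invariant */

    while (high > low)
    {
        int mid = low + ((high - low) / 2);

        if (scantid > tids[mid])
            low = mid + 1;
        else if (scantid < tids[mid])
            high = mid;
        else
            return mid;     /* exact match found */
    }
    return low;             /* exact match not found: insert here */
}

int
main(void)
{
    int tids[] = {5, 9, 14, 22};

    assert(toy_posting_binsrch(tids, 4, 14) == 2);
    assert(toy_posting_binsrch(tids, 4, 10) == 2);  /* between 9 and 14 */
    return 0;
}
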
659 :
660 : /*----------
661 : * _bt_compare() -- Compare insertion-type scankey to tuple on a page.
662 : *
663 : * page/offnum: location of btree item to be compared to.
664 : *
665 : * This routine returns:
666 : * <0 if scankey < tuple at offnum;
667 : * 0 if scankey == tuple at offnum;
668 : * >0 if scankey > tuple at offnum.
669 : *
670 : * NULLs in the keys are treated as sortable values. Therefore
671 : * "equality" does not necessarily mean that the item should be returned
672 : * to the caller as a matching key. Similarly, an insertion scankey
673 : * with its scantid set is treated as equal to a posting tuple whose TID
674 : * range overlaps with its scantid. There generally won't be a
675 : * matching TID in the posting tuple, which the caller must handle
676 : * itself (e.g., by splitting the posting list tuple).
677 : *
678 : * CRUCIAL NOTE: on a non-leaf page, the first data key is assumed to be
679 : * "minus infinity": this routine will always claim it is less than the
680 : * scankey. The actual key value stored is explicitly truncated to 0
681 : * attributes (explicitly minus infinity) with version 3+ indexes, but
682 : * that isn't relied upon. This allows us to implement the Lehman and
683 : * Yao convention that the first down-link pointer is before the first
684 : * key. See backend/access/nbtree/README for details.
685 : *----------
686 : */
687 : int32
688 236514344 : _bt_compare(Relation rel,
689 : BTScanInsert key,
690 : Page page,
691 : OffsetNumber offnum)
692 : {
693 236514344 : TupleDesc itupdesc = RelationGetDescr(rel);
694 236514344 : BTPageOpaque opaque = BTPageGetOpaque(page);
695 : IndexTuple itup;
696 : ItemPointer heapTid;
697 : ScanKey scankey;
698 : int ncmpkey;
699 : int ntupatts;
700 : int32 result;
701 :
702 : Assert(_bt_check_natts(rel, key->heapkeyspace, page, offnum));
703 : Assert(key->keysz <= IndexRelationGetNumberOfKeyAttributes(rel));
704 : Assert(key->heapkeyspace || key->scantid == NULL);
705 :
706 : /*
707 : * Force result ">" if target item is first data item on an internal page
708 : * --- see NOTE above.
709 : */
710 236514344 : if (!P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
711 2921830 : return 1;
712 :
713 233592514 : itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
714 233592514 : ntupatts = BTreeTupleGetNAtts(itup, rel);
715 :
716 : /*
717 : * The scan key is set up with the attribute number associated with each
718 : * term in the key. It is important that, if the index is multi-key, the
719 : * scan contain the first k key attributes, and that they be in order. If
720 : * you think about how multi-key ordering works, you'll understand why
721 : * this is.
722 : *
723 : * We don't test for violation of this condition here, however. The
724 : * initial setup for the index scan had better have gotten it right (see
725 : * _bt_first).
726 : */
727 :
728 233592514 : ncmpkey = Min(ntupatts, key->keysz);
729 : Assert(key->heapkeyspace || ncmpkey == key->keysz);
730 : Assert(!BTreeTupleIsPosting(itup) || key->allequalimage);
731 233592514 : scankey = key->scankeys;
732 294742848 : for (int i = 1; i <= ncmpkey; i++)
733 : {
734 : Datum datum;
735 : bool isNull;
736 :
737 273635202 : datum = index_getattr(itup, scankey->sk_attno, itupdesc, &isNull);
738 :
739 273635202 : if (scankey->sk_flags & SK_ISNULL) /* key is NULL */
740 : {
741 468886 : if (isNull)
742 157024 : result = 0; /* NULL "=" NULL */
743 311862 : else if (scankey->sk_flags & SK_BT_NULLS_FIRST)
744 264 : result = -1; /* NULL "<" NOT_NULL */
745 : else
746 311598 : result = 1; /* NULL ">" NOT_NULL */
747 : }
748 273166316 : else if (isNull) /* key is NOT_NULL and item is NULL */
749 : {
750 198 : if (scankey->sk_flags & SK_BT_NULLS_FIRST)
751 0 : result = 1; /* NOT_NULL ">" NULL */
752 : else
753 198 : result = -1; /* NOT_NULL "<" NULL */
754 : }
755 : else
756 : {
757 : /*
758 : * The sk_func needs to be passed the index value as left arg and
759 : * the sk_argument as right arg (they might be of different
760 : * types). Since it is convenient for callers to think of
761 : * _bt_compare as comparing the scankey to the index item, we have
762 : * to flip the sign of the comparison result. (Unless it's a DESC
763 : * column, in which case we *don't* flip the sign.)
764 : */
765 273166118 : result = DatumGetInt32(FunctionCall2Coll(&scankey->sk_func,
766 : scankey->sk_collation,
767 : datum,
768 : scankey->sk_argument));
769 :
770 273166118 : if (!(scankey->sk_flags & SK_BT_DESC))
771 273166052 : INVERT_COMPARE_RESULT(result);
772 : }
773 :
774 : /* if the keys are unequal, return the difference */
775 273635202 : if (result != 0)
776 212484868 : return result;
777 :
778 61150334 : scankey++;
779 : }
780 :
781 : /*
782 : * All non-truncated attributes (other than heap TID) were found to be
783 : * equal. Treat truncated attributes as minus infinity when scankey has a
784 : * key attribute value that would otherwise be compared directly.
785 : *
786 : * Note: it doesn't matter if ntupatts includes non-key attributes;
787 : * scankey won't, so explicitly excluding non-key attributes isn't
788 : * necessary.
789 : */
790 21107646 : if (key->keysz > ntupatts)
791 204682 : return 1;
792 :
793 : /*
794 : * Use the heap TID attribute and scantid to try to break the tie. The
795 : * rules are the same as any other key attribute -- only the
796 : * representation differs.
797 : */
798 20902964 : heapTid = BTreeTupleGetHeapTID(itup);
799 20902964 : if (key->scantid == NULL)
800 : {
801 : /*
802 : * Forward scans have a scankey that is considered greater than a
803 : * truncated pivot tuple if and when the scankey has equal values for
804 : * attributes up to and including the least significant untruncated
805 : * attribute in tuple. Even attributes that were omitted from the
806 : * scan key are considered greater than -inf truncated attributes.
807 : * (See _bt_binsrch for an explanation of our backward scan behavior.)
808 : *
809 : * For example, if an index has the minimum two attributes (single
810 : * user key attribute, plus heap TID attribute), and a page's high key
811 : * is ('foo', -inf), and scankey is ('foo', <omitted>), the search
812 : * will not descend to the page to the left. The search will descend
813 : * right instead. The truncated attribute in pivot tuple means that
814 : * all non-pivot tuples on the page to the left are strictly < 'foo',
815 : * so it isn't necessary to descend left. In other words, search
816 : * doesn't have to descend left because it isn't interested in a match
817 : * that has a heap TID value of -inf.
818 : *
819 : * Note: the heap TID part of the test ensures that scankey is being
820 : * compared to a pivot tuple with one or more truncated -inf key
821 : * attributes. The heap TID attribute is the last key attribute in
822 : * every index, of course, but other than that it isn't special.
823 : */
824 16678758 : if (!key->backward && key->keysz == ntupatts && heapTid == NULL &&
825 7824 : key->heapkeyspace)
826 7824 : return 1;
827 :
828 : /* All provided scankey arguments found to be equal */
829 16670934 : return 0;
830 : }
831 :
832 : /*
833 : * Treat truncated heap TID as minus infinity, since scankey has a key
834 : * attribute value (scantid) that would otherwise be compared directly
835 : */
836 : Assert(key->keysz == IndexRelationGetNumberOfKeyAttributes(rel));
837 4224206 : if (heapTid == NULL)
838 3962 : return 1;
839 :
840 : /*
841 : * Scankey must be treated as equal to a posting list tuple if its scantid
842 : * value falls within the range of the posting list. In all other cases
843 : * there can only be a single heap TID value, which is compared directly
844 : * with scantid.
845 : */
846 : Assert(ntupatts >= IndexRelationGetNumberOfKeyAttributes(rel));
847 4220244 : result = ItemPointerCompare(key->scantid, heapTid);
848 4220244 : if (result <= 0 || !BTreeTupleIsPosting(itup))
849 4071040 : return result;
850 : else
851 : {
852 149204 : result = ItemPointerCompare(key->scantid,
853 : BTreeTupleGetMaxHeapTID(itup));
854 149204 : if (result > 0)
855 131508 : return 1;
856 : }
857 :
858 17696 : return 0;
859 : }
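
/*
 * A small sketch (illustrative, not PostgreSQL code) of the sign-flip
 * convention in _bt_compare() above: the ordering function compares
 * (index datum, scankey argument), so the raw result is inverted to get
 * "scankey versus tuple" -- except for DESC columns, where the stored
 * order is already reversed and the raw result is what we want.  The
 * macro below is a simplified stand-in; PostgreSQL's real
 * INVERT_COMPARE_RESULT uses a safer form that avoids negating INT_MIN.
 */
#include <assert.h>
#include <stdbool.h>

#define INVERT_COMPARE_RESULT(r) ((r) = -(r))   /* simplified stand-in */

/* stands in for the opclass support function: left arg vs right arg */
static int
toy_order_proc(int left, int right)
{
    return (left > right) - (left < right);
}

/* returns <0, 0, >0 as the scankey is <, =, > the indexed datum */
static int
toy_compare_scankey(int indexed_datum, int scankey_arg, bool desc)
{
    int result = toy_order_proc(indexed_datum, scankey_arg);

    if (!desc)
        INVERT_COMPARE_RESULT(result);
    return result;
}

int
main(void)
{
    /* ASC column: scankey 5 vs indexed 7 => scankey is smaller */
    assert(toy_compare_scankey(7, 5, false) < 0);
    /* DESC column: 7 sorts before 5, so scankey 5 is "greater" */
    assert(toy_compare_scankey(7, 5, true) > 0);
    return 0;
}
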
860 :
861 : /*
862 : * _bt_first() -- Find the first item in a scan.
863 : *
864 : * We need to be clever about the direction of scan, the search
865 : * conditions, and the tree ordering. We find the first item (or,
866 : * if backwards scan, the last item) in the tree that satisfies the
867 : * qualifications in the scan key. On success exit, data about the
868 : * matching tuple(s) on the page has been loaded into so->currPos. We'll
869 : * drop all locks and hold onto a pin on page's buffer, except when
870 : * _bt_drop_lock_and_maybe_pin dropped the pin to avoid blocking VACUUM.
871 : * _bt_returnitem sets the next item to return to scan on success exit.
872 : *
873 : * If there are no matching items in the index, we return false, with no
874 : * pins or locks held. so->currPos will remain invalid.
875 : *
876 : * Note that scan->keyData[], and the so->keyData[] scankey built from it,
877 : * are both search-type scankeys (see nbtree/README for more about this).
878 : * Within this routine, we build a temporary insertion-type scankey to use
879 : * in locating the scan start position.
880 : */
881 : bool
882 13099090 : _bt_first(IndexScanDesc scan, ScanDirection dir)
883 : {
884 13099090 : Relation rel = scan->indexRelation;
885 13099090 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
886 : BTStack stack;
887 : OffsetNumber offnum;
888 : BTScanInsertData inskey;
889 : ScanKey startKeys[INDEX_MAX_KEYS];
890 : ScanKeyData notnullkeys[INDEX_MAX_KEYS];
891 13099090 : int keysz = 0;
892 : StrategyNumber strat_total;
893 :
894 : Assert(!BTScanPosIsValid(so->currPos));
895 :
896 : /*
897 : * Examine the scan keys and eliminate any redundant keys; also mark the
898 : * keys that must be matched to continue the scan.
899 : */
900 13099090 : _bt_preprocess_keys(scan);
901 :
902 : /*
903 : * Quit now if _bt_preprocess_keys() discovered that the scan keys can
904 : * never be satisfied (eg, x == 1 AND x > 2).
905 : */
906 13099090 : if (!so->qual_ok)
907 : {
908 1020 : _bt_parallel_done(scan);
909 1020 : return false;
910 : }
911 :
912 : /*
913 : * For parallel scans, get the starting page from shared state. If the
914 : * scan has not started, proceed to find the first leaf page in the usual
915 : * way while keeping other participating processes waiting. If the scan
916 : * has already begun, use the page number from the shared structure.
917 : *
918 : * When a parallel scan has another primitive index scan scheduled, a
919 : * parallel worker will seize the scan for that purpose now. This is
920 : * similar to the case where the top-level scan hasn't started.
921 : */
922 13098070 : if (scan->parallel_scan != NULL)
923 : {
924 : BlockNumber blkno,
925 : lastcurrblkno;
926 :
927 434 : if (!_bt_parallel_seize(scan, &blkno, &lastcurrblkno, true))
928 316 : return false;
929 :
930 : /*
931 : * Successfully seized the scan, which _bt_readfirstpage or possibly
932 : * _bt_readnextpage will release (unless the scan ends right away, in
933 : * which case we'll call _bt_parallel_done directly).
934 : *
935 : * Initialize arrays (when _bt_parallel_seize didn't already set up
936 : * the next primitive index scan).
937 : */
938 126 : if (so->numArrayKeys && !so->needPrimScan)
939 6 : _bt_start_array_keys(scan, dir);
940 :
941 : Assert(blkno != P_NONE);
942 126 : if (blkno != InvalidBlockNumber)
943 : {
944 : Assert(!so->needPrimScan);
945 :
946 : /*
947 : * We anticipated starting another primitive scan, but some other
948 : * worker beat us to it
949 : */
950 8 : if (!_bt_readnextpage(scan, blkno, lastcurrblkno, dir, true))
951 0 : return false;
952 :
953 8 : _bt_returnitem(scan, so);
954 8 : return true;
955 : }
956 : }
957 13097636 : else if (so->numArrayKeys && !so->needPrimScan)
958 : {
959 : /*
960 : * First _bt_first call (for current btrescan) without parallelism.
961 : *
962 : * Initialize arrays, and the corresponding scan keys that were just
963 : * output by _bt_preprocess_keys.
964 : */
965 942 : _bt_start_array_keys(scan, dir);
966 : }
967 :
968 : /*
969 : * Count an indexscan for stats, now that we know that we'll call
970 : * _bt_search/_bt_endpoint below
971 : */
972 13097754 : pgstat_count_index_scan(rel);
973 :
974 : /*----------
975 : * Examine the scan keys to discover where we need to start the scan.
976 : *
977 : * We want to identify the keys that can be used as starting boundaries;
978 : * these are =, >, or >= keys for a forward scan or =, <, <= keys for
979 : * a backwards scan. We can use keys for multiple attributes so long as
980 : * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
981 : * a > or < boundary or find an attribute with no boundary (which can be
982 : * thought of as the same as "> -infinity"), we can't use keys for any
983 : * attributes to its right, because it would break our simplistic notion
984 : * of what initial positioning strategy to use.
985 : *
986 : * When the scan keys include cross-type operators, _bt_preprocess_keys
987 : * may not be able to eliminate redundant keys; in such cases we will
988 : * arbitrarily pick a usable one for each attribute. This is correct
989 : * but possibly not optimal behavior. (For example, with keys like
990 : * "x >= 4 AND x >= 5" we would elect to scan starting at x=4 when
991 : * x=5 would be more efficient.) Since the situation only arises given
992 : * a poorly-worded query plus an incomplete opfamily, live with it.
993 : *
994 : * When both equality and inequality keys appear for a single attribute
995 : * (again, only possible when cross-type operators appear), we *must*
996 : * select one of the equality keys for the starting point, because
997 : * _bt_checkkeys() will stop the scan as soon as an equality qual fails.
998 : * For example, if we have keys like "x >= 4 AND x = 10" and we elect to
999 : * start at x=4, we will fail and stop before reaching x=10. If multiple
1000 : * equality quals survive preprocessing, however, it doesn't matter which
1001 : * one we use --- by definition, they are either redundant or
1002 : * contradictory.
1003 : *
1004 : * Any regular (not SK_SEARCHNULL) key implies a NOT NULL qualifier.
1005 : * If the index stores nulls at the end of the index we'll be starting
1006 : * from, and we have no boundary key for the column (which means the key
1007 : * we deduced NOT NULL from is an inequality key that constrains the other
1008 : * end of the index), then we cons up an explicit SK_SEARCHNOTNULL key to
1009 : * use as a boundary key. If we didn't do this, we might find ourselves
1010 : * traversing a lot of null entries at the start of the scan.
1011 : *
1012 : * In this loop, row-comparison keys are treated the same as keys on their
1013 : * first (leftmost) columns. We'll add on lower-order columns of the row
1014 : * comparison below, if possible.
1015 : *
1016 : * The selected scan keys (at most one per index column) are remembered by
1017 : * storing their addresses into the local startKeys[] array.
1018 : *
1019 : * _bt_checkkeys/_bt_advance_array_keys decide whether and when to start
1020 : * the next primitive index scan (for scans with array keys) based in part
1021 : * on an understanding of how it'll enable us to reposition the scan.
1022 : * They're directly aware of how we'll sometimes cons up an explicit
1023 : * SK_SEARCHNOTNULL key. They'll even end primitive scans by applying a
1024 : * symmetric "deduce NOT NULL" rule of their own. This allows top-level
1025 : * scans to skip large groups of NULLs through repeated deductions about
1026 : * key strictness (for a required inequality key) and whether NULLs in the
1027 : * key's index column are stored last or first (relative to non-NULLs).
1028 : * If you update anything here, _bt_checkkeys/_bt_advance_array_keys might
1029 : * need to be kept in sync.
1030 : *----------
1031 : */
1032 13097754 : strat_total = BTEqualStrategyNumber;
1033 13097754 : if (so->numberOfKeys > 0)
1034 : {
1035 : AttrNumber curattr;
1036 : ScanKey chosen;
1037 : ScanKey impliesNN;
1038 : ScanKey cur;
1039 :
1040 : /*
1041 : * chosen is the so-far-chosen key for the current attribute, if any.
1042 : * We don't cast the decision in stone until we reach keys for the
1043 : * next attribute.
1044 : */
1045 13085882 : cur = so->keyData;
1046 13085882 : curattr = 1;
1047 13085882 : chosen = NULL;
1048 : /* Also remember any scankey that implies a NOT NULL constraint */
1049 13085882 : impliesNN = NULL;
1050 :
1051 : /*
1052 : * Loop iterates from 0 to numberOfKeys inclusive; we use the last
1053 : * pass to handle after-last-key processing. Actual exit from the
1054 : * loop is at one of the "break" statements below.
1055 : */
1056 13085882 : for (int i = 0;; cur++, i++)
1057 : {
1058 33959944 : if (i >= so->numberOfKeys || cur->sk_attno != curattr)
1059 : {
1060 : /*
1061 : * Done looking at keys for curattr. If we didn't find a
1062 : * usable boundary key, see if we can deduce a NOT NULL key.
1063 : */
1064 20937400 : if (chosen == NULL && impliesNN != NULL &&
1065 62446 : ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
1066 : ScanDirectionIsForward(dir) :
1067 : ScanDirectionIsBackward(dir)))
1068 : {
1069 : /* Yes, so build the key in notnullkeys[keysz] */
1070 6 : chosen = &notnullkeys[keysz];
1071 6 : ScanKeyEntryInitialize(chosen,
1072 : (SK_SEARCHNOTNULL | SK_ISNULL |
1073 6 : (impliesNN->sk_flags &
1074 : (SK_BT_DESC | SK_BT_NULLS_FIRST))),
1075 : curattr,
1076 6 : ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
1077 : BTGreaterStrategyNumber :
1078 : BTLessStrategyNumber),
1079 : InvalidOid,
1080 : InvalidOid,
1081 : InvalidOid,
1082 : (Datum) 0);
1083 : }
1084 :
1085 : /*
1086 : * If we still didn't find a usable boundary key, quit; else
1087 : * save the boundary key pointer in startKeys.
1088 : */
1089 20874954 : if (chosen == NULL)
1090 65186 : break;
1091 20809768 : startKeys[keysz++] = chosen;
1092 :
1093 : /* Quit if we have stored a > or < key */
1094 20809768 : strat_total = chosen->sk_strategy;
1095 20809768 : if (strat_total == BTGreaterStrategyNumber ||
1096 : strat_total == BTLessStrategyNumber)
1097 : break;
1098 :
1099 : /*
1100 : * Done if that was the last attribute, or if next key is not
1101 : * in sequence (implying no boundary key is available for the
1102 : * next attribute).
1103 : */
1104 19463296 : if (i >= so->numberOfKeys ||
1105 7789784 : cur->sk_attno != curattr + 1)
1106 : break;
1107 :
1108 : /*
1109 : * Reset for next attr.
1110 : */
1111 7789072 : curattr = cur->sk_attno;
1112 7789072 : chosen = NULL;
1113 7789072 : impliesNN = NULL;
1114 : }
1115 :
1116 : /*
1117 : * Can we use this key as a starting boundary for this attr?
1118 : *
1119 : * If not, does it imply a NOT NULL constraint? (Because
1120 : * SK_SEARCHNULL keys are always assigned BTEqualStrategyNumber,
1121 : * *any* inequality key works for that; we need not test.)
1122 : */
1123 20874062 : switch (cur->sk_strategy)
1124 : {
1125 107410 : case BTLessStrategyNumber:
1126 : case BTLessEqualStrategyNumber:
1127 107410 : if (chosen == NULL)
1128 : {
1129 105568 : if (ScanDirectionIsBackward(dir))
1130 43140 : chosen = cur;
1131 : else
1132 62428 : impliesNN = cur;
1133 : }
1134 107410 : break;
1135 19458954 : case BTEqualStrategyNumber:
1136 : /* override any non-equality choice */
1137 19458954 : chosen = cur;
1138 19458954 : break;
1139 1307698 : case BTGreaterEqualStrategyNumber:
1140 : case BTGreaterStrategyNumber:
1141 1307698 : if (chosen == NULL)
1142 : {
1143 1307698 : if (ScanDirectionIsForward(dir))
1144 1307668 : chosen = cur;
1145 : else
1146 30 : impliesNN = cur;
1147 : }
1148 1307698 : break;
1149 : }
1150 20874062 : }
1151 : }
1152 :
1153 : /*
1154 : * If we found no usable boundary keys, we have to start from one end of
1155 : * the tree. Walk down that edge to the first or last key, and scan from
1156 : * there.
1157 : *
1158 : * Note: calls _bt_readfirstpage for us, which releases the parallel scan.
1159 : */
1160 13097754 : if (keysz == 0)
1161 76906 : return _bt_endpoint(scan, dir);
1162 :
1163 : /*
1164 : * We want to start the scan somewhere within the index. Set up an
1165 : * insertion scankey we can use to search for the boundary point we
1166 : * identified above. The insertion scankey is built using the keys
1167 : * identified by startKeys[]. (Remaining insertion scankey fields are
1168 : * initialized after initial-positioning scan keys are finalized.)
1169 : */
1170 : Assert(keysz <= INDEX_MAX_KEYS);
1171 33830592 : for (int i = 0; i < keysz; i++)
1172 : {
1173 20809768 : ScanKey cur = startKeys[i];
1174 :
1175 : Assert(cur->sk_attno == i + 1);
1176 :
1177 20809768 : if (cur->sk_flags & SK_ROW_HEADER)
1178 : {
1179 : /*
1180 : * Row comparison header: look to the first row member instead.
1181 : *
1182 : * The member scankeys are already in insertion format (ie, they
1183 : * have sk_func = 3-way-comparison function), but we have to watch
1184 : * out for nulls, which _bt_preprocess_keys didn't check. A null
1185 : * in the first row member makes the condition unmatchable, just
1186 : * like qual_ok = false.
1187 : */
1188 24 : ScanKey subkey = (ScanKey) DatumGetPointer(cur->sk_argument);
1189 :
1190 : Assert(subkey->sk_flags & SK_ROW_MEMBER);
1191 24 : if (subkey->sk_flags & SK_ISNULL)
1192 : {
1193 0 : _bt_parallel_done(scan);
1194 0 : return false;
1195 : }
1196 24 : memcpy(inskey.scankeys + i, subkey, sizeof(ScanKeyData));
1197 :
1198 : /*
1199 : * If the row comparison is the last positioning key we accepted,
1200 : * try to add additional keys from the lower-order row members.
1201 : * (If we accepted independent conditions on additional index
1202 : * columns, we use those instead --- doesn't seem worth trying to
1203 : * determine which is more restrictive.) Note that this is OK
1204 : * even if the row comparison is of ">" or "<" type, because the
1205 : * condition applied to all but the last row member is effectively
1206 : * ">=" or "<=", and so the extra keys don't break the positioning
1207 : * scheme. But, by the same token, if we aren't able to use all
1208 : * the row members, then the part of the row comparison that we
1209 : * did use has to be treated as just a ">=" or "<=" condition, and
1210 : * so we'd better adjust strat_total accordingly.
1211 : */
1212 24 : if (i == keysz - 1)
1213 : {
1214 24 : bool used_all_subkeys = false;
1215 :
1216 : Assert(!(subkey->sk_flags & SK_ROW_END));
1217 : for (;;)
1218 : {
1219 24 : subkey++;
1220 : Assert(subkey->sk_flags & SK_ROW_MEMBER);
1221 24 : if (subkey->sk_attno != keysz + 1)
1222 0 : break; /* out-of-sequence, can't use it */
1223 24 : if (subkey->sk_strategy != cur->sk_strategy)
1224 0 : break; /* wrong direction, can't use it */
1225 24 : if (subkey->sk_flags & SK_ISNULL)
1226 0 : break; /* can't use null keys */
1227 : Assert(keysz < INDEX_MAX_KEYS);
1228 24 : memcpy(inskey.scankeys + keysz, subkey,
1229 : sizeof(ScanKeyData));
1230 24 : keysz++;
1231 24 : if (subkey->sk_flags & SK_ROW_END)
1232 : {
1233 24 : used_all_subkeys = true;
1234 24 : break;
1235 : }
1236 : }
1237 24 : if (!used_all_subkeys)
1238 : {
1239 0 : switch (strat_total)
1240 : {
1241 0 : case BTLessStrategyNumber:
1242 0 : strat_total = BTLessEqualStrategyNumber;
1243 0 : break;
1244 0 : case BTGreaterStrategyNumber:
1245 0 : strat_total = BTGreaterEqualStrategyNumber;
1246 0 : break;
1247 : }
1248 24 : }
1249 24 : break; /* done with outer loop */
1250 : }
1251 : }
1252 : else
1253 : {
1254 : /*
1255 : * Ordinary comparison key. Transform the search-style scan key
1256 : * to an insertion scan key by replacing the sk_func with the
1257 : * appropriate btree comparison function.
1258 : *
1259 : * If scankey operator is not a cross-type comparison, we can use
1260 : * the cached comparison function; otherwise gotta look it up in
1261 : * the catalogs. (That can't lead to infinite recursion, since no
1262 : * indexscan initiated by syscache lookup will use cross-data-type
1263 : * operators.)
1264 : *
1265 : * We support the convention that sk_subtype == InvalidOid means
1266 : * the opclass input type; this is a hack to simplify life for
1267 : * ScanKeyInit().
1268 : */
1269 20809744 : if (cur->sk_subtype == rel->rd_opcintype[i] ||
1270 20187084 : cur->sk_subtype == InvalidOid)
1271 20795904 : {
1272 : FmgrInfo *procinfo;
1273 :
1274 20795904 : procinfo = index_getprocinfo(rel, cur->sk_attno, BTORDER_PROC);
1275 20795904 : ScanKeyEntryInitializeWithInfo(inskey.scankeys + i,
1276 : cur->sk_flags,
1277 20795904 : cur->sk_attno,
1278 : InvalidStrategy,
1279 : cur->sk_subtype,
1280 : cur->sk_collation,
1281 : procinfo,
1282 : cur->sk_argument);
1283 : }
1284 : else
1285 : {
1286 : RegProcedure cmp_proc;
1287 :
1288 13840 : cmp_proc = get_opfamily_proc(rel->rd_opfamily[i],
1289 13840 : rel->rd_opcintype[i],
1290 : cur->sk_subtype,
1291 : BTORDER_PROC);
1292 13840 : if (!RegProcedureIsValid(cmp_proc))
1293 0 : elog(ERROR, "missing support function %d(%u,%u) for attribute %d of index \"%s\"",
1294 : BTORDER_PROC, rel->rd_opcintype[i], cur->sk_subtype,
1295 : cur->sk_attno, RelationGetRelationName(rel));
1296 13840 : ScanKeyEntryInitialize(inskey.scankeys + i,
1297 : cur->sk_flags,
1298 13840 : cur->sk_attno,
1299 : InvalidStrategy,
1300 : cur->sk_subtype,
1301 : cur->sk_collation,
1302 : cmp_proc,
1303 : cur->sk_argument);
1304 : }
1305 : }
1306 : }
1307 :
1308 : /*----------
1309 : * Examine the selected initial-positioning strategy to determine exactly
1310 : * where we need to start the scan, and set flag variables to control the
1311 : * initial descent by _bt_search (and our _bt_binsrch call for the leaf
1312 : * page _bt_search returns).
1313 : *----------
1314 : */
1315 13020848 : _bt_metaversion(rel, &inskey.heapkeyspace, &inskey.allequalimage);
1316 13020848 : inskey.anynullkeys = false; /* unused */
1317 13020848 : inskey.scantid = NULL;
1318 13020848 : inskey.keysz = keysz;
1319 13020848 : switch (strat_total)
1320 : {
1321 43140 : case BTLessStrategyNumber:
1322 :
1323 43140 : inskey.nextkey = false;
1324 43140 : inskey.backward = true;
1325 43140 : break;
1326 :
1327 6 : case BTLessEqualStrategyNumber:
1328 :
1329 6 : inskey.nextkey = true;
1330 6 : inskey.backward = true;
1331 6 : break;
1332 :
1333 11670140 : case BTEqualStrategyNumber:
1334 :
1335 : /*
1336 : * If a backward scan was specified, need to start with last equal
1337 : * item not first one.
1338 : */
1339 11670140 : if (ScanDirectionIsBackward(dir))
1340 : {
1341 : /*
1342 : * This is the same as the <= strategy
1343 : */
1344 148 : inskey.nextkey = true;
1345 148 : inskey.backward = true;
1346 : }
1347 : else
1348 : {
1349 : /*
1350 : * This is the same as the >= strategy
1351 : */
1352 11669992 : inskey.nextkey = false;
1353 11669992 : inskey.backward = false;
1354 : }
1355 11670140 : break;
1356 :
1357 4230 : case BTGreaterEqualStrategyNumber:
1358 :
1359 : /*
1360 : * Find first item >= scankey
1361 : */
1362 4230 : inskey.nextkey = false;
1363 4230 : inskey.backward = false;
1364 4230 : break;
1365 :
1366 1303332 : case BTGreaterStrategyNumber:
1367 :
1368 : /*
1369 : * Find first item > scankey
1370 : */
1371 1303332 : inskey.nextkey = true;
1372 1303332 : inskey.backward = false;
1373 1303332 : break;
1374 :
1375 0 : default:
1376 : /* can't get here, but keep compiler quiet */
1377 0 : elog(ERROR, "unrecognized strat_total: %d", (int) strat_total);
1378 : return false;
1379 : }
1380 :
1381 : /*
1382 : * Use the manufactured insertion scan key to descend the tree and
1383 : * position ourselves on the target leaf page.
1384 : */
1385 : Assert(ScanDirectionIsBackward(dir) == inskey.backward);
1386 13020848 : stack = _bt_search(rel, NULL, &inskey, &so->currPos.buf, BT_READ);
1387 :
1388 : /* don't need to keep the stack around... */
1389 13020848 : _bt_freestack(stack);
1390 :
1391 13020848 : if (!BufferIsValid(so->currPos.buf))
1392 : {
1393 : /*
1394 : * We only get here if the index is completely empty. Lock relation
1395 : * because nothing finer to lock exists. Without a buffer lock, it's
1396 : * possible for another transaction to insert data between
1397 : * _bt_search() and PredicateLockRelation(). We have to try again
1398 : * after taking the relation-level predicate lock, to close a narrow
1399 : * window where we wouldn't scan concurrently inserted tuples, but the
1400 : * writer wouldn't see our predicate lock.
1401 : */
1402 519368 : if (IsolationIsSerializable())
1403 : {
1404 5562 : PredicateLockRelation(rel, scan->xs_snapshot);
1405 5562 : stack = _bt_search(rel, NULL, &inskey, &so->currPos.buf, BT_READ);
1406 5562 : _bt_freestack(stack);
1407 : }
1408 :
1409 519368 : if (!BufferIsValid(so->currPos.buf))
1410 : {
1411 519368 : _bt_parallel_done(scan);
1412 519368 : return false;
1413 : }
1414 : }
1415 :
1416 : /* position to the precise item on the page */
1417 12501480 : offnum = _bt_binsrch(rel, &inskey, so->currPos.buf);
1418 :
1419 : /*
1420 : * Now load data from the first page of the scan (usually the page
1421 : * currently in so->currPos.buf).
1422 : *
1423 : * If inskey.nextkey = false and inskey.backward = false, offnum is
1424 : * positioned at the first non-pivot tuple >= inskey.scankeys.
1425 : *
1426 : * If inskey.nextkey = false and inskey.backward = true, offnum is
1427 : * positioned at the last non-pivot tuple < inskey.scankeys.
1428 : *
1429 : * If inskey.nextkey = true and inskey.backward = false, offnum is
1430 : * positioned at the first non-pivot tuple > inskey.scankeys.
1431 : *
1432 : * If inskey.nextkey = true and inskey.backward = true, offnum is
1433 : * positioned at the last non-pivot tuple <= inskey.scankeys.
1434 : *
1435 : * It's possible that _bt_binsrch returned an offnum that is out of bounds
1436 : * for the page. For example, when inskey is both < the leaf page's high
1437 : * key and > all of its non-pivot tuples, offnum will be "maxoff + 1".
1438 : */
1439 12501480 : if (!_bt_readfirstpage(scan, offnum, dir))
1440 3403542 : return false;
1441 :
1442 9097938 : _bt_returnitem(scan, so);
1443 9097938 : return true;
1444 : }
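
/*
 * A condensed sketch (hypothetical helper, not PostgreSQL code) of how
 * _bt_first() maps the chosen operator strategy and scan direction onto
 * the insertion scankey's nextkey/backward flags, per the switch above.
 */
#include <assert.h>
#include <stdbool.h>

typedef enum { TOY_LT, TOY_LE, TOY_EQ, TOY_GE, TOY_GT } ToyStrategy;

static void
toy_start_flags(ToyStrategy strat, bool backward_scan,
                bool *nextkey, bool *backward)
{
    switch (strat)
    {
        case TOY_LT:            /* last item < key */
            *nextkey = false;
            *backward = true;
            break;
        case TOY_LE:            /* last item <= key */
            *nextkey = true;
            *backward = true;
            break;
        case TOY_EQ:            /* = behaves as >= forward, <= backward */
            *nextkey = backward_scan;
            *backward = backward_scan;
            break;
        case TOY_GE:            /* first item >= key */
            *nextkey = false;
            *backward = false;
            break;
        case TOY_GT:            /* first item > key */
            *nextkey = true;
            *backward = false;
            break;
    }
}

int
main(void)
{
    bool nextkey, backward;

    toy_start_flags(TOY_EQ, true, &nextkey, &backward);
    assert(nextkey && backward);            /* same as the <= strategy */
    toy_start_flags(TOY_GT, false, &nextkey, &backward);
    assert(nextkey && !backward);
    return 0;
}
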
1445 :
1446 : /*
1447 : * _bt_next() -- Get the next item in a scan.
1448 : *
1449 : * On entry, so->currPos describes the current page, which may be pinned
1450 : * but is not locked, and so->currPos.itemIndex identifies which item was
1451 : * previously returned.
1452 : *
1453 : * On success exit, so->currPos is updated as needed, and _bt_returnitem
1454 : * sets the next item to return to the scan. so->currPos remains valid.
1455 : *
1456 : * On failure exit (no more tuples), we invalidate so->currPos. It'll
1457 : * still be possible for the scan to return tuples by changing direction,
1458 : * though we'll need to call _bt_first anew in that other direction.
1459 : */
1460 : bool
1461 16812760 : _bt_next(IndexScanDesc scan, ScanDirection dir)
1462 : {
1463 16812760 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1464 :
1465 : Assert(BTScanPosIsValid(so->currPos));
1466 :
1467 : /*
1468 : * Advance to next tuple on current page; or if there's no more, try to
1469 : * step to the next page with data.
1470 : */
1471 16812760 : if (ScanDirectionIsForward(dir))
1472 : {
1473 16780226 : if (++so->currPos.itemIndex > so->currPos.lastItem)
1474 : {
1475 2064520 : if (!_bt_steppage(scan, dir))
1476 2038648 : return false;
1477 : }
1478 : }
1479 : else
1480 : {
1481 32534 : if (--so->currPos.itemIndex < so->currPos.firstItem)
1482 : {
1483 114 : if (!_bt_steppage(scan, dir))
1484 80 : return false;
1485 : }
1486 : }
1487 :
1488 14774032 : _bt_returnitem(scan, so);
1489 14774032 : return true;
1490 : }
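
The itemIndex arithmetic above is just a cursor over the window of items that _bt_readpage cached in so->currPos; stepping pages is the slow path, taken only once the window is exhausted. Below is a minimal standalone sketch of that pattern. SketchPos and sketch_steppage are simplified stand-ins for illustration, not the real nbtree structures, and backwards movement and mark/restore are omitted.

#include <stdbool.h>
#include <stdio.h>

typedef struct SketchPos
{
	int		items[8];		/* stand-in for the BTScanPosItem array */
	int		firstItem;		/* first valid entry in items[] */
	int		lastItem;		/* last valid entry in items[] */
	int		itemIndex;		/* entry most recently returned */
} SketchPos;

/*
 * Stand-in for _bt_steppage: this one-page "index" has no next page.  A
 * real steppage would refill items[] and reset itemIndex before returning.
 */
static bool
sketch_steppage(SketchPos *pos)
{
	(void) pos;
	return false;
}

static bool
sketch_next_forward(SketchPos *pos, int *out)
{
	/* advance within the cached window; else try the next page */
	if (++pos->itemIndex > pos->lastItem)
	{
		if (!sketch_steppage(pos))
			return false;	/* no more matches anywhere */
	}
	*out = pos->items[pos->itemIndex];
	return true;
}

int
main(void)
{
	SketchPos	pos = {.items = {10, 20, 30}, .firstItem = 0,
					   .lastItem = 2, .itemIndex = -1};
	int			v;

	while (sketch_next_forward(&pos, &v))
		printf("%d\n", v);	/* prints 10, 20, 30 */
	return 0;
}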
1491 :
1492 : /*
1493 : * _bt_readpage() -- Load data from current index page into so->currPos
1494 : *
1495 : * Caller must have pinned and read-locked so->currPos.buf; the buffer's state
1496 : * is not changed here. Also, currPos.moreLeft and moreRight must be valid;
1497 : * they are updated as appropriate. All other fields of so->currPos are
1498 : * initialized from scratch here.
1499 : *
1500 : * We scan the current page starting at offnum and moving in the indicated
1501 : * direction. All items matching the scan keys are loaded into currPos.items.
1502 : * moreLeft or moreRight (as appropriate) is cleared if _bt_checkkeys reports
1503 : * that there can be no more matching tuples in the current scan direction
1504 : * (could just be for the current primitive index scan when scan has arrays).
1505 : *
1506 : * In the case of a parallel scan, caller must have called _bt_parallel_seize
1507 : * prior to calling this function; this function will invoke
1508 : * _bt_parallel_release before returning.
1509 : *
1510 : * Returns true if any matching items found on the page, false if none.
1511 : */
1512 : static bool
1513 12599466 : _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
1514 : bool firstPage)
1515 : {
1516 12599466 : Relation rel = scan->indexRelation;
1517 12599466 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
1518 : Page page;
1519 : BTPageOpaque opaque;
1520 : OffsetNumber minoff;
1521 : OffsetNumber maxoff;
1522 : BTReadPageState pstate;
1523 : bool arrayKeys;
1524 : int itemIndex,
1525 : indnatts;
1526 :
1527 : /* save the page/buffer block number, along with its sibling links */
1528 12599466 : page = BufferGetPage(so->currPos.buf);
1529 12599466 : opaque = BTPageGetOpaque(page);
1530 12599466 : so->currPos.currPage = BufferGetBlockNumber(so->currPos.buf);
1531 12599466 : so->currPos.prevPage = opaque->btpo_prev;
1532 12599466 : so->currPos.nextPage = opaque->btpo_next;
1533 :
1534 : Assert(!P_IGNORE(opaque));
1535 : Assert(BTScanPosIsPinned(so->currPos));
1536 : Assert(!so->needPrimScan);
1537 :
1538 12599466 : if (scan->parallel_scan)
1539 : {
1540 : /* allow next/prev page to be read by another worker without delay */
1541 1330 : if (ScanDirectionIsForward(dir))
1542 1330 : _bt_parallel_release(scan, so->currPos.nextPage,
1543 : so->currPos.currPage);
1544 : else
1545 0 : _bt_parallel_release(scan, so->currPos.prevPage,
1546 : so->currPos.currPage);
1547 : }
1548 :
1549 : /* initialize remaining currPos fields related to current page */
1550 12599466 : so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf);
1551 12599466 : so->currPos.dir = dir;
1552 12599466 : so->currPos.nextTupleOffset = 0;
1553 : /* either moreLeft or moreRight should be set now (may be unset later) */
1554 : Assert(ScanDirectionIsForward(dir) ? so->currPos.moreRight :
1555 : so->currPos.moreLeft);
1556 :
1557 12599466 : PredicateLockPage(rel, so->currPos.currPage, scan->xs_snapshot);
1558 :
1559 : /* initialize local variables */
1560 12599466 : indnatts = IndexRelationGetNumberOfAttributes(rel);
1561 12599466 : arrayKeys = so->numArrayKeys != 0;
1562 12599466 : minoff = P_FIRSTDATAKEY(opaque);
1563 12599466 : maxoff = PageGetMaxOffsetNumber(page);
1564 :
1565 : /* initialize page-level state that we'll pass to _bt_checkkeys */
1566 12599466 : pstate.minoff = minoff;
1567 12599466 : pstate.maxoff = maxoff;
1568 12599466 : pstate.finaltup = NULL;
1569 12599466 : pstate.page = page;
1570 12599466 : pstate.offnum = InvalidOffsetNumber;
1571 12599466 : pstate.skip = InvalidOffsetNumber;
1572 12599466 : pstate.continuescan = true; /* default assumption */
1573 12599466 : pstate.prechecked = false;
1574 12599466 : pstate.firstmatch = false;
1575 12599466 : pstate.rechecks = 0;
1576 12599466 : pstate.targetdistance = 0;
1577 :
1578 : /*
1579 : * Precheck the value of the continuescan flag for the last item on the
1580 : * page (for a backwards scan it will be the first item on the page). If
1581 : * we observe it to be true, then it should be true for all other items.
1582 : * This allows us to do significant optimizations in the _bt_checkkeys()
1583 : * function for all the items on the page.
1584 : *
1585 : * For a forward scan, we do this check against the last item on the page
1586 : * rather than the high key. It's relatively likely that the most
1587 : * significant column in the high key will differ from the corresponding
1588 : * value in the last item on the page, so checking against the last item
1589 : * gives a more precise answer.
1590 : *
1591 : * We skip this for the first page read by each (primitive) scan, to avoid
1592 : * slowing down point queries. They typically don't stand to gain much
1593 : * when the optimization can be applied, and are more likely to notice the
1594 : * overhead of the precheck.
1595 : *
1596 : * The optimization is unsafe and must be avoided whenever _bt_checkkeys
1597 : * just set a low-order required array's key to the best available match
1598 : * for a truncated -inf attribute value from the prior page's high key
1599 : * (array element 0 is always the best available match in this scenario).
1600 : * It's quite likely that matches for array element 0 begin on this page,
1601 : * but the start of matches won't necessarily align with page boundaries.
1602 : * When the start of matches is somewhere in the middle of this page, it
1603 : * would be wrong to treat the page's final non-pivot tuple as representative.
1604 : * Doing so might lead us to treat some of the page's earlier tuples as
1605 : * being part of a group of tuples thought to satisfy the required keys.
1606 : *
1607 : * Note: Conversely, in the case where the scan's arrays just advanced
1608 : * using the prior page's HIKEY _without_ advancement setting scanBehind,
1609 : * the start of matches must be aligned with page boundaries, which makes
1610 : * it safe to attempt the optimization here now. It's also safe when the
1611 : * prior page's HIKEY simply didn't need to advance any required array. In
1612 : * both cases we can safely assume that the _first_ tuple from this page
1613 : * must be >= the current set of array keys/equality constraints. And so
1614 : * if the final tuple is == those same keys (and also satisfies any
1615 : * required < or <= strategy scan keys) during the precheck, we can safely
1616 : * assume that this must also be true of all earlier tuples from the page.
1617 : */
1618 12599466 : if (!firstPage && !so->scanBehind && minoff < maxoff)
1619 : {
1620 : ItemId iid;
1621 : IndexTuple itup;
1622 :
1623 27634 : iid = PageGetItemId(page, ScanDirectionIsForward(dir) ? maxoff : minoff);
1624 27634 : itup = (IndexTuple) PageGetItem(page, iid);
1625 :
1626 : /* Call with arrayKeys=false to avoid undesirable side-effects */
1627 27634 : _bt_checkkeys(scan, &pstate, false, itup, indnatts);
1628 27634 : pstate.prechecked = pstate.continuescan;
1629 27634 : pstate.continuescan = true; /* reset */
1630 : }
1631 :
1632 12599466 : if (ScanDirectionIsForward(dir))
1633 : {
1634 : /* SK_SEARCHARRAY forward scans must provide high key up front */
1635 12556014 : if (arrayKeys && !P_RIGHTMOST(opaque))
1636 : {
1637 1228 : ItemId iid = PageGetItemId(page, P_HIKEY);
1638 :
1639 1228 : pstate.finaltup = (IndexTuple) PageGetItem(page, iid);
1640 :
1641 1228 : if (unlikely(so->oppositeDirCheck))
1642 : {
1643 : Assert(so->scanBehind);
1644 :
1645 : /*
1646 : * Last _bt_readpage call scheduled a recheck of finaltup for
1647 : * required scan keys up to and including a > or >= scan key.
1648 : *
1649 : * _bt_checkkeys won't consider the scanBehind flag unless the
1650 : * scan is stopped by a scan key required in the current scan
1651 : * direction. We need this recheck so that we'll notice when
1652 : * all tuples on this page are still before the _bt_first-wise
1653 : * start of matches for the current set of array keys.
1654 : */
1655 0 : if (!_bt_oppodir_checkkeys(scan, dir, pstate.finaltup))
1656 : {
1657 : /* Schedule another primitive index scan after all */
1658 0 : so->currPos.moreRight = false;
1659 0 : so->needPrimScan = true;
1660 0 : return false;
1661 : }
1662 :
1663 : /* Deliberately don't unset scanBehind flag just yet */
1664 : }
1665 : }
1666 :
1667 : /* load items[] in ascending order */
1668 12556014 : itemIndex = 0;
1669 :
1670 12556014 : offnum = Max(offnum, minoff);
1671 :
1672 50330330 : while (offnum <= maxoff)
1673 : {
1674 47485160 : ItemId iid = PageGetItemId(page, offnum);
1675 : IndexTuple itup;
1676 : bool passes_quals;
1677 :
1678 : /*
1679 : * If the scan specifies not to return killed tuples, then we
1680 : * treat a killed tuple as not passing the qual
1681 : */
1682 47485160 : if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
1683 : {
1684 4223460 : offnum = OffsetNumberNext(offnum);
1685 4223460 : continue;
1686 : }
1687 :
1688 43261700 : itup = (IndexTuple) PageGetItem(page, iid);
1689 : Assert(!BTreeTupleIsPivot(itup));
1690 :
1691 43261700 : pstate.offnum = offnum;
1692 43261700 : passes_quals = _bt_checkkeys(scan, &pstate, arrayKeys,
1693 : itup, indnatts);
1694 :
1695 : /*
1696 : * Check if we need to skip ahead to a later tuple (only possible
1697 : * when the scan uses array keys)
1698 : */
1699 43261700 : if (arrayKeys && OffsetNumberIsValid(pstate.skip))
1700 : {
1701 : Assert(!passes_quals && pstate.continuescan);
1702 : Assert(offnum < pstate.skip);
1703 :
1704 470 : offnum = pstate.skip;
1705 470 : pstate.skip = InvalidOffsetNumber;
1706 470 : continue;
1707 : }
1708 :
1709 43261230 : if (passes_quals)
1710 : {
1711 : /* tuple passes all scan key conditions */
1712 33127848 : pstate.firstmatch = true;
1713 33127848 : if (!BTreeTupleIsPosting(itup))
1714 : {
1715 : /* Remember it */
1716 32732524 : _bt_saveitem(so, itemIndex, offnum, itup);
1717 32732524 : itemIndex++;
1718 : }
1719 : else
1720 : {
1721 : int tupleOffset;
1722 :
1723 : /*
1724 : * Set up state to return posting list, and remember first
1725 : * TID
1726 : */
1727 : tupleOffset =
1728 395324 : _bt_setuppostingitems(so, itemIndex, offnum,
1729 : BTreeTupleGetPostingN(itup, 0),
1730 : itup);
1731 395324 : itemIndex++;
1732 : /* Remember additional TIDs */
1733 2568014 : for (int i = 1; i < BTreeTupleGetNPosting(itup); i++)
1734 : {
1735 2172690 : _bt_savepostingitem(so, itemIndex, offnum,
1736 : BTreeTupleGetPostingN(itup, i),
1737 : tupleOffset);
1738 2172690 : itemIndex++;
1739 : }
1740 : }
1741 : }
1742 : /* When !continuescan, there can't be any more matches, so stop */
1743 43261230 : if (!pstate.continuescan)
1744 9710844 : break;
1745 :
1746 33550386 : offnum = OffsetNumberNext(offnum);
1747 : }
1748 :
1749 : /*
1750 : * We don't need to visit the page to the right when the high key
1751 : * indicates that no more matches will be found there.
1752 : *
1753 : * Checking the high key like this works out more often than you might
1754 : * think. Leaf page splits pick a split point between the two most
1755 : * dissimilar tuples (this is weighed against the need to evenly share
1756 : * free space). Leaf pages with high key attribute values that can
1757 : * only appear on non-pivot tuples on the right sibling page are
1758 : * common.
1759 : */
1760 12556014 : if (pstate.continuescan && !P_RIGHTMOST(opaque))
1761 : {
1762 118612 : ItemId iid = PageGetItemId(page, P_HIKEY);
1763 118612 : IndexTuple itup = (IndexTuple) PageGetItem(page, iid);
1764 : int truncatt;
1765 :
1766 118612 : truncatt = BTreeTupleGetNAtts(itup, rel);
1767 118612 : pstate.prechecked = false; /* precheck didn't cover HIKEY */
1768 118612 : _bt_checkkeys(scan, &pstate, arrayKeys, itup, truncatt);
1769 : }
1770 :
1771 12556014 : if (!pstate.continuescan)
1772 9784952 : so->currPos.moreRight = false;
1773 :
1774 : Assert(itemIndex <= MaxTIDsPerBTreePage);
1775 12556014 : so->currPos.firstItem = 0;
1776 12556014 : so->currPos.lastItem = itemIndex - 1;
1777 12556014 : so->currPos.itemIndex = 0;
1778 : }
1779 : else
1780 : {
1781 : /* SK_SEARCHARRAY backward scans must provide final tuple up front */
1782 43452 : if (arrayKeys && minoff <= maxoff && !P_LEFTMOST(opaque))
1783 : {
1784 24 : ItemId iid = PageGetItemId(page, minoff);
1785 :
1786 24 : pstate.finaltup = (IndexTuple) PageGetItem(page, iid);
1787 : }
1788 :
1789 : /* load items[] in descending order */
1790 43452 : itemIndex = MaxTIDsPerBTreePage;
1791 :
1792 43452 : offnum = Min(offnum, maxoff);
1793 :
1794 7449854 : while (offnum >= minoff)
1795 : {
1796 7406498 : ItemId iid = PageGetItemId(page, offnum);
1797 : IndexTuple itup;
1798 : bool tuple_alive;
1799 : bool passes_quals;
1800 :
1801 : /*
1802 : * If the scan specifies not to return killed tuples, then we
1803 : * treat a killed tuple as not passing the qual. Most of the
1804 : * time, it's a win to not bother examining the tuple's index
1805 : * keys, but just skip to the next tuple (previous, actually,
1806 : * since we're scanning backwards). However, if this is the first
1807 : * tuple on the page, we do check the index keys, to prevent
1808 : * uselessly advancing to the page to the left. This is similar
1809 : * to the high key optimization used by forward scans.
1810 : */
1811 7406498 : if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
1812 : {
1813 325574 : if (offnum > minoff)
1814 : {
1815 324848 : offnum = OffsetNumberPrev(offnum);
1816 324848 : continue;
1817 : }
1818 :
1819 726 : tuple_alive = false;
1820 : }
1821 : else
1822 7080924 : tuple_alive = true;
1823 :
1824 7081650 : itup = (IndexTuple) PageGetItem(page, iid);
1825 : Assert(!BTreeTupleIsPivot(itup));
1826 :
1827 7081650 : pstate.offnum = offnum;
1828 7081650 : passes_quals = _bt_checkkeys(scan, &pstate, arrayKeys,
1829 : itup, indnatts);
1830 :
1831 : /*
1832 : * Check if we need to skip ahead to a later tuple (only possible
1833 : * when the scan uses array keys)
1834 : */
1835 7081650 : if (arrayKeys && OffsetNumberIsValid(pstate.skip))
1836 : {
1837 : Assert(!passes_quals && pstate.continuescan);
1838 : Assert(offnum > pstate.skip);
1839 :
1840 6 : offnum = pstate.skip;
1841 6 : pstate.skip = InvalidOffsetNumber;
1842 6 : continue;
1843 : }
1844 :
1845 7081644 : if (passes_quals && tuple_alive)
1846 : {
1847 : /* tuple passes all scan key conditions */
1848 7080696 : pstate.firstmatch = true;
1849 7080696 : if (!BTreeTupleIsPosting(itup))
1850 : {
1851 : /* Remember it */
1852 7039266 : itemIndex--;
1853 7039266 : _bt_saveitem(so, itemIndex, offnum, itup);
1854 : }
1855 : else
1856 : {
1857 : int tupleOffset;
1858 :
1859 : /*
1860 : * Set up state to return posting list, and remember first
1861 : * TID.
1862 : *
1863 : * Note that we deliberately save/return items from
1864 : * posting lists in ascending heap TID order for backwards
1865 : * scans. This allows _bt_killitems() to make a
1866 : * consistent assumption about the order of items
1867 : * associated with the same posting list tuple.
1868 : */
1869 41430 : itemIndex--;
1870 : tupleOffset =
1871 41430 : _bt_setuppostingitems(so, itemIndex, offnum,
1872 : BTreeTupleGetPostingN(itup, 0),
1873 : itup);
1874 : /* Remember additional TIDs */
1875 156136 : for (int i = 1; i < BTreeTupleGetNPosting(itup); i++)
1876 : {
1877 114706 : itemIndex--;
1878 114706 : _bt_savepostingitem(so, itemIndex, offnum,
1879 : BTreeTupleGetPostingN(itup, i),
1880 : tupleOffset);
1881 : }
1882 : }
1883 : }
1884 : /* When !continuescan, there can't be any more matches, so stop */
1885 7081644 : if (!pstate.continuescan)
1886 96 : break;
1887 :
1888 7081548 : offnum = OffsetNumberPrev(offnum);
1889 : }
1890 :
1891 : /*
1892 : * We don't need to visit the page to the left when no more matches
1893 : * will be found there.
1894 : */
1895 43452 : if (!pstate.continuescan)
1896 96 : so->currPos.moreLeft = false;
1897 :
1898 : Assert(itemIndex >= 0);
1899 43452 : so->currPos.firstItem = itemIndex;
1900 43452 : so->currPos.lastItem = MaxTIDsPerBTreePage - 1;
1901 43452 : so->currPos.itemIndex = MaxTIDsPerBTreePage - 1;
1902 : }
1903 :
1904 12599466 : return (so->currPos.firstItem <= so->currPos.lastItem);
1905 : }
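
The precheck described in the long comment above relies only on the page being key-ordered. Below is a standalone sketch, with plain ints standing in for index tuples and "value <= limit" standing in for a required scan key: if the last item in the scan direction does not force the scan to stop, no earlier item on the page can either, so the per-item continuescan test can be skipped.

#include <stdbool.h>
#include <stdio.h>

static int
count_matches(const int *page, int nitems, int limit)
{
	bool	prechecked;
	int		nmatches = 0;

	/* one test against the final item can vouch for the whole page */
	prechecked = (nitems > 0 && page[nitems - 1] <= limit);

	for (int i = 0; i < nitems; i++)
	{
		if (!prechecked && page[i] > limit)
			break;			/* required key failed: stop the scan */
		nmatches++;
	}
	return nmatches;
}

int
main(void)
{
	int		page[] = {1, 3, 5, 7, 9};	/* key-ordered, like a leaf page */

	printf("%d\n", count_matches(page, 5, 9));	/* 5: precheck succeeds */
	printf("%d\n", count_matches(page, 5, 4));	/* 2: per-item checks decide */
	return 0;
}

As in _bt_readpage, the payoff is that the qual test effectively runs once per page rather than once per tuple whenever the whole page matches; the sketch omits the array-key cases that make the real precheck conditionally unsafe.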
1906 :
1907 : /* Save an index item into so->currPos.items[itemIndex] */
1908 : static void
1909 39771790 : _bt_saveitem(BTScanOpaque so, int itemIndex,
1910 : OffsetNumber offnum, IndexTuple itup)
1911 : {
1912 39771790 : BTScanPosItem *currItem = &so->currPos.items[itemIndex];
1913 :
1914 : Assert(!BTreeTupleIsPivot(itup) && !BTreeTupleIsPosting(itup));
1915 :
1916 39771790 : currItem->heapTid = itup->t_tid;
1917 39771790 : currItem->indexOffset = offnum;
1918 39771790 : if (so->currTuples)
1919 : {
1920 19059504 : Size itupsz = IndexTupleSize(itup);
1921 :
1922 19059504 : currItem->tupleOffset = so->currPos.nextTupleOffset;
1923 19059504 : memcpy(so->currTuples + so->currPos.nextTupleOffset, itup, itupsz);
1924 19059504 : so->currPos.nextTupleOffset += MAXALIGN(itupsz);
1925 : }
1926 39771790 : }
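
_bt_saveitem advances nextTupleOffset by MAXALIGN(itupsz) so that every tuple packed into so->currTuples begins on a maximally aligned boundary. Here is a standalone sketch of that rounding idiom; MAXALIGN_SKETCH hard-codes 8-byte alignment, whereas the real MAXALIGN derives the value from the platform.

#include <stddef.h>
#include <stdio.h>

/* round len up to the next multiple of 8 (assumed maximum alignment) */
#define MAXALIGN_SKETCH(len) (((size_t) (len) + 7) & ~(size_t) 7)

int
main(void)
{
	size_t	sizes[3] = {20, 13, 24};	/* hypothetical tuple sizes */
	size_t	offsets[3];
	size_t	next = 0;

	for (int i = 0; i < 3; i++)
	{
		offsets[i] = next;		/* tuple i is stored at this offset */
		next += MAXALIGN_SKETCH(sizes[i]);
	}
	printf("%zu %zu %zu\n", offsets[0], offsets[1], offsets[2]);	/* 0 24 40 */
	return 0;
}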
1927 :
1928 : /*
1929 : * Set up state to save TIDs/items from a single posting list tuple.
1930 : *
1931 : * Saves an index item into so->currPos.items[itemIndex] for the TID that
1932 : * is returned to the scan first. Second and subsequent TIDs from the
1933 : * posting list should be saved by calling _bt_savepostingitem().
1934 : *
1935 : * Returns the offset into tuple storage space at which the base tuple is
1936 : * stored, when tuple storage is in use (0 otherwise).
1937 : */
1938 : static int
1939 436754 : _bt_setuppostingitems(BTScanOpaque so, int itemIndex, OffsetNumber offnum,
1940 : ItemPointer heapTid, IndexTuple itup)
1941 : {
1942 436754 : BTScanPosItem *currItem = &so->currPos.items[itemIndex];
1943 :
1944 : Assert(BTreeTupleIsPosting(itup));
1945 :
1946 436754 : currItem->heapTid = *heapTid;
1947 436754 : currItem->indexOffset = offnum;
1948 436754 : if (so->currTuples)
1949 : {
1950 : /* Save base IndexTuple (truncate posting list) */
1951 : IndexTuple base;
1952 135060 : Size itupsz = BTreeTupleGetPostingOffset(itup);
1953 :
1954 135060 : itupsz = MAXALIGN(itupsz);
1955 135060 : currItem->tupleOffset = so->currPos.nextTupleOffset;
1956 135060 : base = (IndexTuple) (so->currTuples + so->currPos.nextTupleOffset);
1957 135060 : memcpy(base, itup, itupsz);
1958 : /* Defensively reduce work area index tuple header size */
1959 135060 : base->t_info &= ~INDEX_SIZE_MASK;
1960 135060 : base->t_info |= itupsz;
1961 135060 : so->currPos.nextTupleOffset += itupsz;
1962 :
1963 135060 : return currItem->tupleOffset;
1964 : }
1965 :
1966 301694 : return 0;
1967 : }
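
The "truncate posting list" step above copies only the prefix of the posting-list tuple (everything before the TID array) and then rewrites the size stored in the tuple header, so that index-only scans see what looks like an ordinary single-TID tuple. The standalone sketch below shows that header-patching technique. SketchTuple is a simplified stand-in; the 13-bit size mask mirrors INDEX_SIZE_MASK from the real index tuple header, and the sketch omits the MAXALIGN step the real code applies to the prefix size first.

#include <stdio.h>
#include <string.h>

#define SKETCH_SIZE_MASK 0x1FFF	/* low 13 bits of t_info hold the size */

typedef struct SketchTuple
{
	unsigned short t_info;		/* flag bits up high, size in low bits */
	char	data[62];			/* key data followed by the posting list */
} SketchTuple;

int
main(void)
{
	SketchTuple full;
	SketchTuple base;
	size_t	postingoff = 16;	/* bytes that precede the TID list */

	memset(&full, 0, sizeof(full));
	memset(&base, 0, sizeof(base));
	full.t_info = (unsigned short) (0x8000 | 40);	/* a flag bit + size 40 */

	/* copy just the prefix, then defensively rewrite the stored size */
	memcpy(&base, &full, postingoff);
	base.t_info &= ~SKETCH_SIZE_MASK;
	base.t_info |= (unsigned short) postingoff;

	printf("size %d, flags 0x%x\n",
		   (int) (base.t_info & SKETCH_SIZE_MASK),
		   (unsigned) (base.t_info & ~SKETCH_SIZE_MASK) & 0xFFFF);
	return 0;					/* prints: size 16, flags 0x8000 */
}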
1968 :
1969 : /*
1970 : * Save an index item into so->currPos.items[itemIndex] for the current
1971 : * posting list tuple.
1972 : *
1973 : * Assumes that _bt_setuppostingitems() has already been called for the
1974 : * current posting list tuple. Caller passes its return value as tupleOffset.
1975 : */
1976 : static inline void
1977 2287396 : _bt_savepostingitem(BTScanOpaque so, int itemIndex, OffsetNumber offnum,
1978 : ItemPointer heapTid, int tupleOffset)
1979 : {
1980 2287396 : BTScanPosItem *currItem = &so->currPos.items[itemIndex];
1981 :
1982 2287396 : currItem->heapTid = *heapTid;
1983 2287396 : currItem->indexOffset = offnum;
1984 :
1985 : /*
1986 : * Have index-only scans return the same base IndexTuple for every TID
1987 : * that originates from the same posting list
1988 : */
1989 2287396 : if (so->currTuples)
1990 849904 : currItem->tupleOffset = tupleOffset;
1991 2287396 : }
1992 :
1993 : /*
1994 : * Return the index item from so->currPos.items[so->currPos.itemIndex] to the
1995 : * index scan by setting the relevant fields in caller's index scan descriptor
1996 : */
1997 : static inline void
1998 23940708 : _bt_returnitem(IndexScanDesc scan, BTScanOpaque so)
1999 : {
2000 23940708 : BTScanPosItem *currItem = &so->currPos.items[so->currPos.itemIndex];
2001 :
2002 : /* Most recent _bt_readpage must have succeeded */
2003 : Assert(BTScanPosIsValid(so->currPos));
2004 : Assert(so->currPos.itemIndex >= so->currPos.firstItem);
2005 : Assert(so->currPos.itemIndex <= so->currPos.lastItem);
2006 :
2007 : /* Return next item, per amgettuple contract */
2008 23940708 : scan->xs_heaptid = currItem->heapTid;
2009 23940708 : if (so->currTuples)
2010 4089380 : scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset);
2011 23940708 : }
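
_bt_returnitem hands tuples back without copying: each saved item records an offset into the scan's packed tuple arena (so->currTuples), and the returned pointer simply aliases that storage. A standalone sketch of the offset-into-arena pattern, with simplified stand-in types:

#include <stdio.h>
#include <string.h>

typedef struct SketchItem
{
	size_t	tupleOffset;		/* where this item's record lives */
} SketchItem;

int
main(void)
{
	char	arena[64];			/* stand-in for so->currTuples */
	SketchItem items[2];
	size_t	next = 0;
	const char *records[2] = {"alpha", "beta"};

	/* pack variable-length records, remembering each one's offset */
	for (int i = 0; i < 2; i++)
	{
		items[i].tupleOffset = next;
		memcpy(arena + next, records[i], strlen(records[i]) + 1);
		next += strlen(records[i]) + 1;
	}

	/* "return" item 1 by aliasing the arena; no copy required */
	printf("%s\n", arena + items[1].tupleOffset);	/* beta */
	return 0;
}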
2012 :
2013 : /*
2014 : * _bt_steppage() -- Step to next page containing valid data for scan
2015 : *
2016 : * Wrapper on _bt_readnextpage that performs final steps for the current page.
2017 : *
2018 : * On entry, if so->currPos.buf is valid the buffer is pinned but not locked.
2019 : * If there's no pin held, it's because _bt_drop_lock_and_maybe_pin dropped
2020 : * the pin eagerly earlier on. In any case, the scan must have
2021 : * so->currPos.currPage set to a valid block.
2022 : */
2023 : static bool
2024 5469944 : _bt_steppage(IndexScanDesc scan, ScanDirection dir)
2025 : {
2026 5469944 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2027 : BlockNumber blkno,
2028 : lastcurrblkno;
2029 :
2030 : Assert(BTScanPosIsValid(so->currPos));
2031 :
2032 : /* Before leaving current page, deal with any killed items */
2033 5469944 : if (so->numKilled > 0)
2034 77120 : _bt_killitems(scan);
2035 :
2036 : /*
2037 : * Before we modify currPos, make a copy of the page data if there was a
2038 : * mark position that needs it.
2039 : */
2040 5469944 : if (so->markItemIndex >= 0)
2041 : {
2042 : /* bump pin on current buffer for assignment to mark buffer */
2043 362 : if (BTScanPosIsPinned(so->currPos))
2044 348 : IncrBufferRefCount(so->currPos.buf);
2045 362 : memcpy(&so->markPos, &so->currPos,
2046 : offsetof(BTScanPosData, items[1]) +
2047 362 : so->currPos.lastItem * sizeof(BTScanPosItem));
2048 362 : if (so->markTuples)
2049 348 : memcpy(so->markTuples, so->currTuples,
2050 348 : so->currPos.nextTupleOffset);
2051 362 : so->markPos.itemIndex = so->markItemIndex;
2052 362 : so->markItemIndex = -1;
2053 :
2054 : /*
2055 : * If we're just about to start the next primitive index scan
2056 : * (possible with a scan that has array keys, and needs to skip to
2057 : * continue in the current scan direction), moreLeft/moreRight only
2058 : * indicate the end of the current primitive index scan. They must
2059 : * never be taken to indicate that the top-level index scan has ended
2060 : * (that would be wrong).
2061 : *
2062 : * We could handle this case by treating the current array keys as
2063 : * markPos state. But depending on the current array state like this
2064 : * would add complexity. Instead, we just unset markPos's copy of
2065 : * moreRight or moreLeft (whichever might be affected), while making
2066 : * btrestpos reset the scan's arrays to their initial scan positions.
2067 : * In effect, btrestpos leaves advancing the arrays up to the first
2068 : * _bt_readpage call (that takes place after it has restored markPos).
2069 : */
2070 362 : if (so->needPrimScan)
2071 : {
2072 0 : if (ScanDirectionIsForward(so->currPos.dir))
2073 0 : so->markPos.moreRight = true;
2074 : else
2075 0 : so->markPos.moreLeft = true;
2076 : }
2077 :
2078 : /* mark/restore not supported by parallel scans */
2079 : Assert(!scan->parallel_scan);
2080 : }
2081 :
2082 5469944 : BTScanPosUnpinIfPinned(so->currPos);
2083 :
2084 : /* Walk to the next page with data */
2085 5469944 : if (ScanDirectionIsForward(dir))
2086 5469764 : blkno = so->currPos.nextPage;
2087 : else
2088 180 : blkno = so->currPos.prevPage;
2089 5469944 : lastcurrblkno = so->currPos.currPage;
2090 :
2091 : /*
2092 : * Cancel primitive index scans that were scheduled when the call to
2093 : * _bt_readpage for currPos happened to use the opposite direction to the
2094 : * one that we're stepping in now. (It's okay to leave the scan's array
2095 : * keys as-is, since the next _bt_readpage will advance them.)
2096 : */
2097 5469944 : if (so->currPos.dir != dir)
2098 36 : so->needPrimScan = false;
2099 :
2100 5469944 : return _bt_readnextpage(scan, blkno, lastcurrblkno, dir, false);
2101 : }
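
The mark-position memcpy above copies only the fixed-size head of BTScanPosData plus the items actually in use, sized as offsetof(BTScanPosData, items[1]) + lastItem * sizeof(BTScanPosItem). A standalone sketch of that partial-struct copy, with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef struct SketchPosData
{
	int		firstItem;
	int		lastItem;
	int		items[100];			/* usually only a few entries are valid */
} SketchPosData;

int
main(void)
{
	SketchPosData cur = {0};
	SketchPosData mark;
	size_t	nbytes;

	cur.lastItem = 2;			/* items[0..2] are valid */
	cur.items[0] = 7;
	cur.items[1] = 8;
	cur.items[2] = 9;

	/* offsetof(..., items[1]) covers the head plus items[0] */
	nbytes = offsetof(SketchPosData, items[1]) +
		cur.lastItem * sizeof(int);
	memset(&mark, 0, sizeof(mark));
	memcpy(&mark, &cur, nbytes);

	printf("copied %zu of %zu bytes; mark.items[2] = %d\n",
		   nbytes, sizeof(SketchPosData), mark.items[2]);
	return 0;
}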
2102 :
2103 : /*
2104 : * _bt_readfirstpage() -- Read first page containing valid data for _bt_first
2105 : *
2106 : * _bt_first caller passes us an offnum returned by _bt_binsrch, which might
2107 : * be an out of bounds offnum such as "maxoff + 1" in certain corner cases.
2108 : * _bt_checkkeys will stop the scan as soon as an equality qual fails (when
2109 : * its scan key was marked required), so _bt_first _must_ pass us an offnum
2110 : * exactly at the beginning of where equal tuples are to be found. When we're
2111 : * passed an offnum past the end of the page, we might still manage to stop
2112 : * the scan on this page by calling _bt_checkkeys against the high key. See
2113 : * _bt_readpage for full details.
2114 : *
2115 : * On entry, so->currPos must be pinned and locked (so offnum stays valid).
2116 : * Parallel scan callers must have seized the scan before calling here.
2117 : *
2118 : * On exit, we'll have updated so->currPos and retained locks and pins
2119 : * according to the same rules as those laid out for _bt_readnextpage exit.
2120 : * Like _bt_readnextpage, our return value indicates if there are any matching
2121 : * records in the given direction.
2122 : *
2123 : * We always release the scan for a parallel scan caller, regardless of
2124 : * success or failure; we'll call _bt_parallel_release as soon as possible.
2125 : */
2126 : static bool
2127 12571820 : _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
2128 : {
2129 12571820 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2130 :
2131 12571820 : so->numKilled = 0; /* just paranoia */
2132 12571820 : so->markItemIndex = -1; /* ditto */
2133 :
2134 : /* Initialize so->currPos for the first page (page in so->currPos.buf) */
2135 12571820 : if (so->needPrimScan)
2136 : {
2137 : Assert(so->numArrayKeys);
2138 :
2139 600 : so->currPos.moreLeft = true;
2140 600 : so->currPos.moreRight = true;
2141 600 : so->needPrimScan = false;
2142 : }
2143 12571220 : else if (ScanDirectionIsForward(dir))
2144 : {
2145 12527880 : so->currPos.moreLeft = false;
2146 12527880 : so->currPos.moreRight = true;
2147 : }
2148 : else
2149 : {
2150 43340 : so->currPos.moreLeft = true;
2151 43340 : so->currPos.moreRight = false;
2152 : }
2153 :
2154 : /*
2155 : * Attempt to load matching tuples from the first page.
2156 : *
2157 : * Note that _bt_readpage will finish initializing the so->currPos fields.
2158 : * _bt_readpage also releases the parallel scan (even when it returns false).
2159 : */
2160 12571820 : if (_bt_readpage(scan, dir, offnum, true))
2161 : {
2162 : /*
2163 : * _bt_readpage succeeded. Drop the lock (and maybe the pin) on
2164 : * so->currPos.buf in preparation for btgettuple returning tuples.
2165 : */
2166 : Assert(BTScanPosIsPinned(so->currPos));
2167 9166510 : _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
2168 9166510 : return true;
2169 : }
2170 :
2171 : /* There's no actually-matching data on the page in so->currPos.buf */
2172 3405310 : _bt_unlockbuf(scan->indexRelation, so->currPos.buf);
2173 :
2174 : /* Call _bt_readnextpage using its _bt_steppage wrapper function */
2175 3405310 : if (!_bt_steppage(scan, dir))
2176 3405152 : return false;
2177 :
2178 : /* _bt_readpage for a later page (now in so->currPos) succeeded */
2179 158 : return true;
2180 : }
2181 :
2182 : /*
2183 : * _bt_readnextpage() -- Read next page containing valid data for _bt_next
2184 : *
2185 : * Caller's blkno is the next interesting page's link, taken from either the
2186 : * previously-saved right link or left link. lastcurrblkno is the page that
2187 : * was current at the point where the blkno link was saved, which we use to
2188 : * reason about concurrent page splits/page deletions during backwards scans.
2189 : *
2190 : * On entry, caller shouldn't hold any locks or pins on any page (we work
2191 : * directly off of blkno and lastcurrblkno instead). Parallel scan callers
2192 : * that seized the scan before calling here should pass seized=true; such a
2193 : * caller's blkno and lastcurrblkno arguments come from the seized scan.
2194 : * seized=false callers just pass us the blkno/lastcurrblkno taken from their
2195 : * so->currPos, which (along with so->currPos itself) can be used to end the
2196 : * scan. A seized=false caller's blkno can never be assumed to be the page
2197 : * that must be read next during a parallel scan, though. We must figure that
2198 : * part out for ourselves by seizing the scan (the correct page to read might
2199 : * already be beyond the seized=false caller's blkno during a parallel scan).
2200 : *
2201 : * On success exit, so->currPos is updated to contain data from the next
2202 : * interesting page, and we return true. We hold a pin on the buffer on
2203 : * success exit, except when _bt_drop_lock_and_maybe_pin decided it was safe
2204 : * to eagerly drop the pin (to avoid blocking VACUUM).
2205 : *
2206 : * If there are no more matching records in the given direction, we drop all
2207 : * locks and pins, invalidate so->currPos, and return false.
2208 : *
2209 : * We always release the scan for a parallel scan caller, regardless of
2210 : * success or failure; we'll call _bt_parallel_release as soon as possible.
2211 : */
2212 : static bool
2213 5469952 : _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno,
2214 : BlockNumber lastcurrblkno, ScanDirection dir, bool seized)
2215 : {
2216 5469952 : Relation rel = scan->indexRelation;
2217 5469952 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2218 :
2219 : Assert(so->currPos.currPage == lastcurrblkno || seized);
2220 : Assert(!BTScanPosIsPinned(so->currPos));
2221 :
2222 : /*
2223 : * Remember that the scan already read lastcurrblkno, a page to the left
2224 : * of blkno (or remember reading a page to the right, for backwards scans)
2225 : */
2226 5469952 : if (ScanDirectionIsForward(dir))
2227 5469772 : so->currPos.moreLeft = true;
2228 : else
2229 180 : so->currPos.moreRight = true;
2230 :
2231 : for (;;)
2232 1574 : {
2233 : Page page;
2234 : BTPageOpaque opaque;
2235 :
2236 5471526 : if (blkno == P_NONE ||
2237 : (ScanDirectionIsForward(dir) ?
2238 1807522 : !so->currPos.moreRight : !so->currPos.moreLeft))
2239 : {
2240 : /* most recent _bt_readpage call (for lastcurrblkno) ended scan */
2241 : Assert(so->currPos.currPage == lastcurrblkno && !seized);
2242 5443872 : BTScanPosInvalidate(so->currPos);
2243 5443872 : _bt_parallel_done(scan); /* iff !so->needPrimScan */
2244 5443872 : return false;
2245 : }
2246 :
2247 : Assert(!so->needPrimScan);
2248 :
2249 : /* parallel scan must never actually visit so->currPos blkno */
2250 27654 : if (!seized && scan->parallel_scan != NULL &&
2251 1212 : !_bt_parallel_seize(scan, &blkno, &lastcurrblkno, false))
2252 : {
2253 : /* whole scan is now done (or another primitive scan required) */
2254 8 : BTScanPosInvalidate(so->currPos);
2255 8 : return false;
2256 : }
2257 :
2258 27646 : if (ScanDirectionIsForward(dir))
2259 : {
2260 : /* read blkno, but check for interrupts first */
2261 27540 : CHECK_FOR_INTERRUPTS();
2262 27540 : so->currPos.buf = _bt_getbuf(rel, blkno, BT_READ);
2263 : }
2264 : else
2265 : {
2266 : /* read blkno, avoiding race (also checks for interrupts) */
2267 106 : so->currPos.buf = _bt_lock_and_validate_left(rel, &blkno,
2268 : lastcurrblkno);
2269 106 : if (so->currPos.buf == InvalidBuffer)
2270 : {
2271 : /* must have been a concurrent deletion of leftmost page */
2272 0 : BTScanPosInvalidate(so->currPos);
2273 0 : _bt_parallel_done(scan);
2274 0 : return false;
2275 : }
2276 : }
2277 :
2278 27646 : page = BufferGetPage(so->currPos.buf);
2279 27646 : opaque = BTPageGetOpaque(page);
2280 27646 : lastcurrblkno = blkno;
2281 27646 : if (likely(!P_IGNORE(opaque)))
2282 : {
2283 : /* see if there are any matches on this page */
2284 27646 : if (ScanDirectionIsForward(dir))
2285 : {
2286 : /* note that this will clear moreRight if we can stop */
2287 27540 : if (_bt_readpage(scan, dir, P_FIRSTDATAKEY(opaque), false))
2288 25978 : break;
2289 1562 : blkno = so->currPos.nextPage;
2290 : }
2291 : else
2292 : {
2293 : /* note that this will clear moreLeft if we can stop */
2294 106 : if (_bt_readpage(scan, dir, PageGetMaxOffsetNumber(page), false))
2295 94 : break;
2296 12 : blkno = so->currPos.prevPage;
2297 : }
2298 : }
2299 : else
2300 : {
2301 : /* _bt_readpage not called, so do all this for ourselves */
2302 0 : if (ScanDirectionIsForward(dir))
2303 0 : blkno = opaque->btpo_next;
2304 : else
2305 0 : blkno = opaque->btpo_prev;
2306 0 : if (scan->parallel_scan != NULL)
2307 0 : _bt_parallel_release(scan, blkno, lastcurrblkno);
2308 : }
2309 :
2310 : /* no matching tuples on this page */
2311 1574 : _bt_relbuf(rel, so->currPos.buf);
2312 1574 : seized = false; /* released by _bt_readpage (or by us) */
2313 : }
2314 :
2315 : /*
2316 : * _bt_readpage succeeded. Drop the lock (and maybe the pin) on
2317 : * so->currPos.buf in preparation for btgettuple returning tuples.
2318 : */
2319 : Assert(so->currPos.currPage == blkno);
2320 : Assert(BTScanPosIsPinned(so->currPos));
2321 26072 : _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
2322 :
2323 26072 : return true;
2324 : }
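
Stripped of locking, parallel-scan seizure, and the moreLeft/moreRight bookkeeping, the loop above reduces to "follow the saved sibling link until some page yields matches or the chain ends". A standalone sketch of that shape, with small int arrays standing in for leaf pages and -1 playing the role of the P_NONE link:

#include <stdio.h>

#define SKETCH_NONE (-1)

typedef struct SketchPage
{
	int		next;				/* right sibling index, or SKETCH_NONE */
	int		nitems;
	int		items[4];
} SketchPage;

/* returns index of first page at or after blkno with a matching item */
static int
sketch_readnextpage(const SketchPage *pages, int blkno, int key)
{
	while (blkno != SKETCH_NONE)
	{
		const SketchPage *page = &pages[blkno];

		for (int i = 0; i < page->nitems; i++)
		{
			if (page->items[i] == key)
				return blkno;	/* "_bt_readpage" found matches */
		}
		blkno = page->next;		/* no matches here: step right */
	}
	return SKETCH_NONE;			/* scan is done */
}

int
main(void)
{
	SketchPage	pages[3] = {
		{.next = 1, .nitems = 2, .items = {1, 2}},
		{.next = 2, .nitems = 2, .items = {3, 4}},
		{.next = SKETCH_NONE, .nitems = 1, .items = {5}},
	};

	printf("%d\n", sketch_readnextpage(pages, 0, 5));	/* 2 */
	printf("%d\n", sketch_readnextpage(pages, 1, 1));	/* -1 */
	return 0;
}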
2325 :
2326 : /*
2327 : * _bt_lock_and_validate_left() -- lock caller's left sibling blkno,
2328 : * recovering from concurrent page splits/page deletions when necessary
2329 : *
2330 : * Called during backwards scans, to deal with their unique concurrency rules.
2331 : *
2332 : * blkno points to the block number of the page that we expect to move the
2333 : * scan to. We'll successfully move the scan there when we find that its
2334 : * right sibling link still points to lastcurrblkno (the page we just read).
2335 : * Otherwise, we have to figure out which page is the correct one for the scan
2336 : * to now read the hard way, reasoning about concurrent splits and deletions.
2337 : * See nbtree/README.
2338 : *
2339 : * On return, we have both a pin and a read lock on the returned page, whose
2340 : * block number will be set in *blkno. Returns InvalidBuffer if there is no
2341 : * page to the left (no lock or pin is held in that case).
2342 : *
2343 : * It is possible for the returned leaf page to be half-dead; caller must
2344 : * check that condition and step left again when required.
2345 : */
2346 : static Buffer
2347 106 : _bt_lock_and_validate_left(Relation rel, BlockNumber *blkno,
2348 : BlockNumber lastcurrblkno)
2349 : {
2350 106 : BlockNumber origblkno = *blkno; /* detects circular links */
2351 :
2352 : for (;;)
2353 0 : {
2354 : Buffer buf;
2355 : Page page;
2356 : BTPageOpaque opaque;
2357 : int tries;
2358 :
2359 : /* check for interrupts while we're not holding any buffer lock */
2360 106 : CHECK_FOR_INTERRUPTS();
2361 106 : buf = _bt_getbuf(rel, *blkno, BT_READ);
2362 106 : page = BufferGetPage(buf);
2363 106 : opaque = BTPageGetOpaque(page);
2364 :
2365 : /*
2366 : * If this isn't the page we want, walk right till we find what we
2367 : * want --- but go no more than four hops (an arbitrary limit). If we
2368 : * don't find the correct page by then, the most likely bet is that
2369 : * lastcurrblkno got deleted and isn't in the sibling chain at all
2370 : * anymore, not that its left sibling got split more than four times.
2371 : *
2372 : * Note that it is correct to test P_ISDELETED not P_IGNORE here,
2373 : * because half-dead pages are still in the sibling chain.
2374 : */
2375 106 : tries = 0;
2376 : for (;;)
2377 : {
2378 106 : if (likely(!P_ISDELETED(opaque) &&
2379 : opaque->btpo_next == lastcurrblkno))
2380 : {
2381 : /* Found desired page, return it */
2382 106 : return buf;
2383 : }
2384 0 : if (P_RIGHTMOST(opaque) || ++tries > 4)
2385 : break;
2386 : /* step right */
2387 0 : *blkno = opaque->btpo_next;
2388 0 : buf = _bt_relandgetbuf(rel, buf, *blkno, BT_READ);
2389 0 : page = BufferGetPage(buf);
2390 0 : opaque = BTPageGetOpaque(page);
2391 : }
2392 :
2393 : /*
2394 : * Return to the original page (usually the page most recently read by
2395 : * _bt_readpage, which is passed by caller as lastcurrblkno) to see
2396 : * what's up with its prev sibling link
2397 : */
2398 0 : buf = _bt_relandgetbuf(rel, buf, lastcurrblkno, BT_READ);
2399 0 : page = BufferGetPage(buf);
2400 0 : opaque = BTPageGetOpaque(page);
2401 0 : if (P_ISDELETED(opaque))
2402 : {
2403 : /*
2404 : * It was deleted. Move right to first nondeleted page (there
2405 : * must be one); that is the page that has acquired the deleted
2406 : * one's keyspace, so stepping left from it will take us where we
2407 : * want to be.
2408 : */
2409 : for (;;)
2410 : {
2411 0 : if (P_RIGHTMOST(opaque))
2412 0 : elog(ERROR, "fell off the end of index \"%s\"",
2413 : RelationGetRelationName(rel));
2414 0 : lastcurrblkno = opaque->btpo_next;
2415 0 : buf = _bt_relandgetbuf(rel, buf, lastcurrblkno, BT_READ);
2416 0 : page = BufferGetPage(buf);
2417 0 : opaque = BTPageGetOpaque(page);
2418 0 : if (!P_ISDELETED(opaque))
2419 0 : break;
2420 : }
2421 : }
2422 : else
2423 : {
2424 : /*
2425 : * Original lastcurrblkno wasn't deleted; the explanation had
2426 : * better be that the page to the left got split or deleted.
2427 : * Without this check, we risk going into an infinite loop.
2428 : */
2429 0 : if (opaque->btpo_prev == origblkno)
2430 0 : elog(ERROR, "could not find left sibling of block %u in index \"%s\"",
2431 : lastcurrblkno, RelationGetRelationName(rel));
2432 : /* Okay to try again, since left sibling link changed */
2433 : }
2434 :
2435 : /*
2436 : * Original lastcurrblkno from caller was concurrently deleted (could
2437 : * also have been a great many concurrent left sibling page splits).
2438 : * Found a non-deleted page that should now act as our lastcurrblkno.
2439 : */
2440 0 : if (P_LEFTMOST(opaque))
2441 : {
2442 : /* New lastcurrblkno has no left sibling (concurrently deleted) */
2443 0 : _bt_relbuf(rel, buf);
2444 0 : break;
2445 : }
2446 :
2447 : /* Start from scratch with new lastcurrblkno's blkno/prev link */
2448 0 : *blkno = origblkno = opaque->btpo_prev;
2449 0 : _bt_relbuf(rel, buf);
2450 : }
2451 :
2452 0 : return InvalidBuffer;
2453 : }
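
The recovery logic above trusts a cached left link only if the candidate page's right link still points back at the page the scan just read; otherwise it walks right a bounded number of hops before falling back to the rarer recovery paths. Below is a standalone sketch of just that validation step, with a plain doubly linked list standing in for the sibling chain (deleted pages and the restart path are omitted):

#include <stdio.h>

#define SKETCH_NONE (-1)
#define SKETCH_MAX_HOPS 4

typedef struct SketchNode
{
	int		prev;
	int		next;
} SketchNode;

/* returns the true left sibling of curr, or SKETCH_NONE if not found */
static int
sketch_validate_left(const SketchNode *nodes, int cachedleft, int curr)
{
	int		blkno = cachedleft;

	for (int hops = 0; hops <= SKETCH_MAX_HOPS; hops++)
	{
		if (nodes[blkno].next == curr)
			return blkno;		/* right link points back: still valid */
		if (nodes[blkno].next == SKETCH_NONE)
			break;
		blkno = nodes[blkno].next;	/* concurrent "split": step right */
	}
	return SKETCH_NONE;			/* caller must recover some other way */
}

int
main(void)
{
	/* node 0 "split": chain is 0 -> 2 -> 1, but node 1 caches prev = 0 */
	SketchNode	nodes[3] = {
		{SKETCH_NONE, 2},		/* 0 */
		{0, SKETCH_NONE},		/* 1: stale prev link */
		{0, 1},					/* 2: the new page from the split */
	};

	printf("%d\n", sketch_validate_left(nodes, nodes[1].prev, 1));	/* 2 */
	return 0;
}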
2454 :
2455 : /*
2456 : * _bt_get_endpoint() -- Find the first or last page on a given tree level
2457 : *
2458 : * If the index is empty, we will return InvalidBuffer; any other failure
2459 : * condition causes ereport(). We will not return a dead page.
2460 : *
2461 : * The returned buffer is pinned and read-locked.
2462 : */
2463 : Buffer
2464 76930 : _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
2465 : {
2466 : Buffer buf;
2467 : Page page;
2468 : BTPageOpaque opaque;
2469 : OffsetNumber offnum;
2470 : BlockNumber blkno;
2471 : IndexTuple itup;
2472 :
2473 : /*
2474 : * If we are looking for a leaf page, okay to descend from fast root;
2475 : * otherwise better descend from true root. (There is no point in being
2476 : * smarter about intermediate levels.)
2477 : */
2478 76930 : if (level == 0)
2479 76906 : buf = _bt_getroot(rel, NULL, BT_READ);
2480 : else
2481 24 : buf = _bt_gettrueroot(rel);
2482 :
2483 76930 : if (!BufferIsValid(buf))
2484 6566 : return InvalidBuffer;
2485 :
2486 70364 : page = BufferGetPage(buf);
2487 70364 : opaque = BTPageGetOpaque(page);
2488 :
2489 : for (;;)
2490 : {
2491 : /*
2492 : * If we landed on a deleted page, step right to find a live page
2493 : * (there must be one). Also, if we want the rightmost page, step
2494 : * right if needed to get to it (this could happen if the page split
2495 : * since we obtained a pointer to it).
2496 : */
2497 88910 : while (P_IGNORE(opaque) ||
2498 66 : (rightmost && !P_RIGHTMOST(opaque)))
2499 : {
2500 0 : blkno = opaque->btpo_next;
2501 0 : if (blkno == P_NONE)
2502 0 : elog(ERROR, "fell off the end of index \"%s\"",
2503 : RelationGetRelationName(rel));
2504 0 : buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
2505 0 : page = BufferGetPage(buf);
2506 0 : opaque = BTPageGetOpaque(page);
2507 : }
2508 :
2509 : /* Done? */
2510 88910 : if (opaque->btpo_level == level)
2511 70364 : break;
2512 18546 : if (opaque->btpo_level < level)
2513 0 : ereport(ERROR,
2514 : (errcode(ERRCODE_INDEX_CORRUPTED),
2515 : errmsg_internal("btree level %u not found in index \"%s\"",
2516 : level, RelationGetRelationName(rel))));
2517 :
2518 : /* Descend to leftmost or rightmost child page */
2519 18546 : if (rightmost)
2520 6 : offnum = PageGetMaxOffsetNumber(page);
2521 : else
2522 18540 : offnum = P_FIRSTDATAKEY(opaque);
2523 :
2524 18546 : itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
2525 18546 : blkno = BTreeTupleGetDownLink(itup);
2526 :
2527 18546 : buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ);
2528 18546 : page = BufferGetPage(buf);
2529 18546 : opaque = BTPageGetOpaque(page);
2530 : }
2531 :
2532 70364 : return buf;
2533 : }
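
The descent above takes one downlink per loop iteration: the first downlink for a leftmost endpoint, the last for a rightmost one, until it reaches the requested level. A standalone sketch with a tiny fixed-shape tree standing in for the btree (the deleted-page and concurrent-split handling is omitted):

#include <stdbool.h>
#include <stdio.h>

typedef struct SketchPage
{
	int		level;				/* 0 means leaf */
	int		nchildren;
	int		children[3];		/* downlinks (indexes into the page array) */
} SketchPage;

static int
sketch_get_endpoint(const SketchPage *pages, int root, int level,
					bool rightmost)
{
	int		blkno = root;

	while (pages[blkno].level > level)
	{
		const SketchPage *page = &pages[blkno];

		blkno = rightmost ?
			page->children[page->nchildren - 1] : page->children[0];
	}
	return blkno;
}

int
main(void)
{
	SketchPage	pages[4] = {
		{1, 3, {1, 2, 3}},		/* 0: root, one level above the leaves */
		{0, 0, {0}},			/* 1: leftmost leaf */
		{0, 0, {0}},			/* 2 */
		{0, 0, {0}},			/* 3: rightmost leaf */
	};

	printf("%d %d\n",
		   sketch_get_endpoint(pages, 0, 0, false),		/* 1 */
		   sketch_get_endpoint(pages, 0, 0, true));		/* 3 */
	return 0;
}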
2534 :
2535 : /*
2536 : * _bt_endpoint() -- Find the first or last page in the index, and scan
2537 : * from there to the first key satisfying all the quals.
2538 : *
2539 : * This is used by _bt_first() to set up a scan when we've determined
2540 : * that the scan must start at the beginning or end of the index (for
2541 : * a forward or backward scan respectively).
2542 : *
2543 : * Parallel scan callers must have seized the scan before calling here.
2544 : * Exit conditions are the same as for _bt_first().
2545 : */
2546 : static bool
2547 76906 : _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
2548 : {
2549 76906 : Relation rel = scan->indexRelation;
2550 76906 : BTScanOpaque so = (BTScanOpaque) scan->opaque;
2551 : Page page;
2552 : BTPageOpaque opaque;
2553 : OffsetNumber start;
2554 :
2555 : Assert(!BTScanPosIsValid(so->currPos));
2556 :
2557 : /*
2558 : * Scan down to the leftmost or rightmost leaf page. This is a simplified
2559 : * version of _bt_search().
2560 : */
2561 76906 : so->currPos.buf = _bt_get_endpoint(rel, 0, ScanDirectionIsBackward(dir));
2562 :
2563 76906 : if (!BufferIsValid(so->currPos.buf))
2564 : {
2565 : /*
2566 : * Empty index. Lock the whole relation, as nothing finer exists
2567 : * to lock.
2568 : */
2569 6566 : PredicateLockRelation(rel, scan->xs_snapshot);
2570 6566 : _bt_parallel_done(scan);
2571 6566 : return false;
2572 : }
2573 :
2574 70340 : page = BufferGetPage(so->currPos.buf);
2575 70340 : opaque = BTPageGetOpaque(page);
2576 : Assert(P_ISLEAF(opaque));
2577 :
2578 70340 : if (ScanDirectionIsForward(dir))
2579 : {
2580 : /* There could be dead pages to the left, so not this: */
2581 : /* Assert(P_LEFTMOST(opaque)); */
2582 :
2583 70280 : start = P_FIRSTDATAKEY(opaque);
2584 : }
2585 60 : else if (ScanDirectionIsBackward(dir))
2586 : {
2587 : Assert(P_RIGHTMOST(opaque));
2588 :
2589 60 : start = PageGetMaxOffsetNumber(page);
2590 : }
2591 : else
2592 : {
2593 0 : elog(ERROR, "invalid scan direction: %d", (int) dir);
2594 : start = 0; /* keep compiler quiet */
2595 : }
2596 :
2597 : /*
2598 : * Now load data from the first page of the scan.
2599 : */
2600 70340 : if (!_bt_readfirstpage(scan, start, dir))
2601 1610 : return false;
2602 :
2603 68730 : _bt_returnitem(scan, so);
2604 68730 : return true;
2605 : }