Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * spgutils.c
4 : * various support functions for SP-GiST
5 : *
6 : *
7 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * IDENTIFICATION
11 : * src/backend/access/spgist/spgutils.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 :
16 : #include "postgres.h"
17 :
18 : #include "access/amvalidate.h"
19 : #include "access/htup_details.h"
20 : #include "access/reloptions.h"
21 : #include "access/spgist_private.h"
22 : #include "access/toast_compression.h"
23 : #include "access/transam.h"
24 : #include "access/xact.h"
25 : #include "catalog/pg_amop.h"
26 : #include "commands/vacuum.h"
27 : #include "nodes/nodeFuncs.h"
28 : #include "parser/parse_coerce.h"
29 : #include "storage/bufmgr.h"
30 : #include "storage/indexfsm.h"
31 : #include "utils/catcache.h"
32 : #include "utils/fmgrprotos.h"
33 : #include "utils/index_selfuncs.h"
34 : #include "utils/lsyscache.h"
35 : #include "utils/rel.h"
36 : #include "utils/syscache.h"
37 :
38 :
39 : /*
40 : * SP-GiST handler function: return IndexAmRoutine with access method parameters
41 : * and callbacks.
42 : */
Datum
spghandler(PG_FUNCTION_ARGS)
{
	/*
	 * The routine table is entirely compile-time constant, so one static
	 * instance serves every caller; we return its address rather than
	 * palloc'ing a fresh copy per call.
	 */
	static const IndexAmRoutine amroutine = {
		.type = T_IndexAmRoutine,

		/* fixed AM parameters and capability flags */
		.amstrategies = 0,
		.amsupport = SPGISTNProc,
		.amoptsprocnum = SPGIST_OPTIONS_PROC,
		.amcanorder = false,
		.amcanorderbyop = true,
		.amcanhash = false,
		.amconsistentequality = false,
		.amconsistentordering = false,
		.amcanbackward = false,
		.amcanunique = false,
		.amcanmulticol = false,
		.amoptionalkey = true,
		.amsearcharray = false,
		.amsearchnulls = true,
		.amstorage = true,
		.amclusterable = false,
		.ampredlocks = false,
		.amcanparallel = false,
		.amcanbuildparallel = false,
		.amcaninclude = true,
		.amusemaintenanceworkmem = false,
		.amsummarizing = false,
		.amparallelvacuumoptions =
		VACUUM_OPTION_PARALLEL_BULKDEL | VACUUM_OPTION_PARALLEL_COND_CLEANUP,
		.amkeytype = InvalidOid,

		/* callback functions; NULL marks optional callbacks not provided */
		.ambuild = spgbuild,
		.ambuildempty = spgbuildempty,
		.aminsert = spginsert,
		.aminsertcleanup = NULL,
		.ambulkdelete = spgbulkdelete,
		.amvacuumcleanup = spgvacuumcleanup,
		.amcanreturn = spgcanreturn,
		.amcostestimate = spgcostestimate,
		.amgettreeheight = NULL,
		.amoptions = spgoptions,
		.amproperty = spgproperty,
		.ambuildphasename = NULL,
		.amvalidate = spgvalidate,
		.amadjustmembers = spgadjustmembers,
		.ambeginscan = spgbeginscan,
		.amrescan = spgrescan,
		.amgettuple = spggettuple,
		.amgetbitmap = spggetbitmap,
		.amendscan = spgendscan,
		.ammarkpos = NULL,
		.amrestrpos = NULL,
		.amestimateparallelscan = NULL,
		.aminitparallelscan = NULL,
		.amparallelrescan = NULL,
		.amtranslatestrategy = NULL,
		.amtranslatecmptype = NULL,
	};

	PG_RETURN_POINTER(&amroutine);
}
104 :
105 : /*
106 : * GetIndexInputType
107 : * Determine the nominal input data type for an index column
108 : *
109 : * We define the "nominal" input type as the associated opclass's opcintype,
110 : * or if that is a polymorphic type, the base type of the heap column or
111 : * expression that is the index's input. The reason for preferring the
112 : * opcintype is that non-polymorphic opclasses probably don't want to hear
113 : * about binary-compatible input types. For instance, if a text opclass
114 : * is being used with a varchar heap column, we want to report "text" not
115 : * "varchar". Likewise, opclasses don't want to hear about domain types,
116 : * so if we do consult the actual input type, we make sure to flatten domains.
117 : *
118 : * At some point maybe this should go somewhere else, but it's not clear
119 : * if any other index AMs have a use for it.
120 : */
121 : static Oid
122 261 : GetIndexInputType(Relation index, AttrNumber indexcol)
123 : {
124 : Oid opcintype;
125 : AttrNumber heapcol;
126 : List *indexprs;
127 : ListCell *indexpr_item;
128 :
129 : Assert(index->rd_index != NULL);
130 : Assert(indexcol > 0 && indexcol <= index->rd_index->indnkeyatts);
131 261 : opcintype = index->rd_opcintype[indexcol - 1];
132 261 : if (!IsPolymorphicType(opcintype))
133 202 : return opcintype;
134 59 : heapcol = index->rd_index->indkey.values[indexcol - 1];
135 59 : if (heapcol != 0) /* Simple index column? */
136 51 : return getBaseType(get_atttype(index->rd_index->indrelid, heapcol));
137 :
138 : /*
139 : * If the index expressions are already cached, skip calling
140 : * RelationGetIndexExpressions, as it will make a copy which is overkill.
141 : * We're not going to modify the trees, and we're not going to do anything
142 : * that would invalidate the relcache entry before we're done.
143 : */
144 8 : if (index->rd_indexprs)
145 0 : indexprs = index->rd_indexprs;
146 : else
147 8 : indexprs = RelationGetIndexExpressions(index);
148 8 : indexpr_item = list_head(indexprs);
149 8 : for (int i = 1; i <= index->rd_index->indnkeyatts; i++)
150 : {
151 8 : if (index->rd_index->indkey.values[i - 1] == 0)
152 : {
153 : /* expression column */
154 8 : if (indexpr_item == NULL)
155 0 : elog(ERROR, "wrong number of index expressions");
156 8 : if (i == indexcol)
157 8 : return getBaseType(exprType((Node *) lfirst(indexpr_item)));
158 0 : indexpr_item = lnext(indexprs, indexpr_item);
159 : }
160 : }
161 0 : elog(ERROR, "wrong number of index expressions");
162 : return InvalidOid; /* keep compiler quiet */
163 : }
164 :
165 : /* Fill in a SpGistTypeDesc struct with info about the specified data type */
166 : static void
167 801 : fillTypeDesc(SpGistTypeDesc *desc, Oid type)
168 : {
169 : HeapTuple tp;
170 : Form_pg_type typtup;
171 :
172 801 : desc->type = type;
173 801 : tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type));
174 801 : if (!HeapTupleIsValid(tp))
175 0 : elog(ERROR, "cache lookup failed for type %u", type);
176 801 : typtup = (Form_pg_type) GETSTRUCT(tp);
177 801 : desc->attlen = typtup->typlen;
178 801 : desc->attbyval = typtup->typbyval;
179 801 : desc->attalign = typtup->typalign;
180 801 : desc->attstorage = typtup->typstorage;
181 801 : ReleaseSysCache(tp);
182 801 : }
183 :
184 : /*
185 : * Fetch local cache of AM-specific info about the index, initializing it
186 : * if necessary
187 : */
SpGistCache *
spgGetCache(Relation index)
{
	SpGistCache *cache;

	if (index->rd_amcache == NULL)
	{
		Oid			atttype;
		spgConfigIn in;
		FmgrInfo   *procinfo;

		/* Allocate in the index's own context so it lives with the relcache entry */
		cache = MemoryContextAllocZero(index->rd_indexcxt,
									   sizeof(SpGistCache));

		/* SPGiST must have one key column and can also have INCLUDE columns */
		Assert(IndexRelationGetNumberOfKeyAttributes(index) == 1);
		Assert(IndexRelationGetNumberOfAttributes(index) <= INDEX_MAX_KEYS);

		/*
		 * Get the actual (well, nominal) data type of the key column.  We
		 * pass this to the opclass config function so that polymorphic
		 * opclasses are possible.
		 */
		atttype = GetIndexInputType(index, spgKeyColumn + 1);

		/* Call the config function to get config info for the opclass */
		in.attType = atttype;

		procinfo = index_getprocinfo(index, 1, SPGIST_CONFIG_PROC);
		FunctionCall2Coll(procinfo,
						  index->rd_indcollation[spgKeyColumn],
						  PointerGetDatum(&in),
						  PointerGetDatum(&cache->config));

		/*
		 * If leafType isn't specified, use the declared index column type,
		 * which index.c will have derived from the opclass's opcintype.
		 * (Although we now make spgvalidate.c warn if these aren't the same,
		 * old user-defined opclasses may not set the STORAGE parameter
		 * correctly, so believe leafType if it's given.)
		 */
		if (!OidIsValid(cache->config.leafType))
		{
			cache->config.leafType =
				TupleDescAttr(RelationGetDescr(index), spgKeyColumn)->atttypid;

			/*
			 * If index column type is binary-coercible to atttype (for
			 * example, it's a domain over atttype), treat it as plain atttype
			 * to avoid thinking we need to compress.
			 */
			if (cache->config.leafType != atttype &&
				IsBinaryCoercible(cache->config.leafType, atttype))
				cache->config.leafType = atttype;
		}

		/* Get the information we need about each relevant datatype */
		fillTypeDesc(&cache->attType, atttype);

		if (cache->config.leafType != atttype)
		{
			/* A distinct leaf type is only usable with a compress function */
			if (!OidIsValid(index_getprocid(index, 1, SPGIST_COMPRESS_PROC)))
				ereport(ERROR,
						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						 errmsg("compress method must be defined when leaf type is different from input type")));

			fillTypeDesc(&cache->attLeafType, cache->config.leafType);
		}
		else
		{
			/* Save lookups in this common case */
			cache->attLeafType = cache->attType;
		}

		fillTypeDesc(&cache->attPrefixType, cache->config.prefixType);
		fillTypeDesc(&cache->attLabelType, cache->config.labelType);

		/*
		 * Finally, if it's a real index (not a partitioned one), get the
		 * lastUsedPages data from the metapage.  A share lock suffices since
		 * we only read the metapage here.
		 */
		if (index->rd_rel->relkind != RELKIND_PARTITIONED_INDEX)
		{
			Buffer		metabuffer;
			SpGistMetaPageData *metadata;

			metabuffer = ReadBuffer(index, SPGIST_METAPAGE_BLKNO);
			LockBuffer(metabuffer, BUFFER_LOCK_SHARE);

			metadata = SpGistPageGetMeta(BufferGetPage(metabuffer));

			if (metadata->magicNumber != SPGIST_MAGIC_NUMBER)
				elog(ERROR, "index \"%s\" is not an SP-GiST index",
					 RelationGetRelationName(index));

			cache->lastUsedPages = metadata->lastUsedPages;

			UnlockReleaseBuffer(metabuffer);
		}

		/* Cache is fully built; publish it on the relcache entry */
		index->rd_amcache = cache;
	}
	else
	{
		/* assume it's up to date */
		cache = (SpGistCache *) index->rd_amcache;
	}

	return cache;
}
298 :
299 : /*
300 : * Compute a tuple descriptor for leaf tuples or index-only-scan result tuples.
301 : *
302 : * We can use the relcache's tupdesc as-is in many cases, and it's always
303 : * OK so far as any INCLUDE columns are concerned. However, the entry for
304 : * the key column has to match leafType in the first case or attType in the
305 : * second case. While the relcache's tupdesc *should* show leafType, this
306 : * might not hold for legacy user-defined opclasses, since before v14 they
307 : * were not allowed to declare their true storage type in CREATE OPCLASS.
308 : * Also, attType can be different from what is in the relcache.
309 : *
310 : * This function gives back either a pointer to the relcache's tupdesc
311 : * if that is suitable, or a palloc'd copy that's been adjusted to match
312 : * the specified key column type. We can avoid doing any catalog lookups
313 : * here by insisting that the caller pass an SpGistTypeDesc not just an OID.
314 : */
315 : TupleDesc
316 162735 : getSpGistTupleDesc(Relation index, SpGistTypeDesc *keyType)
317 : {
318 : TupleDesc outTupDesc;
319 : Form_pg_attribute att;
320 :
321 325470 : if (keyType->type ==
322 162735 : TupleDescAttr(RelationGetDescr(index), spgKeyColumn)->atttypid)
323 162650 : outTupDesc = RelationGetDescr(index);
324 : else
325 : {
326 85 : outTupDesc = CreateTupleDescCopy(RelationGetDescr(index));
327 85 : att = TupleDescAttr(outTupDesc, spgKeyColumn);
328 : /* It's sufficient to update the type-dependent fields of the column */
329 85 : att->atttypid = keyType->type;
330 85 : att->atttypmod = -1;
331 85 : att->attlen = keyType->attlen;
332 85 : att->attbyval = keyType->attbyval;
333 85 : att->attalign = keyType->attalign;
334 85 : att->attstorage = keyType->attstorage;
335 : /* We shouldn't need to bother with making these valid: */
336 85 : att->attcompression = InvalidCompressionMethod;
337 85 : att->attcollation = InvalidOid;
338 :
339 85 : populate_compact_attribute(outTupDesc, spgKeyColumn);
340 85 : TupleDescFinalize(outTupDesc);
341 : }
342 162735 : return outTupDesc;
343 : }
344 :
345 : /* Initialize SpGistState for working with the given index */
void
initSpGistState(SpGistState *state, Relation index)
{
	SpGistCache *cache;

	state->index = index;

	/* Get cached static information about index */
	cache = spgGetCache(index);

	/* Copy the per-index invariants into the caller's working state */
	state->config = cache->config;
	state->attType = cache->attType;
	state->attLeafType = cache->attLeafType;
	state->attPrefixType = cache->attPrefixType;
	state->attLabelType = cache->attLabelType;

	/* Ensure we have a valid descriptor for leaf tuples */
	state->leafTupDesc = getSpGistTupleDesc(state->index, &state->attLeafType);

	/* Make workspace for constructing dead tuples */
	state->deadTupleStorage = palloc0(SGDTSIZE);

	/*
	 * Set horizon XID to use in redirection tuples.  Use our own XID if we
	 * have one, else use InvalidTransactionId.  The latter case can happen in
	 * VACUUM or REINDEX CONCURRENTLY, and in neither case would it be okay to
	 * force an XID to be assigned.  VACUUM won't create any redirection
	 * tuples anyway, but REINDEX CONCURRENTLY can.  Fortunately, REINDEX
	 * CONCURRENTLY doesn't mark the index valid until the end, so there could
	 * never be any concurrent scans "in flight" to a redirection tuple it has
	 * inserted.  And it locks out VACUUM until the end, too.  So it's okay
	 * for VACUUM to immediately expire a redirection tuple that contains an
	 * invalid xid.
	 */
	state->redirectXid = GetTopTransactionIdIfAny();

	/* Assume we're not in an index build (spgbuild will override) */
	state->isBuild = false;
}
385 :
386 : /*
387 : * Allocate a new page (either by recycling, or by extending the index file).
388 : *
389 : * The returned buffer is already pinned and exclusive-locked.
390 : * Caller is responsible for initializing the page by calling SpGistInitBuffer.
391 : */
Buffer
SpGistNewBuffer(Relation index)
{
	Buffer		buffer;

	/* First, try to get a page from FSM */
	for (;;)
	{
		BlockNumber blkno = GetFreeIndexPage(index);

		if (blkno == InvalidBlockNumber)
			break;				/* nothing known to FSM */

		/*
		 * The fixed pages shouldn't ever be listed in FSM, but just in case
		 * one is, ignore it.
		 */
		if (SpGistBlockIsFixed(blkno))
			continue;

		buffer = ReadBuffer(index, blkno);

		/*
		 * We have to guard against the possibility that someone else already
		 * recycled this page; the buffer may be locked if so.  Hence try the
		 * lock without waiting.
		 */
		if (ConditionalLockBuffer(buffer))
		{
			Page		page = BufferGetPage(buffer);

			if (PageIsNew(page))
				return buffer;	/* OK to use, if never initialized */

			if (SpGistPageIsDeleted(page) || PageIsEmpty(page))
				return buffer;	/* OK to use */

			/* Page is in use after all; drop the lock before retrying */
			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		}

		/* Can't use it, so release buffer and try again */
		ReleaseBuffer(buffer);
	}

	/* FSM had nothing usable, so extend the relation by one (locked) page */
	buffer = ExtendBufferedRel(BMR_REL(index), MAIN_FORKNUM, NULL,
							   EB_LOCK_FIRST);

	return buffer;
}
440 :
441 : /*
442 : * Update index metapage's lastUsedPages info from local cache, if possible
443 : *
444 : * Updating meta page isn't critical for index working, so
445 : * 1 use ConditionalLockBuffer to improve concurrency
446 : * 2 don't WAL-log metabuffer changes to decrease WAL traffic
447 : */
void
SpGistUpdateMetaPage(Relation index)
{
	SpGistCache *cache = (SpGistCache *) index->rd_amcache;

	/* If we never built a cache for this index, there's nothing to save */
	if (cache != NULL)
	{
		Buffer		metabuffer;

		metabuffer = ReadBuffer(index, SPGIST_METAPAGE_BLKNO);

		/* Don't wait for the lock; skipping the update is harmless */
		if (ConditionalLockBuffer(metabuffer))
		{
			Page		metapage = BufferGetPage(metabuffer);
			SpGistMetaPageData *metadata = SpGistPageGetMeta(metapage);

			metadata->lastUsedPages = cache->lastUsedPages;

			/*
			 * Set pd_lower just past the end of the metadata.  This is
			 * essential, because without doing so, metadata will be lost if
			 * xlog.c compresses the page.  (We must do this here because
			 * pre-v11 versions of PG did not set the metapage's pd_lower
			 * correctly, so a pg_upgraded index might contain the wrong
			 * value.)
			 */
			((PageHeader) metapage)->pd_lower =
				((char *) metadata + sizeof(SpGistMetaPageData)) - (char *) metapage;

			MarkBufferDirty(metabuffer);
			UnlockReleaseBuffer(metabuffer);
		}
		else
		{
			/* Someone else has the lock; give up rather than block */
			ReleaseBuffer(metabuffer);
		}
	}
}
486 :
487 : /* Macro to select proper element of lastUsedPages cache depending on flags */
488 : /* Masking flags with SPGIST_CACHED_PAGES is just for paranoia's sake */
489 : #define GET_LUP(c, f) (&(c)->lastUsedPages.cachedPage[((unsigned int) (f)) % SPGIST_CACHED_PAGES])
490 :
491 : /*
492 : * Allocate and initialize a new buffer of the type and parity specified by
493 : * flags. The returned buffer is already pinned and exclusive-locked.
494 : *
495 : * When requesting an inner page, if we get one with the wrong parity,
496 : * we just release the buffer and try again. We will get a different page
497 : * because GetFreeIndexPage will have marked the page used in FSM. The page
498 : * is entered in our local lastUsedPages cache, so there's some hope of
499 : * making use of it later in this session, but otherwise we rely on VACUUM
500 : * to eventually re-enter the page in FSM, making it available for recycling.
501 : * Note that such a page does not get marked dirty here, so unless it's used
502 : * fairly soon, the buffer will just get discarded and the page will remain
503 : * as it was on disk.
504 : *
505 : * When we return a buffer to the caller, the page is *not* entered into
506 : * the lastUsedPages cache; we expect the caller will do so after it's taken
507 : * whatever space it will use. This is because after the caller has used up
508 : * some space, the page might have less space than whatever was cached already
509 : * so we'd rather not trash the old cache entry.
510 : */
static Buffer
allocNewBuffer(Relation index, int flags)
{
	SpGistCache *cache = spgGetCache(index);
	uint16		pageflags = 0;

	/* Translate the request flags into page-header flag bits */
	if (GBUF_REQ_LEAF(flags))
		pageflags |= SPGIST_LEAF;
	if (GBUF_REQ_NULLS(flags))
		pageflags |= SPGIST_NULLS;

	for (;;)
	{
		Buffer		buffer;

		buffer = SpGistNewBuffer(index);
		SpGistInitBuffer(buffer, pageflags);

		if (pageflags & SPGIST_LEAF)
		{
			/* Leaf pages have no parity concerns, so just use it */
			return buffer;
		}
		else
		{
			BlockNumber blkno = BufferGetBlockNumber(buffer);
			int			blkFlags = GBUF_INNER_PARITY(blkno);

			if ((flags & GBUF_PARITY_MASK) == blkFlags)
			{
				/* Page has right parity, use it */
				return buffer;
			}
			else
			{
				/* Page has wrong parity, record it in cache and try again */
				if (pageflags & SPGIST_NULLS)
					blkFlags |= GBUF_NULLS;
				cache->lastUsedPages.cachedPage[blkFlags].blkno = blkno;
				cache->lastUsedPages.cachedPage[blkFlags].freeSpace =
					PageGetExactFreeSpace(BufferGetPage(buffer));
				UnlockReleaseBuffer(buffer);
			}
		}
	}
}
557 :
558 : /*
559 : * Get a buffer of the type and parity specified by flags, having at least
560 : * as much free space as indicated by needSpace. We use the lastUsedPages
561 : * cache to assign the same buffer previously requested when possible.
562 : * The returned buffer is already pinned and exclusive-locked.
563 : *
564 : * *isNew is set true if the page was initialized here, false if it was
565 : * already valid.
566 : */
Buffer
SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew)
{
	SpGistCache *cache = spgGetCache(index);
	SpGistLastUsedPage *lup;

	/* Bail out if even an empty page wouldn't meet the demand */
	if (needSpace > SPGIST_PAGE_CAPACITY)
		elog(ERROR, "desired SPGiST tuple size is too big");

	/*
	 * If possible, increase the space request to include relation's
	 * fillfactor.  This ensures that when we add unrelated tuples to a page,
	 * we try to keep 100-fillfactor% available for adding tuples that are
	 * related to the ones already on it.  But fillfactor mustn't cause an
	 * error for requests that would otherwise be legal.
	 */
	needSpace += SpGistGetTargetPageFreeSpace(index);
	needSpace = Min(needSpace, SPGIST_PAGE_CAPACITY);

	/* Get the cache entry for this flags setting */
	lup = GET_LUP(cache, flags);

	/* If we have nothing cached, just turn it over to allocNewBuffer */
	if (lup->blkno == InvalidBlockNumber)
	{
		*isNew = true;
		return allocNewBuffer(index, flags);
	}

	/* fixed pages should never be in cache */
	Assert(!SpGistBlockIsFixed(lup->blkno));

	/* If cached freeSpace isn't enough, don't bother looking at the page */
	if (lup->freeSpace >= needSpace)
	{
		Buffer		buffer;
		Page		page;

		buffer = ReadBuffer(index, lup->blkno);

		if (!ConditionalLockBuffer(buffer))
		{
			/*
			 * buffer is locked by another process, so return a new buffer
			 * rather than waiting
			 */
			ReleaseBuffer(buffer);
			*isNew = true;
			return allocNewBuffer(index, flags);
		}

		page = BufferGetPage(buffer);

		if (PageIsNew(page) || SpGistPageIsDeleted(page) || PageIsEmpty(page))
		{
			/* OK to initialize the page */
			uint16		pageflags = 0;

			if (GBUF_REQ_LEAF(flags))
				pageflags |= SPGIST_LEAF;
			if (GBUF_REQ_NULLS(flags))
				pageflags |= SPGIST_NULLS;
			SpGistInitBuffer(buffer, pageflags);
			lup->freeSpace = PageGetExactFreeSpace(page) - needSpace;
			*isNew = true;
			return buffer;
		}

		/*
		 * Check that page is of right type and has enough space.  We must
		 * recheck this since our cache isn't necessarily up to date.
		 */
		if ((GBUF_REQ_LEAF(flags) ? SpGistPageIsLeaf(page) : !SpGistPageIsLeaf(page)) &&
			(GBUF_REQ_NULLS(flags) ? SpGistPageStoresNulls(page) : !SpGistPageStoresNulls(page)))
		{
			int			freeSpace = PageGetExactFreeSpace(page);

			if (freeSpace >= needSpace)
			{
				/* Success, update freespace info and return the buffer */
				lup->freeSpace = freeSpace - needSpace;
				*isNew = false;
				return buffer;
			}
		}

		/*
		 * fallback to allocation of new buffer
		 */
		UnlockReleaseBuffer(buffer);
	}

	/* No success with cache, so return a new buffer */
	*isNew = true;
	return allocNewBuffer(index, flags);
}
663 :
664 : /*
665 : * Update lastUsedPages cache when done modifying a page.
666 : *
667 : * We update the appropriate cache entry if it already contained this page
668 : * (its freeSpace is likely obsolete), or if this page has more space than
669 : * whatever we had cached.
670 : */
void
SpGistSetLastUsedPage(Relation index, Buffer buffer)
{
	SpGistCache *cache = spgGetCache(index);
	SpGistLastUsedPage *lup;
	int			freeSpace;
	Page		page = BufferGetPage(buffer);
	BlockNumber blkno = BufferGetBlockNumber(buffer);
	int			flags;

	/* Never enter fixed pages (root pages) in cache, though */
	if (SpGistBlockIsFixed(blkno))
		return;

	/* Classify the page to pick the matching cache slot */
	if (SpGistPageIsLeaf(page))
		flags = GBUF_LEAF;
	else
		flags = GBUF_INNER_PARITY(blkno);
	if (SpGistPageStoresNulls(page))
		flags |= GBUF_NULLS;

	lup = GET_LUP(cache, flags);

	/*
	 * Overwrite the slot if it was empty, already pointed at this page (its
	 * cached freeSpace is likely obsolete now), or this page has more free
	 * space than the cached one.
	 */
	freeSpace = PageGetExactFreeSpace(page);
	if (lup->blkno == InvalidBlockNumber || lup->blkno == blkno ||
		lup->freeSpace < freeSpace)
	{
		lup->blkno = blkno;
		lup->freeSpace = freeSpace;
	}
}
702 :
703 : /*
704 : * Initialize an SPGiST page to empty, with specified flags
705 : */
706 : void
707 4961 : SpGistInitPage(Page page, uint16 f)
708 : {
709 : SpGistPageOpaque opaque;
710 :
711 4961 : PageInit(page, BLCKSZ, sizeof(SpGistPageOpaqueData));
712 4961 : opaque = SpGistPageGetOpaque(page);
713 4961 : opaque->flags = f;
714 4961 : opaque->spgist_page_id = SPGIST_PAGE_ID;
715 4961 : }
716 :
717 : /*
718 : * Initialize a buffer's page to empty, with specified flags
719 : */
void
SpGistInitBuffer(Buffer b, uint16 f)
{
	/* All SP-GiST pages are expected to be standard-sized */
	Assert(BufferGetPageSize(b) == BLCKSZ);
	SpGistInitPage(BufferGetPage(b), f);
}
726 :
727 : /*
728 : * Initialize metadata page
729 : */
730 : void
731 129 : SpGistInitMetapage(Page page)
732 : {
733 : SpGistMetaPageData *metadata;
734 : int i;
735 :
736 129 : SpGistInitPage(page, SPGIST_META);
737 129 : metadata = SpGistPageGetMeta(page);
738 129 : memset(metadata, 0, sizeof(SpGistMetaPageData));
739 129 : metadata->magicNumber = SPGIST_MAGIC_NUMBER;
740 :
741 : /* initialize last-used-page cache to empty */
742 1161 : for (i = 0; i < SPGIST_CACHED_PAGES; i++)
743 1032 : metadata->lastUsedPages.cachedPage[i].blkno = InvalidBlockNumber;
744 :
745 : /*
746 : * Set pd_lower just past the end of the metadata. This is essential,
747 : * because without doing so, metadata will be lost if xlog.c compresses
748 : * the page.
749 : */
750 129 : ((PageHeader) page)->pd_lower =
751 129 : ((char *) metadata + sizeof(SpGistMetaPageData)) - (char *) page;
752 129 : }
753 :
754 : /*
755 : * reloptions processing for SPGiST
756 : */
757 : bytea *
758 88 : spgoptions(Datum reloptions, bool validate)
759 : {
760 : static const relopt_parse_elt tab[] = {
761 : {"fillfactor", RELOPT_TYPE_INT, offsetof(SpGistOptions, fillfactor)},
762 : };
763 :
764 88 : return (bytea *) build_reloptions(reloptions, validate,
765 : RELOPT_KIND_SPGIST,
766 : sizeof(SpGistOptions),
767 : tab, lengthof(tab));
768 : }
769 :
770 : /*
771 : * Get the space needed to store a non-null datum of the indicated type
772 : * in an inner tuple (that is, as a prefix or node label).
773 : * Note the result is already rounded up to a MAXALIGN boundary.
774 : * Here we follow the convention that pass-by-val types are just stored
775 : * in their Datum representation (compare memcpyInnerDatum).
776 : */
777 : unsigned int
778 8061 : SpGistGetInnerTypeSize(SpGistTypeDesc *att, Datum datum)
779 : {
780 : unsigned int size;
781 :
782 8061 : if (att->attbyval)
783 4149 : size = sizeof(Datum);
784 3912 : else if (att->attlen > 0)
785 2672 : size = att->attlen;
786 : else
787 1240 : size = VARSIZE_ANY(DatumGetPointer(datum));
788 :
789 8061 : return MAXALIGN(size);
790 : }
791 :
792 : /*
793 : * Copy the given non-null datum to *target, in the inner-tuple case
794 : */
795 : static void
796 8061 : memcpyInnerDatum(void *target, SpGistTypeDesc *att, Datum datum)
797 : {
798 : unsigned int size;
799 :
800 8061 : if (att->attbyval)
801 : {
802 4149 : memcpy(target, &datum, sizeof(Datum));
803 : }
804 : else
805 : {
806 3912 : size = (att->attlen > 0) ? att->attlen : VARSIZE_ANY(DatumGetPointer(datum));
807 3912 : memcpy(target, DatumGetPointer(datum), size);
808 : }
809 8061 : }
810 :
811 : /*
812 : * Compute space required for a leaf tuple holding the given data.
813 : *
814 : * This must match the size-calculation portion of spgFormLeafTuple.
815 : */
816 : Size
817 12997839 : SpGistGetLeafTupleSize(TupleDesc tupleDescriptor,
818 : const Datum *datums, const bool *isnulls)
819 : {
820 : Size size;
821 : Size data_size;
822 12997839 : bool needs_null_mask = false;
823 12997839 : int natts = tupleDescriptor->natts;
824 :
825 : /*
826 : * Decide whether we need a nulls bitmask.
827 : *
828 : * If there is only a key attribute (natts == 1), never use a bitmask, for
829 : * compatibility with the pre-v14 layout of leaf tuples. Otherwise, we
830 : * need one if any attribute is null.
831 : */
832 12997839 : if (natts > 1)
833 : {
834 653746 : for (int i = 0; i < natts; i++)
835 : {
836 445001 : if (isnulls[i])
837 : {
838 9266 : needs_null_mask = true;
839 9266 : break;
840 : }
841 : }
842 : }
843 :
844 : /*
845 : * Calculate size of the data part; same as for heap tuples.
846 : */
847 12997839 : data_size = heap_compute_data_size(tupleDescriptor, datums, isnulls);
848 :
849 : /*
850 : * Compute total size.
851 : */
852 12997839 : size = SGLTHDRSZ(needs_null_mask);
853 12997839 : size += data_size;
854 12997839 : size = MAXALIGN(size);
855 :
856 : /*
857 : * Ensure that we can replace the tuple with a dead tuple later. This test
858 : * is unnecessary when there are any non-null attributes, but be safe.
859 : */
860 12997839 : if (size < SGDTSIZE)
861 0 : size = SGDTSIZE;
862 :
863 12997839 : return size;
864 : }
865 :
866 : /*
867 : * Construct a leaf tuple containing the given heap TID and datum values
868 : */
SpGistLeafTuple
spgFormLeafTuple(SpGistState *state, const ItemPointerData *heapPtr,
				 const Datum *datums, const bool *isnulls)
{
	SpGistLeafTuple tup;
	TupleDesc	tupleDescriptor = state->leafTupDesc;
	Size		size;
	Size		hoff;			/* header size, i.e. offset to data area */
	Size		data_size;
	bool		needs_null_mask = false;
	int			natts = tupleDescriptor->natts;
	char	   *tp;				/* ptr to tuple data */
	uint16		tupmask = 0;	/* unused heap_fill_tuple output */

	/*
	 * Decide whether we need a nulls bitmask.
	 *
	 * If there is only a key attribute (natts == 1), never use a bitmask, for
	 * compatibility with the pre-v14 layout of leaf tuples.  Otherwise, we
	 * need one if any attribute is null.
	 */
	if (natts > 1)
	{
		for (int i = 0; i < natts; i++)
		{
			if (isnulls[i])
			{
				needs_null_mask = true;
				break;
			}
		}
	}

	/*
	 * Calculate size of the data part; same as for heap tuples.
	 */
	data_size = heap_compute_data_size(tupleDescriptor, datums, isnulls);

	/*
	 * Compute total size.  This must match SpGistGetLeafTupleSize.
	 */
	hoff = SGLTHDRSZ(needs_null_mask);
	size = hoff + data_size;
	size = MAXALIGN(size);

	/*
	 * Ensure that we can replace the tuple with a dead tuple later.  This
	 * test is unnecessary when there are any non-null attributes, but be
	 * safe.
	 */
	if (size < SGDTSIZE)
		size = SGDTSIZE;

	/* OK, form the tuple (palloc0 leaves any padding bytes zeroed) */
	tup = (SpGistLeafTuple) palloc0(size);

	tup->size = size;
	/* link field starts invalid; callers maintain any chaining themselves */
	SGLT_SET_NEXTOFFSET(tup, InvalidOffsetNumber);
	tup->heapPtr = *heapPtr;

	tp = (char *) tup + hoff;

	if (needs_null_mask)
	{
		bits8	   *bp;			/* ptr to null bitmap in tuple */

		/* Set nullmask presence bit in SpGistLeafTuple header */
		SGLT_SET_HASNULLMASK(tup, true);
		/* Fill the data area and null mask */
		bp = (bits8 *) ((char *) tup + sizeof(SpGistLeafTupleData));
		heap_fill_tuple(tupleDescriptor, datums, isnulls, tp, data_size,
						&tupmask, bp);
	}
	else if (natts > 1 || !isnulls[spgKeyColumn])
	{
		/* Fill data area only */
		heap_fill_tuple(tupleDescriptor, datums, isnulls, tp, data_size,
						&tupmask, (bits8 *) NULL);
	}
	/* otherwise we have no data, nor a bitmap, to fill */

	return tup;
}
951 :
952 : /*
953 : * Construct a node (to go into an inner tuple) containing the given label
954 : *
955 : * Note that the node's downlink is just set invalid here. Caller will fill
956 : * it in later.
957 : */
958 : SpGistNodeTuple
959 26953 : spgFormNodeTuple(SpGistState *state, Datum label, bool isnull)
960 : {
961 : SpGistNodeTuple tup;
962 : unsigned int size;
963 26953 : unsigned short infomask = 0;
964 :
965 : /* compute space needed (note result is already maxaligned) */
966 26953 : size = SGNTHDRSZ;
967 26953 : if (!isnull)
968 3689 : size += SpGistGetInnerTypeSize(&state->attLabelType, label);
969 :
970 : /*
971 : * Here we make sure that the size will fit in the field reserved for it
972 : * in t_info.
973 : */
974 26953 : if ((size & INDEX_SIZE_MASK) != size)
975 0 : ereport(ERROR,
976 : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
977 : errmsg("index row requires %zu bytes, maximum size is %zu",
978 : (Size) size, (Size) INDEX_SIZE_MASK)));
979 :
980 26953 : tup = (SpGistNodeTuple) palloc0(size);
981 :
982 26953 : if (isnull)
983 23264 : infomask |= INDEX_NULL_MASK;
984 : /* we don't bother setting the INDEX_VAR_MASK bit */
985 26953 : infomask |= size;
986 26953 : tup->t_info = infomask;
987 :
988 : /* The TID field will be filled in later */
989 26953 : ItemPointerSetInvalid(&tup->t_tid);
990 :
991 26953 : if (!isnull)
992 3689 : memcpyInnerDatum(SGNTDATAPTR(tup), &state->attLabelType, label);
993 :
994 26953 : return tup;
995 : }
996 :
997 : /*
998 : * Construct an inner tuple containing the given prefix and node array
999 : */
SpGistInnerTuple
spgFormInnerTuple(SpGistState *state, bool hasPrefix, Datum prefix,
				  int nNodes, SpGistNodeTuple *nodes)
{
	SpGistInnerTuple tup;
	unsigned int size;
	unsigned int prefixSize;
	int			i;
	char	   *ptr;

	/* Compute size needed: header, optional prefix datum, then the nodes */
	if (hasPrefix)
		prefixSize = SpGistGetInnerTypeSize(&state->attPrefixType, prefix);
	else
		prefixSize = 0;

	size = SGITHDRSZ + prefixSize;

	/* Note: we rely on node tuple sizes to be maxaligned already */
	for (i = 0; i < nNodes; i++)
		size += IndexTupleSize(nodes[i]);

	/*
	 * Ensure that we can replace the tuple with a dead tuple later.  This
	 * test is unnecessary given current tuple layouts, but let's be safe.
	 */
	if (size < SGDTSIZE)
		size = SGDTSIZE;

	/*
	 * Inner tuple should be small enough to fit on a page
	 */
	if (size > SPGIST_PAGE_CAPACITY - sizeof(ItemIdData))
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("SP-GiST inner tuple size %zu exceeds maximum %zu",
						(Size) size,
						SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)),
				 errhint("Values larger than a buffer page cannot be indexed.")));

	/*
	 * Check for overflow of header fields --- probably can't fail if the
	 * above succeeded, but let's be paranoid.  (size, prefixSize and nNodes
	 * are stored in bit-fields of the inner tuple header.)
	 */
	if (size > SGITMAXSIZE ||
		prefixSize > SGITMAXPREFIXSIZE ||
		nNodes > SGITMAXNNODES)
		elog(ERROR, "SPGiST inner tuple header field is too small");

	/* OK, form the tuple */
	tup = (SpGistInnerTuple) palloc0(size);

	tup->nNodes = nNodes;
	tup->prefixSize = prefixSize;
	tup->size = size;

	/* Copy the prefix datum, if any, immediately after the header */
	if (hasPrefix)
		memcpyInnerDatum(SGITDATAPTR(tup), &state->attPrefixType, prefix);

	/* Append the node tuples, back to back, after the prefix */
	ptr = (char *) SGITNODEPTR(tup);

	for (i = 0; i < nNodes; i++)
	{
		SpGistNodeTuple node = nodes[i];

		memcpy(ptr, node, IndexTupleSize(node));
		ptr += IndexTupleSize(node);
	}

	return tup;
}
1071 :
1072 : /*
1073 : * Construct a "dead" tuple to replace a tuple being deleted.
1074 : *
1075 : * The state can be SPGIST_REDIRECT, SPGIST_DEAD, or SPGIST_PLACEHOLDER.
1076 : * For a REDIRECT tuple, a pointer (blkno+offset) must be supplied, and
1077 : * the xid field is filled in automatically.
1078 : *
1079 : * This is called in critical sections, so we don't use palloc; the tuple
1080 : * is built in preallocated storage. It should be copied before another
1081 : * call with different parameters can occur.
1082 : */
1083 : SpGistDeadTuple
1084 9150 : spgFormDeadTuple(SpGistState *state, int tupstate,
1085 : BlockNumber blkno, OffsetNumber offnum)
1086 : {
1087 9150 : SpGistDeadTuple tuple = (SpGistDeadTuple) state->deadTupleStorage;
1088 :
1089 9150 : tuple->tupstate = tupstate;
1090 9150 : tuple->size = SGDTSIZE;
1091 9150 : SGLT_SET_NEXTOFFSET(tuple, InvalidOffsetNumber);
1092 :
1093 9150 : if (tupstate == SPGIST_REDIRECT)
1094 : {
1095 1510 : ItemPointerSet(&tuple->pointer, blkno, offnum);
1096 1510 : tuple->xid = state->redirectXid;
1097 : }
1098 : else
1099 : {
1100 7640 : ItemPointerSetInvalid(&tuple->pointer);
1101 7640 : tuple->xid = InvalidTransactionId;
1102 : }
1103 :
1104 9150 : return tuple;
1105 : }
1106 :
1107 : /*
1108 : * Convert an SPGiST leaf tuple into Datum/isnull arrays.
1109 : *
1110 : * The caller must allocate sufficient storage for the output arrays.
1111 : * (INDEX_MAX_KEYS entries should be enough.)
1112 : */
void
spgDeformLeafTuple(SpGistLeafTuple tup, TupleDesc tupleDescriptor,
				   Datum *datums, bool *isnulls, bool keyColumnIsNull)
{
	bool		hasNullsMask = SGLT_GET_HASNULLMASK(tup);
	char	   *tp;				/* ptr to tuple data */
	bits8	   *bp;				/* ptr to null bitmap in tuple */

	if (keyColumnIsNull && tupleDescriptor->natts == 1)
	{
		/*
		 * Trivial case: there is only the key attribute and we're in a nulls
		 * tree.  The hasNullsMask bit in the tuple header should not be set
		 * (and thus we can't use index_deform_tuple_internal), but
		 * nonetheless the result is NULL.
		 *
		 * Note: currently this is dead code, because noplace calls this when
		 * there is only the key attribute.  But we should cover the case.
		 */
		Assert(!hasNullsMask);

		datums[spgKeyColumn] = (Datum) 0;
		isnulls[spgKeyColumn] = true;
		return;
	}

	/* header size depends on whether the null mask is present */
	tp = (char *) tup + SGLTHDRSZ(hasNullsMask);
	/* bp points just past the fixed header; only valid if hasNullsMask */
	bp = (bits8 *) ((char *) tup + sizeof(SpGistLeafTupleData));

	index_deform_tuple_internal(tupleDescriptor,
								datums, isnulls,
								tp, bp, hasNullsMask);

	/*
	 * Key column isnull value from the tuple should be consistent with
	 * keyColumnIsNull flag from the caller.
	 */
	Assert(keyColumnIsNull == isnulls[spgKeyColumn]);
}
1152 :
1153 : /*
1154 : * Extract the label datums of the nodes within innerTuple
1155 : *
1156 : * Returns NULL if label datums are NULLs
1157 : */
1158 : Datum *
1159 12480165 : spgExtractNodeLabels(SpGistState *state, SpGistInnerTuple innerTuple)
1160 : {
1161 : Datum *nodeLabels;
1162 : int i;
1163 : SpGistNodeTuple node;
1164 :
1165 : /* Either all the labels must be NULL, or none. */
1166 12480165 : node = SGITNODEPTR(innerTuple);
1167 12480165 : if (IndexTupleHasNulls(node))
1168 : {
1169 67271722 : SGITITERATE(innerTuple, i, node)
1170 : {
1171 54943920 : if (!IndexTupleHasNulls(node))
1172 0 : elog(ERROR, "some but not all node labels are null in SPGiST inner tuple");
1173 : }
1174 : /* They're all null, so just return NULL */
1175 12327802 : return NULL;
1176 : }
1177 : else
1178 : {
1179 152363 : nodeLabels = palloc_array(Datum, innerTuple->nNodes);
1180 1726955 : SGITITERATE(innerTuple, i, node)
1181 : {
1182 1574592 : if (IndexTupleHasNulls(node))
1183 0 : elog(ERROR, "some but not all node labels are null in SPGiST inner tuple");
1184 1574592 : nodeLabels[i] = SGNTDATUM(node, state);
1185 : }
1186 152363 : return nodeLabels;
1187 : }
1188 : }
1189 :
1190 : /*
1191 : * Add a new item to the page, replacing a PLACEHOLDER item if possible.
1192 : * Return the location it's inserted at, or InvalidOffsetNumber on failure.
1193 : *
1194 : * If startOffset isn't NULL, we start searching for placeholders at
1195 : * *startOffset, and update that to the next place to search. This is just
1196 : * an optimization for repeated insertions.
1197 : *
1198 : * If errorOK is false, we throw error when there's not enough room,
1199 : * rather than returning InvalidOffsetNumber.
1200 : */
OffsetNumber
SpGistPageAddNewItem(SpGistState *state, Page page, const void *item, Size size,
					 OffsetNumber *startOffset, bool errorOK)
{
	SpGistPageOpaque opaque = SpGistPageGetOpaque(page);
	OffsetNumber i,
				maxoff,
				offnum;

	/*
	 * Prefer recycling a PLACEHOLDER item over consuming free space, but
	 * only if the page has room for the new item after we free the
	 * placeholder's SGDTSIZE bytes.
	 */
	if (opaque->nPlaceholder > 0 &&
		PageGetExactFreeSpace(page) + SGDTSIZE >= MAXALIGN(size))
	{
		/* Try to replace a placeholder */
		maxoff = PageGetMaxOffsetNumber(page);
		offnum = InvalidOffsetNumber;

		for (;;)
		{
			/* Start from the caller's hint when available, else from the top */
			if (startOffset && *startOffset != InvalidOffsetNumber)
				i = *startOffset;
			else
				i = FirstOffsetNumber;
			for (; i <= maxoff; i++)
			{
				SpGistDeadTuple it = (SpGistDeadTuple) PageGetItem(page,
																   PageGetItemId(page, i));

				if (it->tupstate == SPGIST_PLACEHOLDER)
				{
					offnum = i;
					break;
				}
			}

			/* Done if we found a placeholder */
			if (offnum != InvalidOffsetNumber)
				break;

			if (startOffset && *startOffset != InvalidOffsetNumber)
			{
				/* Hint was no good, re-search from beginning */
				*startOffset = InvalidOffsetNumber;
				continue;
			}

			/* Hmm, no placeholder found?  Fix the stale counter and give up */
			opaque->nPlaceholder = 0;
			break;
		}

		if (offnum != InvalidOffsetNumber)
		{
			/* Replace the placeholder tuple */
			PageIndexTupleDelete(page, offnum);

			offnum = PageAddItem(page, item, size, offnum, false, false);

			/*
			 * We should not have failed given the size check at the top of
			 * the function, but test anyway.  If we did fail, we must PANIC
			 * because we've already deleted the placeholder tuple, and
			 * there's no other way to keep the damage from getting to disk.
			 */
			if (offnum != InvalidOffsetNumber)
			{
				Assert(opaque->nPlaceholder > 0);
				opaque->nPlaceholder--;
				/* Advance the caller's hint past the slot we just used */
				if (startOffset)
					*startOffset = offnum + 1;
			}
			else
				elog(PANIC, "failed to add item of size %zu to SPGiST index page",
					 size);

			return offnum;
		}
	}

	/* No luck in replacing a placeholder, so just add it to the page */
	offnum = PageAddItem(page, item, size,
						 InvalidOffsetNumber, false, false);

	if (offnum == InvalidOffsetNumber && !errorOK)
		elog(ERROR, "failed to add item of size %zu to SPGiST index page",
			 size);

	return offnum;
}
1289 :
1290 : /*
1291 : * spgproperty() -- Check boolean properties of indexes.
1292 : *
1293 : * This is optional for most AMs, but is required for SP-GiST because the core
1294 : * property code doesn't support AMPROP_DISTANCE_ORDERABLE.
1295 : */
bool
spgproperty(Oid index_oid, int attno,
			IndexAMProperty prop, const char *propname,
			bool *res, bool *isnull)
{
	Oid			opclass,
				opfamily,
				opcintype;
	CatCList   *catlist;
	int			i;

	/* Only answer column-level inquiries */
	if (attno == 0)
		return false;

	/* We only handle AMPROP_DISTANCE_ORDERABLE; let core code do the rest */
	switch (prop)
	{
		case AMPROP_DISTANCE_ORDERABLE:
			break;
		default:
			return false;
	}

	/*
	 * Currently, SP-GiST distance-ordered scans require that there be a
	 * distance operator in the opclass with the default types.  So we assume
	 * that if such an operator exists, then there's a reason for it.
	 */

	/* First we need to know the column's opclass. */
	opclass = get_index_column_opclass(index_oid, attno);
	if (!OidIsValid(opclass))
	{
		/* answer is unknowable; report NULL rather than guessing */
		*isnull = true;
		return true;
	}

	/* Now look up the opclass family and input datatype. */
	if (!get_opclass_opfamily_and_input_type(opclass, &opfamily, &opcintype))
	{
		*isnull = true;
		return true;
	}

	/* And now we can check whether the operator is provided. */
	catlist = SearchSysCacheList1(AMOPSTRATEGY,
								  ObjectIdGetDatum(opfamily));

	*res = false;

	/* Scan the family's operators for an ordering op on the column's type */
	for (i = 0; i < catlist->n_members; i++)
	{
		HeapTuple	amoptup = &catlist->members[i]->tuple;
		Form_pg_amop amopform = (Form_pg_amop) GETSTRUCT(amoptup);

		if (amopform->amoppurpose == AMOP_ORDER &&
			(amopform->amoplefttype == opcintype ||
			 amopform->amoprighttype == opcintype) &&
			opfamily_can_sort_type(amopform->amopsortfamily,
								   get_op_rettype(amopform->amopopr)))
		{
			*res = true;
			break;
		}
	}

	ReleaseSysCacheList(catlist);

	*isnull = false;

	return true;
}
|