1 : /*-------------------------------------------------------------------------
2 : *
3 : * dynahash.c
4 : * dynamic chained hash tables
5 : *
6 : * dynahash.c supports both local-to-a-backend hash tables and hash tables in
7 : * shared memory. For shared hash tables, it is the caller's responsibility
8 : * to provide appropriate access interlocking. The simplest convention is
9 : * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
10 : * hash_seq_search) need only shared lock, but any update requires exclusive
11 : * lock. For heavily-used shared tables, the single-lock approach creates a
12 : * concurrency bottleneck, so we also support "partitioned" locking wherein
13 : * there are multiple LWLocks guarding distinct subsets of the table. To use
14 : * a hash table in partitioned mode, the HASH_PARTITION flag must be given
15 : * to hash_create. This prevents any attempt to split buckets on-the-fly.
16 : * Therefore, each hash bucket chain operates independently, and no fields
17 : * of the hash header change after init except nentries and freeList.
18 : * (A partitioned table uses multiple copies of those fields, guarded by
19 : * spinlocks, for additional concurrency.)
20 : * This lets any subset of the hash buckets be treated as a separately
21 : * lockable partition. We expect callers to use the low-order bits of a
22 : * lookup key's hash value as a partition number --- this will work because
23 : * of the way calc_bucket() maps hash values to bucket numbers.
24 : *
25 : * For hash tables in shared memory, the memory allocator function should
26 : * match malloc's semantics of returning NULL on failure. For hash tables
27 : * in local memory, we typically use palloc() which will throw error on
28 : * failure. The code in this file has to cope with both cases.
29 : *
30 : * dynahash.c provides support for these types of lookup keys:
31 : *
32 : * 1. Null-terminated C strings (truncated if necessary to fit in keysize),
33 : * compared as though by strcmp(). This is selected by specifying the
34 : * HASH_STRINGS flag to hash_create.
35 : *
36 : * 2. Arbitrary binary data of size keysize, compared as though by memcmp().
37 : * (Caller must ensure there are no undefined padding bits in the keys!)
38 : * This is selected by specifying the HASH_BLOBS flag to hash_create.
39 : *
40 : * 3. More complex key behavior can be selected by specifying user-supplied
41 : * hashing, comparison, and/or key-copying functions. At least a hashing
42 : * function must be supplied; comparison defaults to memcmp() and key copying
43 : * to memcpy() when a user-defined hashing function is selected.
44 : *
45 : * Compared to simplehash, dynahash has the following benefits:
46 : *
47 : * - It supports partitioning, which is useful for shared memory access using
48 : * locks.
49 : * - Shared memory hashes are allocated in a fixed size area at startup and
50 : * are discoverable by name from other processes.
51 : * - Because entries don't need to be moved in the case of hash conflicts,
52 : * dynahash has better performance for large entries.
53 : * - Guarantees stable pointers to entries.
54 : *
55 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
56 : * Portions Copyright (c) 1994, Regents of the University of California
57 : *
58 : *
59 : * IDENTIFICATION
60 : * src/backend/utils/hash/dynahash.c
61 : *
62 : *-------------------------------------------------------------------------
63 : */
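
/*
 * A minimal sketch of selecting each key behavior described above; the
 * entry struct names (MyStringEntry etc.) and my_hash are hypothetical:
 *
 *		HASHCTL		info;
 *
 *		info.keysize = NAMEDATALEN;
 *		info.entrysize = sizeof(MyStringEntry);
 *		t1 = hash_create("strings", 128, &info, HASH_ELEM | HASH_STRINGS);
 *
 *		info.keysize = sizeof(Oid);
 *		info.entrysize = sizeof(MyOidEntry);
 *		t2 = hash_create("blobs", 128, &info, HASH_ELEM | HASH_BLOBS);
 *
 *		info.keysize = sizeof(MyKey);
 *		info.entrysize = sizeof(MyKeyEntry);
 *		info.hash = my_hash;
 *		t3 = hash_create("custom", 128, &info, HASH_ELEM | HASH_FUNCTION);
 *
 * In each case the entry struct must begin with the key.
 */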
64 :
65 : /*
66 : * Original comments:
67 : *
68 : * Dynamic hashing, after CACM April 1988 pp 446-457, by Per-Ake Larson.
69 : * Coded into C, with minor code improvements, and with hsearch(3) interface,
70 : * by ejp@ausmelb.oz, Jul 26, 1988: 13:16;
71 : * also, hcreate/hdestroy routines added to simulate hsearch(3).
72 : *
73 : * These routines simulate hsearch(3) and family, with the important
74 : * difference that the hash table is dynamic - can grow indefinitely
75 : * beyond its original size (as supplied to hcreate()).
76 : *
77 : * Performance appears to be comparable to that of hsearch(3).
78 : * The 'source-code' options referred to in hsearch(3)'s 'man' page
79 : * are not implemented; otherwise functionality is identical.
80 : *
81 : * Compilation controls:
82 : * HASH_DEBUG controls some informative traces, mainly for debugging.
83 : * HASH_STATISTICS causes HashAccesses and HashCollisions to be maintained;
84 : * when combined with HASH_DEBUG, these are displayed by hdestroy().
85 : *
86 : * Problems & fixes to ejp@ausmelb.oz. WARNING: relies on pre-processor
87 : * concatenation property, in probably unnecessary code 'optimization'.
88 : *
89 : * Modified margo@postgres.berkeley.edu February 1990
90 : * added multiple table interface
91 : * Modified by sullivan@postgres.berkeley.edu April 1990
92 : * changed ctl structure for shared memory
93 : */
94 :
95 : #include "postgres.h"
96 :
97 : #include <limits.h>
98 :
99 : #include "access/xact.h"
100 : #include "common/hashfn.h"
101 : #include "port/pg_bitutils.h"
102 : #include "storage/shmem.h"
103 : #include "storage/spin.h"
104 : #include "utils/dynahash.h"
105 : #include "utils/memutils.h"
106 :
107 :
108 : /*
109 : * Constants
110 : *
111 : * A hash table has a top-level "directory", each of whose entries points
112 : * to a "segment" of ssize bucket headers. The maximum number of hash
113 : * buckets is thus dsize * ssize (but dsize may be expansible). Of course,
114 : * the number of records in the table can be larger, but we don't want a
115 : * whole lot of records per bucket or performance goes down.
116 : *
117 : * In a hash table allocated in shared memory, the directory cannot be
118 : * expanded because it must stay at a fixed address. The directory size
119 : * should be selected using hash_select_dirsize (and you'd better have
120 : * a good idea of the maximum number of entries!). For non-shared hash
121 : * tables, the initial directory size can be left at the default.
122 : */
123 : #define DEF_SEGSIZE 256
124 : #define DEF_SEGSIZE_SHIFT 8 /* must be log2(DEF_SEGSIZE) */
125 : #define DEF_DIRSIZE 256
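
/*
 * With these defaults, a directory of DEF_DIRSIZE (256) segments of
 * DEF_SEGSIZE (256) buckets each can address 256 * 256 = 65536 buckets
 * before dir_realloc must grow the directory.  A shared table, whose
 * directory is fixed, should size it with hash_select_dirsize() instead.
 */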
126 :
127 : /* Number of freelists to be used for a partitioned hash table. */
128 : #define NUM_FREELISTS 32
129 :
130 : /* A hash bucket is a linked list of HASHELEMENTs */
131 : typedef HASHELEMENT *HASHBUCKET;
132 :
133 : /* A hash segment is an array of bucket headers */
134 : typedef HASHBUCKET *HASHSEGMENT;
135 :
136 : /*
137 : * Per-freelist data.
138 : *
139 : * In a partitioned hash table, each freelist is associated with a specific
140 : * set of hashcodes, as determined by the FREELIST_IDX() macro below.
141 : * nentries tracks the number of live hashtable entries having those hashcodes
142 : * (NOT the number of entries in the freelist, as you might expect).
143 : *
144 : * The coverage of a freelist might be more or less than one partition, so it
145 : * needs its own lock rather than relying on caller locking. Relying on that
146 : * wouldn't work even if the coverage was the same, because of the occasional
147 : * need to "borrow" entries from another freelist; see get_hash_entry().
148 : *
149 : * Using an array of FreeListData instead of separate arrays of mutexes,
150 : * nentries and freeLists helps to reduce sharing of cache lines between
151 : * different mutexes.
152 : */
153 : typedef struct
154 : {
155 : slock_t mutex; /* spinlock for this freelist */
156 : long nentries; /* number of entries in associated buckets */
157 : HASHELEMENT *freeList; /* chain of free elements */
158 : } FreeListData;
159 :
160 : /*
161 : * Header structure for a hash table --- contains all changeable info
162 : *
163 : * In a shared-memory hash table, the HASHHDR is in shared memory, while
164 : * each backend has a local HTAB struct. For a non-shared table, there isn't
165 : * any functional difference between HASHHDR and HTAB, but we separate them
166 : * anyway to share code between shared and non-shared tables.
167 : */
168 : struct HASHHDR
169 : {
170 : /*
171 : * The freelist can become a point of contention in high-concurrency hash
172 : * tables, so we use an array of freelists, each with its own mutex and
173 : * nentries count, instead of just a single one. Although the freelists
174 : * normally operate independently, we will scavenge entries from freelists
175 : * other than a hashcode's default freelist when necessary.
176 : *
177 : * If the hash table is not partitioned, only freeList[0] is used and its
178 : * spinlock is not used at all; callers' locking is assumed sufficient.
179 : */
180 : FreeListData freeList[NUM_FREELISTS];
181 :
182 : /* These fields can change, but not in a partitioned table */
183 : /* Also, dsize can't change in a shared table, even if unpartitioned */
184 : long dsize; /* directory size */
185 : long nsegs; /* number of allocated segments (<= dsize) */
186 : uint32 max_bucket; /* ID of maximum bucket in use */
187 : uint32 high_mask; /* mask to modulo into entire table */
188 : uint32 low_mask; /* mask to modulo into lower half of table */
189 :
190 : /* These fields are fixed at hashtable creation */
191 : Size keysize; /* hash key length in bytes */
192 : Size entrysize; /* total user element size in bytes */
193 : long num_partitions; /* # partitions (must be power of 2), or 0 */
194 : long max_dsize; /* 'dsize' limit if directory is fixed size */
195 : long ssize; /* segment size --- must be power of 2 */
196 : int sshift; /* segment shift = log2(ssize) */
197 : int nelem_alloc; /* number of entries to allocate at once */
198 :
199 : #ifdef HASH_STATISTICS
200 :
201 : /*
202 : * Count statistics here. NB: stats code doesn't bother with mutex, so
203 : * counts could be corrupted a bit in a partitioned table.
204 : */
205 : long accesses;
206 : long collisions;
207 : #endif
208 : };
209 :
210 : #define IS_PARTITIONED(hctl) ((hctl)->num_partitions != 0)
211 :
212 : #define FREELIST_IDX(hctl, hashcode) \
213 : (IS_PARTITIONED(hctl) ? (hashcode) % NUM_FREELISTS : 0)
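
/*
 * For example: in a partitioned table, a key whose hash value is 0x12345678
 * is served by freelist 0x12345678 % 32 = 24; an unpartitioned table always
 * uses freeList[0].
 */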
214 :
215 : /*
216 : * Top control structure for a hashtable --- in a shared table, each backend
217 : * has its own copy (OK since no fields change at runtime)
218 : */
219 : struct HTAB
220 : {
221 : HASHHDR *hctl; /* => shared control information */
222 : HASHSEGMENT *dir; /* directory of segment starts */
223 : HashValueFunc hash; /* hash function */
224 : HashCompareFunc match; /* key comparison function */
225 : HashCopyFunc keycopy; /* key copying function */
226 : HashAllocFunc alloc; /* memory allocator */
227 : MemoryContext hcxt; /* memory context if default allocator used */
228 : char *tabname; /* table name (for error messages) */
229 : bool isshared; /* true if table is in shared memory */
230 : bool isfixed; /* if true, don't enlarge */
231 :
232 : /* freezing a shared table isn't allowed, so we can keep state here */
233 : bool frozen; /* true = no more inserts allowed */
234 :
235 : /* We keep local copies of these fixed values to reduce contention */
236 : Size keysize; /* hash key length in bytes */
237 : long ssize; /* segment size --- must be power of 2 */
238 : int sshift; /* segment shift = log2(ssize) */
239 : };
240 :
241 : /*
242 : * Key (also entry) part of a HASHELEMENT
243 : */
244 : #define ELEMENTKEY(helem) (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))
245 :
246 : /*
247 : * Obtain element pointer given pointer to key
248 : */
249 : #define ELEMENT_FROM_KEY(key) \
250 : ((HASHELEMENT *) (((char *) (key)) - MAXALIGN(sizeof(HASHELEMENT))))
251 :
252 : /*
253 : * Fast MOD arithmetic, assuming that y is a power of 2 !
254 : */
255 : #define MOD(x,y) ((x) & ((y)-1))
256 :
257 : #ifdef HASH_STATISTICS
258 : static long hash_accesses,
259 : hash_collisions,
260 : hash_expansions;
261 : #endif
262 :
263 : /*
264 : * Private function prototypes
265 : */
266 : static void *DynaHashAlloc(Size size);
267 : static HASHSEGMENT seg_alloc(HTAB *hashp);
268 : static bool element_alloc(HTAB *hashp, int nelem, int freelist_idx);
269 : static bool dir_realloc(HTAB *hashp);
270 : static bool expand_table(HTAB *hashp);
271 : static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx);
272 : static void hdefault(HTAB *hashp);
273 : static int choose_nelem_alloc(Size entrysize);
274 : static bool init_htab(HTAB *hashp, long nelem);
275 : pg_noreturn static void hash_corrupted(HTAB *hashp);
276 : static uint32 hash_initial_lookup(HTAB *hashp, uint32 hashvalue,
277 : HASHBUCKET **bucketptr);
278 : static long next_pow2_long(long num);
279 : static int next_pow2_int(long num);
280 : static void register_seq_scan(HTAB *hashp);
281 : static void deregister_seq_scan(HTAB *hashp);
282 : static bool has_seq_scans(HTAB *hashp);
283 :
284 :
285 : /*
286 : * memory allocation support
287 : */
288 : static MemoryContext CurrentDynaHashCxt = NULL;
289 :
290 : static void *
291 2343030 : DynaHashAlloc(Size size)
292 : {
293 : Assert(MemoryContextIsValid(CurrentDynaHashCxt));
294 2343030 : return MemoryContextAllocExtended(CurrentDynaHashCxt, size,
295 : MCXT_ALLOC_NO_OOM);
296 : }
297 :
298 :
299 : /*
300 : * HashCompareFunc for string keys
301 : *
302 : * Because we copy keys with strlcpy(), they will be truncated at keysize-1
303 : * bytes, so we can only compare that many ... hence strncmp is almost but
304 : * not quite the right thing.
305 : */
306 : static int
307 1022824 : string_compare(const char *key1, const char *key2, Size keysize)
308 : {
309 1022824 : return strncmp(key1, key2, keysize - 1);
310 : }
311 :
312 :
313 : /************************** CREATE ROUTINES **********************/
314 :
315 : /*
316 : * hash_create -- create a new dynamic hash table
317 : *
318 : * tabname: a name for the table (for debugging purposes)
319 : * nelem: maximum number of elements expected
320 : * *info: additional table parameters, as indicated by flags
321 : * flags: bitmask indicating which parameters to take from *info
322 : *
323 : * The flags value *must* include HASH_ELEM. (Formerly, this was nominally
324 : * optional, but the default keysize and entrysize values were useless.)
325 : * The flags value must also include exactly one of HASH_STRINGS, HASH_BLOBS,
326 : * or HASH_FUNCTION, to define the key hashing semantics (C strings,
327 : * binary blobs, or custom, respectively). Callers specifying a custom
328 : * hash function will likely also want to use HASH_COMPARE, and perhaps
329 : * also HASH_KEYCOPY, to control key comparison and copying.
330 : * Another often-used flag is HASH_CONTEXT, to allocate the hash table
331 : * under info->hcxt rather than under TopMemoryContext; the default
332 : * behavior is only suitable for session-lifespan hash tables.
333 : * Other flags bits are special-purpose and seldom used, except for those
334 : * associated with shared-memory hash tables, for which see ShmemInitHash().
335 : *
336 : * Fields in *info are read only when the associated flags bit is set.
337 : * It is not necessary to initialize other fields of *info.
338 : * Neither tabname nor *info need persist after the hash_create() call.
339 : *
340 : * Note: It is deprecated for callers of hash_create() to explicitly specify
341 : * string_hash, tag_hash, uint32_hash, or oid_hash. Just set HASH_STRINGS or
342 : * HASH_BLOBS. Use HASH_FUNCTION only when you want something other than
343 : * one of these.
344 : *
345 : * Note: for a shared-memory hashtable, nelem needs to be a pretty good
346 : * estimate, since we can't expand the table on the fly. But an unshared
347 : * hashtable can be expanded on-the-fly, so it's better for nelem to be
348 : * on the small side and let the table grow if it's exceeded. An overly
349 : * large nelem will penalize hash_seq_search speed without buying much.
350 : */
351 : HTAB *
352 539520 : hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
353 : {
354 : HTAB *hashp;
355 : HASHHDR *hctl;
356 :
357 : /*
358 : * Hash tables now allocate space for key and data, but you have to say
359 : * how much space to allocate.
360 : */
361 : Assert(flags & HASH_ELEM);
362 : Assert(info->keysize > 0);
363 : Assert(info->entrysize >= info->keysize);
364 :
365 : /*
366 : * For shared hash tables, we have a local hash header (HTAB struct) that
367 : * we allocate in TopMemoryContext; all else is in shared memory.
368 : *
369 : * For non-shared hash tables, everything including the hash header is in
370 : * a memory context created specially for the hash table --- this makes
371 : * hash_destroy very simple. The memory context is made a child of either
372 : * a context specified by the caller, or TopMemoryContext if nothing is
373 : * specified.
374 : */
375 539520 : if (flags & HASH_SHARED_MEM)
376 : {
377 : /* Set up to allocate the hash header */
378 18914 : CurrentDynaHashCxt = TopMemoryContext;
379 : }
380 : else
381 : {
382 : /* Create the hash table's private memory context */
383 520606 : if (flags & HASH_CONTEXT)
384 232360 : CurrentDynaHashCxt = info->hcxt;
385 : else
386 288246 : CurrentDynaHashCxt = TopMemoryContext;
387 520606 : CurrentDynaHashCxt = AllocSetContextCreate(CurrentDynaHashCxt,
388 : "dynahash",
389 : ALLOCSET_DEFAULT_SIZES);
390 : }
391 :
392 : /* Initialize the hash header, plus a copy of the table name */
393 539520 : hashp = (HTAB *) MemoryContextAlloc(CurrentDynaHashCxt,
394 539520 : sizeof(HTAB) + strlen(tabname) + 1);
395 7013760 : MemSet(hashp, 0, sizeof(HTAB));
396 :
397 539520 : hashp->tabname = (char *) (hashp + 1);
398 539520 : strcpy(hashp->tabname, tabname);
399 :
400 : /* If we have a private context, label it with hashtable's name */
401 539520 : if (!(flags & HASH_SHARED_MEM))
402 520606 : MemoryContextSetIdentifier(CurrentDynaHashCxt, hashp->tabname);
403 :
404 : /*
405 : * Select the appropriate hash function (see comments at head of file).
406 : */
407 539520 : if (flags & HASH_FUNCTION)
408 : {
409 : Assert(!(flags & (HASH_BLOBS | HASH_STRINGS)));
410 27124 : hashp->hash = info->hash;
411 : }
412 512396 : else if (flags & HASH_BLOBS)
413 : {
414 : Assert(!(flags & HASH_STRINGS));
415 : /* We can optimize hashing for common key sizes */
416 406464 : if (info->keysize == sizeof(uint32))
417 235940 : hashp->hash = uint32_hash;
418 : else
419 170524 : hashp->hash = tag_hash;
420 : }
421 : else
422 : {
423 : /*
424 : * string_hash used to be considered the default hash method, and in a
425 : * non-assert build it effectively still is. But we now consider it
426 : * an assertion error to not say HASH_STRINGS explicitly. To help
427 : * catch mistaken usage of HASH_STRINGS, we also insist on a
428 : * reasonably long string length: if the keysize is only 4 or 8 bytes,
429 : * it's almost certainly an integer or pointer not a string.
430 : */
431 : Assert(flags & HASH_STRINGS);
432 : Assert(info->keysize > 8);
433 :
434 105932 : hashp->hash = string_hash;
435 : }
436 :
437 : /*
438 : * If you don't specify a match function, it defaults to string_compare if
439 : * you used string_hash, and to memcmp otherwise.
440 : *
441 : * Note: explicitly specifying string_hash is deprecated, because this
442 : * might not work for callers in loadable modules on some platforms due to
443 : * referencing a trampoline instead of the string_hash function proper.
444 : * Specify HASH_STRINGS instead.
445 : */
446 539520 : if (flags & HASH_COMPARE)
447 14282 : hashp->match = info->match;
448 525238 : else if (hashp->hash == string_hash)
449 105932 : hashp->match = (HashCompareFunc) string_compare;
450 : else
451 419306 : hashp->match = memcmp;
452 :
453 : /*
454 : * Similarly, the key-copying function defaults to strlcpy or memcpy.
455 : */
456 539520 : if (flags & HASH_KEYCOPY)
457 0 : hashp->keycopy = info->keycopy;
458 539520 : else if (hashp->hash == string_hash)
459 : {
460 : /*
461 : * The signature of keycopy is meant for memcpy(), which returns
462 : * void*, but strlcpy() returns size_t. Since we never use the return
463 : * value of keycopy, and size_t is pretty much always the same size as
464 : * void *, this should be safe. The extra cast in the middle is to
465 : * avoid warnings from -Wcast-function-type.
466 : */
467 105932 : hashp->keycopy = (HashCopyFunc) (pg_funcptr_t) strlcpy;
468 : }
469 : else
470 433588 : hashp->keycopy = memcpy;
471 :
472 : /* And select the entry allocation function, too. */
473 539520 : if (flags & HASH_ALLOC)
474 18914 : hashp->alloc = info->alloc;
475 : else
476 520606 : hashp->alloc = DynaHashAlloc;
477 :
478 539520 : if (flags & HASH_SHARED_MEM)
479 : {
480 : /*
481 : * ctl structure and directory are preallocated for shared memory
482 : * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
483 : * well.
484 : */
485 18914 : hashp->hctl = info->hctl;
486 18914 : hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR));
487 18914 : hashp->hcxt = NULL;
488 18914 : hashp->isshared = true;
489 :
490 : /* hash table already exists, we're just attaching to it */
491 18914 : if (flags & HASH_ATTACH)
492 : {
493 : /* make local copies of some heavily-used values */
494 0 : hctl = hashp->hctl;
495 0 : hashp->keysize = hctl->keysize;
496 0 : hashp->ssize = hctl->ssize;
497 0 : hashp->sshift = hctl->sshift;
498 :
499 0 : return hashp;
500 : }
501 : }
502 : else
503 : {
504 : /* setup hash table defaults */
505 520606 : hashp->hctl = NULL;
506 520606 : hashp->dir = NULL;
507 520606 : hashp->hcxt = CurrentDynaHashCxt;
508 520606 : hashp->isshared = false;
509 : }
510 :
511 539520 : if (!hashp->hctl)
512 : {
513 520606 : hashp->hctl = (HASHHDR *) hashp->alloc(sizeof(HASHHDR));
514 520606 : if (!hashp->hctl)
515 0 : ereport(ERROR,
516 : (errcode(ERRCODE_OUT_OF_MEMORY),
517 : errmsg("out of memory")));
518 : }
519 :
520 539520 : hashp->frozen = false;
521 :
522 539520 : hdefault(hashp);
523 :
524 539520 : hctl = hashp->hctl;
525 :
526 539520 : if (flags & HASH_PARTITION)
527 : {
528 : /* Doesn't make sense to partition a local hash table */
529 : Assert(flags & HASH_SHARED_MEM);
530 :
531 : /*
532 : * The number of partitions had better be a power of 2. Also, it must
533 : * be less than INT_MAX (see init_htab()), so call the int version of
534 : * next_pow2.
535 : */
536 : Assert(info->num_partitions == next_pow2_int(info->num_partitions));
537 :
538 10500 : hctl->num_partitions = info->num_partitions;
539 : }
540 :
541 539520 : if (flags & HASH_SEGMENT)
542 : {
543 0 : hctl->ssize = info->ssize;
544 0 : hctl->sshift = my_log2(info->ssize);
545 : /* ssize had better be a power of 2 */
546 : Assert(hctl->ssize == (1L << hctl->sshift));
547 : }
548 :
549 : /*
550 : * SHM hash tables have fixed directory size passed by the caller.
551 : */
552 539520 : if (flags & HASH_DIRSIZE)
553 : {
554 18914 : hctl->max_dsize = info->max_dsize;
555 18914 : hctl->dsize = info->dsize;
556 : }
557 :
558 : /* remember the entry sizes, too */
559 539520 : hctl->keysize = info->keysize;
560 539520 : hctl->entrysize = info->entrysize;
561 :
562 : /* make local copies of heavily-used constant fields */
563 539520 : hashp->keysize = hctl->keysize;
564 539520 : hashp->ssize = hctl->ssize;
565 539520 : hashp->sshift = hctl->sshift;
566 :
567 : /* Build the hash directory structure */
568 539520 : if (!init_htab(hashp, nelem))
569 0 : elog(ERROR, "failed to initialize hash table \"%s\"", hashp->tabname);
570 :
571 : /*
572 : * For a shared hash table, preallocate the requested number of elements.
573 : * This reduces problems with run-time out-of-shared-memory conditions.
574 : *
575 : * For a non-shared hash table, preallocate the requested number of
576 : * elements if it's less than our chosen nelem_alloc. This avoids wasting
577 : * space if the caller correctly estimates a small table size.
578 : */
579 539520 : if ((flags & HASH_SHARED_MEM) ||
580 520606 : nelem < hctl->nelem_alloc)
581 : {
582 : int i,
583 : freelist_partitions,
584 : nelem_alloc,
585 : nelem_alloc_first;
586 :
587 : /*
588 : * If hash table is partitioned, give each freelist an equal share of
589 : * the initial allocation. Otherwise only freeList[0] is used.
590 : */
591 259652 : if (IS_PARTITIONED(hashp->hctl))
592 10500 : freelist_partitions = NUM_FREELISTS;
593 : else
594 249152 : freelist_partitions = 1;
595 :
596 259652 : nelem_alloc = nelem / freelist_partitions;
597 259652 : if (nelem_alloc <= 0)
598 0 : nelem_alloc = 1;
599 :
600 : /*
601 : * Make sure we'll allocate all the requested elements; freeList[0]
602 : * gets the excess if the request isn't divisible by NUM_FREELISTS.
603 : */
604 259652 : if (nelem_alloc * freelist_partitions < nelem)
605 102 : nelem_alloc_first =
606 102 : nelem - nelem_alloc * (freelist_partitions - 1);
607 : else
608 259550 : nelem_alloc_first = nelem_alloc;
609 :
610 844804 : for (i = 0; i < freelist_partitions; i++)
611 : {
612 585152 : int temp = (i == 0) ? nelem_alloc_first : nelem_alloc;
613 :
614 585152 : if (!element_alloc(hashp, temp, i))
615 0 : ereport(ERROR,
616 : (errcode(ERRCODE_OUT_OF_MEMORY),
617 : errmsg("out of memory")));
618 : }
619 : }
620 :
621 539520 : if (flags & HASH_FIXED_SIZE)
622 6300 : hashp->isfixed = true;
623 539520 : return hashp;
624 : }
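
/*
 * A usage sketch for a memory-context-bound local table, as described in
 * the comments above (MyEntry is hypothetical):
 *
 *		HASHCTL		info;
 *		HTAB	   *htab;
 *
 *		info.keysize = sizeof(Oid);
 *		info.entrysize = sizeof(MyEntry);
 *		info.hcxt = CurrentMemoryContext;
 *		htab = hash_create("my oid table", 256, &info,
 *						   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 *
 * Only the fields named by the given flags are read from "info", so the
 * other HASHCTL fields can be left uninitialized.
 */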
625 :
626 : /*
627 : * Set default HASHHDR parameters.
628 : */
629 : static void
630 539520 : hdefault(HTAB *hashp)
631 : {
632 539520 : HASHHDR *hctl = hashp->hctl;
633 :
634 57728640 : MemSet(hctl, 0, sizeof(HASHHDR));
635 :
636 539520 : hctl->dsize = DEF_DIRSIZE;
637 539520 : hctl->nsegs = 0;
638 :
639 539520 : hctl->num_partitions = 0; /* not partitioned */
640 :
641 : /* table has no fixed maximum size */
642 539520 : hctl->max_dsize = NO_MAX_DSIZE;
643 :
644 539520 : hctl->ssize = DEF_SEGSIZE;
645 539520 : hctl->sshift = DEF_SEGSIZE_SHIFT;
646 :
647 : #ifdef HASH_STATISTICS
648 : hctl->accesses = hctl->collisions = 0;
649 : #endif
650 539520 : }
651 :
652 : /*
653 : * Given the user-specified entry size, choose nelem_alloc, ie, how many
654 : * elements to add to the hash table when we need more.
655 : */
656 : static int
657 574688 : choose_nelem_alloc(Size entrysize)
658 : {
659 : int nelem_alloc;
660 : Size elementSize;
661 : Size allocSize;
662 :
663 : /* Each element has a HASHELEMENT header plus user data. */
664 : /* NB: this had better match element_alloc() */
665 574688 : elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
666 :
667 : /*
668 : * The idea here is to choose nelem_alloc at least 32, but round up so
669 : * that the allocation request will be a power of 2 or just less. This
670 : * makes little difference for hash tables in shared memory, but for hash
671 : * tables managed by palloc, the allocation request will be rounded up to
672 : * a power of 2 anyway. If we fail to take this into account, we'll waste
673 : * as much as half the allocated space.
674 : */
675 574688 : allocSize = 32 * 4; /* assume elementSize at least 8 */
676 : do
677 : {
678 2345312 : allocSize <<= 1;
679 2345312 : nelem_alloc = allocSize / elementSize;
680 2345312 : } while (nelem_alloc < 32);
681 :
682 574688 : return nelem_alloc;
683 : }
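
/*
 * A worked example of the loop above, assuming MAXALIGN(sizeof(HASHELEMENT))
 * is 16 on this platform: for entrysize = 64, elementSize = 16 + 64 = 80;
 * allocSize doubles through 256, 512, 1024, 2048 and stops at 4096, where
 * 4096 / 80 = 51 >= 32.  So nelem_alloc is 51, and the resulting 4080-byte
 * allocation request stays just under a power of 2.
 */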
684 :
685 : /*
686 : * Compute derived fields of hctl and build the initial directory/segment
687 : * arrays
688 : */
689 : static bool
690 539520 : init_htab(HTAB *hashp, long nelem)
691 : {
692 539520 : HASHHDR *hctl = hashp->hctl;
693 : HASHSEGMENT *segp;
694 : int nbuckets;
695 : int nsegs;
696 : int i;
697 :
698 : /*
699 : * initialize mutexes if it's a partitioned table
700 : */
701 539520 : if (IS_PARTITIONED(hctl))
702 346500 : for (i = 0; i < NUM_FREELISTS; i++)
703 336000 : SpinLockInit(&(hctl->freeList[i].mutex));
704 :
705 : /*
706 : * Allocate space for the next greater power of two number of buckets,
707 : * assuming a desired maximum load factor of 1.
708 : */
709 539520 : nbuckets = next_pow2_int(nelem);
710 :
711 : /*
712 : * In a partitioned table, nbuckets must be at least equal to
713 : * num_partitions; were it less, keys with apparently different partition
714 : * numbers would map to the same bucket, breaking partition independence.
715 : * (Normally nbuckets will be much bigger; this is just a safety check.)
716 : */
717 539520 : while (nbuckets < hctl->num_partitions)
718 0 : nbuckets <<= 1;
719 :
720 539520 : hctl->max_bucket = hctl->low_mask = nbuckets - 1;
721 539520 : hctl->high_mask = (nbuckets << 1) - 1;
722 :
723 : /*
724 : * Figure number of directory segments needed, round up to a power of 2
725 : */
726 539520 : nsegs = (nbuckets - 1) / hctl->ssize + 1;
727 539520 : nsegs = next_pow2_int(nsegs);
728 :
729 : /*
730 : * Make sure directory is big enough. If pre-allocated directory is too
731 : * small, choke (caller screwed up).
732 : */
733 539520 : if (nsegs > hctl->dsize)
734 : {
735 0 : if (!(hashp->dir))
736 0 : hctl->dsize = nsegs;
737 : else
738 0 : return false;
739 : }
740 :
741 : /* Allocate a directory */
742 539520 : if (!(hashp->dir))
743 : {
744 520606 : CurrentDynaHashCxt = hashp->hcxt;
745 520606 : hashp->dir = (HASHSEGMENT *)
746 520606 : hashp->alloc(hctl->dsize * sizeof(HASHSEGMENT));
747 520606 : if (!hashp->dir)
748 0 : return false;
749 : }
750 :
751 : /* Allocate initial segments */
752 1667194 : for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++)
753 : {
754 1127674 : *segp = seg_alloc(hashp);
755 1127674 : if (*segp == NULL)
756 0 : return false;
757 : }
758 :
759 : /* Choose number of entries to allocate at a time */
760 539520 : hctl->nelem_alloc = choose_nelem_alloc(hctl->entrysize);
761 :
762 : #ifdef HASH_DEBUG
763 : fprintf(stderr, "init_htab:\n%s%p\n%s%ld\n%s%ld\n%s%d\n%s%ld\n%s%u\n%s%x\n%s%x\n%s%ld\n",
764 : "TABLE POINTER ", hashp,
765 : "DIRECTORY SIZE ", hctl->dsize,
766 : "SEGMENT SIZE ", hctl->ssize,
767 : "SEGMENT SHIFT ", hctl->sshift,
768 : "MAX BUCKET ", hctl->max_bucket,
769 : "HIGH MASK ", hctl->high_mask,
770 : "LOW MASK ", hctl->low_mask,
771 : "NSEGS ", hctl->nsegs);
772 : #endif
773 539520 : return true;
774 : }
775 :
776 : /*
777 : * Estimate the space needed for a hashtable containing the given number
778 : * of entries of given size.
779 : * NOTE: this is used to estimate the footprint of hashtables in shared
780 : * memory; therefore it does not count HTAB which is in local memory.
781 : * NB: assumes that all hash structure parameters have default values!
782 : */
783 : Size
784 35168 : hash_estimate_size(long num_entries, Size entrysize)
785 : {
786 : Size size;
787 : long nBuckets,
788 : nSegments,
789 : nDirEntries,
790 : nElementAllocs,
791 : elementSize,
792 : elementAllocCnt;
793 :
794 : /* estimate number of buckets wanted */
795 35168 : nBuckets = next_pow2_long(num_entries);
796 : /* # of segments needed for nBuckets */
797 35168 : nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
798 : /* directory entries */
799 35168 : nDirEntries = DEF_DIRSIZE;
800 35168 : while (nDirEntries < nSegments)
801 0 : nDirEntries <<= 1; /* dir_alloc doubles dsize at each call */
802 :
803 : /* fixed control info */
804 35168 : size = MAXALIGN(sizeof(HASHHDR)); /* but not HTAB, per above */
805 : /* directory */
806 35168 : size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
807 : /* segments */
808 35168 : size = add_size(size, mul_size(nSegments,
809 : MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
810 : /* elements --- allocated in groups of choose_nelem_alloc() entries */
811 35168 : elementAllocCnt = choose_nelem_alloc(entrysize);
812 35168 : nElementAllocs = (num_entries - 1) / elementAllocCnt + 1;
813 35168 : elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
814 35168 : size = add_size(size,
815 : mul_size(nElementAllocs,
816 : mul_size(elementAllocCnt, elementSize)));
817 :
818 35168 : return size;
819 : }
820 :
821 : /*
822 : * Select an appropriate directory size for a hashtable with the given
823 : * maximum number of entries.
824 : * This is only needed for hashtables in shared memory, whose directories
825 : * cannot be expanded dynamically.
826 : * NB: assumes that all hash structure parameters have default values!
827 : *
828 : * XXX this had better agree with the behavior of init_htab()...
829 : */
830 : long
831 18914 : hash_select_dirsize(long num_entries)
832 : {
833 : long nBuckets,
834 : nSegments,
835 : nDirEntries;
836 :
837 : /* estimate number of buckets wanted */
838 18914 : nBuckets = next_pow2_long(num_entries);
839 : /* # of segments needed for nBuckets */
840 18914 : nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
841 : /* directory entries */
842 18914 : nDirEntries = DEF_DIRSIZE;
843 18914 : while (nDirEntries < nSegments)
844 0 : nDirEntries <<= 1; /* dir_alloc doubles dsize at each call */
845 :
846 18914 : return nDirEntries;
847 : }
848 :
849 : /*
850 : * Compute the required initial memory allocation for a shared-memory
851 : * hashtable with the given parameters. We need space for the HASHHDR
852 : * and for the (non expansible) directory.
853 : */
854 : Size
855 18914 : hash_get_shared_size(HASHCTL *info, int flags)
856 : {
857 : Assert(flags & HASH_DIRSIZE);
858 : Assert(info->dsize == info->max_dsize);
859 18914 : return sizeof(HASHHDR) + info->dsize * sizeof(HASHSEGMENT);
860 : }
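
/*
 * A sizing sketch tying the three routines above together for a shared
 * table (max_entries is hypothetical):
 *
 *		long		dsize = hash_select_dirsize(max_entries);
 *		Size		size = hash_estimate_size(max_entries, sizeof(MyEntry));
 *
 * The hash_estimate_size() result feeds the shared-memory reservation,
 * while dsize is passed via HASHCTL.dsize/max_dsize (with HASH_DIRSIZE)
 * when the table is actually built; see ShmemInitHash().
 */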
861 :
862 :
863 : /********************** DESTROY ROUTINES ************************/
864 :
865 : void
866 132558 : hash_destroy(HTAB *hashp)
867 : {
868 132558 : if (hashp != NULL)
869 : {
870 : /* allocation method must be one we know how to free, too */
871 : Assert(hashp->alloc == DynaHashAlloc);
872 : /* so this hashtable must have its own context */
873 : Assert(hashp->hcxt != NULL);
874 :
875 132558 : hash_stats("destroy", hashp);
876 :
877 : /*
878 : * Free everything by destroying the hash table's memory context.
879 : */
880 132558 : MemoryContextDelete(hashp->hcxt);
881 : }
882 132558 : }
883 :
884 : void
885 132558 : hash_stats(const char *where, HTAB *hashp)
886 : {
887 : #ifdef HASH_STATISTICS
888 : fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n",
889 : where, hashp->hctl->accesses, hashp->hctl->collisions);
890 :
891 : fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
892 : hash_get_num_entries(hashp), (long) hashp->hctl->keysize,
893 : hashp->hctl->max_bucket, hashp->hctl->nsegs);
894 : fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
895 : where, hash_accesses, hash_collisions);
896 : fprintf(stderr, "hash_stats: total expansions %ld\n",
897 : hash_expansions);
898 : #endif
899 132558 : }
900 :
901 : /****************************** SEARCH ROUTINES *****************************/
902 :
903 :
904 : /*
905 : * get_hash_value -- exported routine to calculate a key's hash value
906 : *
907 : * We export this because for partitioned tables, callers need to compute
908 : * the partition number (from the low-order bits of the hash value) before
909 : * searching.
910 : */
911 : uint32
912 158825940 : get_hash_value(HTAB *hashp, const void *keyPtr)
913 : {
914 158825940 : return hashp->hash(keyPtr, hashp->keysize);
915 : }
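
/*
 * A sketch of the partitioned-table access pattern this enables (the
 * lock array and partition count here are hypothetical):
 *
 *		uint32		hashvalue = get_hash_value(htab, &key);
 *		int			partition = hashvalue % NUM_MY_PARTITIONS;
 *
 *		LWLockAcquire(my_partition_locks[partition], LW_EXCLUSIVE);
 *		entry = hash_search_with_hash_value(htab, &key, hashvalue,
 *											HASH_ENTER, &found);
 *		...
 *		LWLockRelease(my_partition_locks[partition]);
 *
 * The low-order bits work as a partition number because calc_bucket()
 * below preserves them when mapping hash values to buckets.
 */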
916 :
917 : /* Convert a hash value to a bucket number */
918 : static inline uint32
919 365101764 : calc_bucket(HASHHDR *hctl, uint32 hash_val)
920 : {
921 : uint32 bucket;
922 :
923 365101764 : bucket = hash_val & hctl->high_mask;
924 365101764 : if (bucket > hctl->max_bucket)
925 172217388 : bucket = bucket & hctl->low_mask;
926 :
927 365101764 : return bucket;
928 : }
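
/*
 * For example: with max_bucket = 5, low_mask = 3, and high_mask = 7, a hash
 * value of 14 masks to 14 & 7 = 6; since 6 > 5 it is reduced again to
 * 6 & 3 = 2, so the entry lives in bucket 2 until bucket 6 is created by
 * expand_table().
 */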
929 :
930 : /*
931 : * hash_search -- look up key in table and perform action
932 : * hash_search_with_hash_value -- same, with key's hash value already computed
933 : *
934 : * action is one of:
935 : * HASH_FIND: look up key in table
936 : * HASH_ENTER: look up key in table, creating entry if not present
937 : * HASH_ENTER_NULL: same, but return NULL if out of memory
938 : * HASH_REMOVE: look up key in table, remove entry if present
939 : *
940 : * Return value is a pointer to the element found/entered/removed if any,
941 : * or NULL if no match was found. (NB: in the case of the REMOVE action,
942 : * the result is a dangling pointer that shouldn't be dereferenced!)
943 : *
944 : * HASH_ENTER will normally ereport a generic "out of memory" error if
945 : * it is unable to create a new entry. The HASH_ENTER_NULL operation is
946 : * the same except it will return NULL if out of memory.
947 : *
948 : * If foundPtr isn't NULL, then *foundPtr is set true if we found an
949 : * existing entry in the table, false otherwise. This is needed in the
950 : * HASH_ENTER case, but is redundant with the return value otherwise.
951 : *
952 : * For hash_search_with_hash_value, the hashvalue parameter must have been
953 : * calculated with get_hash_value().
954 : */
955 : void *
956 219659216 : hash_search(HTAB *hashp,
957 : const void *keyPtr,
958 : HASHACTION action,
959 : bool *foundPtr)
960 : {
961 219659216 : return hash_search_with_hash_value(hashp,
962 : keyPtr,
963 219659216 : hashp->hash(keyPtr, hashp->keysize),
964 : action,
965 : foundPtr);
966 : }
967 :
968 : void *
969 362565904 : hash_search_with_hash_value(HTAB *hashp,
970 : const void *keyPtr,
971 : uint32 hashvalue,
972 : HASHACTION action,
973 : bool *foundPtr)
974 : {
975 362565904 : HASHHDR *hctl = hashp->hctl;
976 362565904 : int freelist_idx = FREELIST_IDX(hctl, hashvalue);
977 : Size keysize;
978 : HASHBUCKET currBucket;
979 : HASHBUCKET *prevBucketPtr;
980 : HashCompareFunc match;
981 :
982 : #ifdef HASH_STATISTICS
983 : hash_accesses++;
984 : hctl->accesses++;
985 : #endif
986 :
987 : /*
988 : * If inserting, check if it is time to split a bucket.
989 : *
990 : * NOTE: failure to expand table is not a fatal error, it just means we
991 : * have to run at higher fill factor than we wanted. However, if we're
992 : * using the palloc allocator then it will throw error anyway on
993 : * out-of-memory, so we must do this before modifying the table.
994 : */
995 362565904 : if (action == HASH_ENTER || action == HASH_ENTER_NULL)
996 : {
997 : /*
998 : * Can't split if running in partitioned mode, nor if frozen, nor if
999 : * table is the subject of any active hash_seq_search scans.
1000 : */
1001 96886392 : if (hctl->freeList[0].nentries > (long) hctl->max_bucket &&
1002 781150 : !IS_PARTITIONED(hctl) && !hashp->frozen &&
1003 781150 : !has_seq_scans(hashp))
1004 781150 : (void) expand_table(hashp);
1005 : }
1006 :
1007 : /*
1008 : * Do the initial lookup
1009 : */
1010 362565904 : (void) hash_initial_lookup(hashp, hashvalue, &prevBucketPtr);
1011 362565904 : currBucket = *prevBucketPtr;
1012 :
1013 : /*
1014 : * Follow collision chain looking for matching key
1015 : */
1016 362565904 : match = hashp->match; /* save one fetch in inner loop */
1017 362565904 : keysize = hashp->keysize; /* ditto */
1018 :
1019 446370542 : while (currBucket != NULL)
1020 : {
1021 666813402 : if (currBucket->hashvalue == hashvalue &&
1022 291507122 : match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
1023 291501642 : break;
1024 83804638 : prevBucketPtr = &(currBucket->link);
1025 83804638 : currBucket = *prevBucketPtr;
1026 : #ifdef HASH_STATISTICS
1027 : hash_collisions++;
1028 : hctl->collisions++;
1029 : #endif
1030 : }
1031 :
1032 362565904 : if (foundPtr)
1033 99409580 : *foundPtr = (bool) (currBucket != NULL);
1034 :
1035 : /*
1036 : * OK, now what?
1037 : */
1038 362565904 : switch (action)
1039 : {
1040 216106176 : case HASH_FIND:
1041 216106176 : if (currBucket != NULL)
1042 204304632 : return ELEMENTKEY(currBucket);
1043 11801544 : return NULL;
1044 :
1045 49573336 : case HASH_REMOVE:
1046 49573336 : if (currBucket != NULL)
1047 : {
1048 : /* if partitioned, must lock to touch nentries and freeList */
1049 49563908 : if (IS_PARTITIONED(hctl))
1050 10841016 : SpinLockAcquire(&(hctl->freeList[freelist_idx].mutex));
1051 :
1052 : /* delete the record from the appropriate nentries counter. */
1053 : Assert(hctl->freeList[freelist_idx].nentries > 0);
1054 49563908 : hctl->freeList[freelist_idx].nentries--;
1055 :
1056 : /* remove record from hash bucket's chain. */
1057 49563908 : *prevBucketPtr = currBucket->link;
1058 :
1059 : /* add the record to the appropriate freelist. */
1060 49563908 : currBucket->link = hctl->freeList[freelist_idx].freeList;
1061 49563908 : hctl->freeList[freelist_idx].freeList = currBucket;
1062 :
1063 49563908 : if (IS_PARTITIONED(hctl))
1064 10841016 : SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1065 :
1066 : /*
1067 : * better hope the caller is synchronizing access to this
1068 : * element, because someone else is going to reuse it the next
1069 : * time something is added to the table
1070 : */
1071 49563908 : return ELEMENTKEY(currBucket);
1072 : }
1073 9428 : return NULL;
1074 :
1075 96886392 : case HASH_ENTER:
1076 : case HASH_ENTER_NULL:
1077 : /* Return existing element if found, else create one */
1078 96886392 : if (currBucket != NULL)
1079 37633102 : return ELEMENTKEY(currBucket);
1080 :
1081 : /* disallow inserts if frozen */
1082 59253290 : if (hashp->frozen)
1083 0 : elog(ERROR, "cannot insert into frozen hashtable \"%s\"",
1084 : hashp->tabname);
1085 :
1086 59253290 : currBucket = get_hash_entry(hashp, freelist_idx);
1087 59253290 : if (currBucket == NULL)
1088 : {
1089 : /* out of memory */
1090 0 : if (action == HASH_ENTER_NULL)
1091 0 : return NULL;
1092 : /* report a generic message */
1093 0 : if (hashp->isshared)
1094 0 : ereport(ERROR,
1095 : (errcode(ERRCODE_OUT_OF_MEMORY),
1096 : errmsg("out of shared memory")));
1097 : else
1098 0 : ereport(ERROR,
1099 : (errcode(ERRCODE_OUT_OF_MEMORY),
1100 : errmsg("out of memory")));
1101 : }
1102 :
1103 : /* link into hashbucket chain */
1104 59253290 : *prevBucketPtr = currBucket;
1105 59253290 : currBucket->link = NULL;
1106 :
1107 : /* copy key into record */
1108 59253290 : currBucket->hashvalue = hashvalue;
1109 59253290 : hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);
1110 :
1111 : /*
1112 : * Caller is expected to fill the data field on return. DO NOT
1113 : * insert any code that could possibly throw error here, as doing
1114 : * so would leave the table entry incomplete and hence corrupt the
1115 : * caller's data structure.
1116 : */
1117 :
1118 59253290 : return ELEMENTKEY(currBucket);
1119 : }
1120 :
1121 0 : elog(ERROR, "unrecognized hash action code: %d", (int) action);
1122 :
1123 : return NULL; /* keep compiler quiet */
1124 : }
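
/*
 * A typical HASH_ENTER call sequence, per the contract above (MyEntry is a
 * hypothetical struct whose first field is the Oid key):
 *
 *		bool		found;
 *		MyEntry    *entry;
 *
 *		entry = (MyEntry *) hash_search(htab, &key, HASH_ENTER, &found);
 *		if (!found)
 *			entry->count = 0;		(caller initializes non-key fields)
 *		entry->count++;
 *
 * Only the key is copied into a new entry; as noted above, the caller must
 * fill in the rest of the entry before doing anything that could throw.
 */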
1125 :
1126 : /*
1127 : * hash_update_hash_key -- change the hash key of an existing table entry
1128 : *
1129 : * This is equivalent to removing the entry, making a new entry, and copying
1130 : * over its data, except that the entry never goes to the table's freelist.
1131 : * Therefore this cannot suffer an out-of-memory failure, even if there are
1132 : * other processes operating in other partitions of the hashtable.
1133 : *
1134 : * Returns true if successful, false if the requested new hash key is already
1135 : * present. Throws error if the specified entry pointer isn't actually a
1136 : * table member.
1137 : *
1138 : * NB: currently, there is no special case for old and new hash keys being
1139 : * identical, which means we'll report false for that situation. This is
1140 : * preferable for existing uses.
1141 : *
1142 : * NB: for a partitioned hashtable, caller must hold lock on both relevant
1143 : * partitions, if the new hash key would belong to a different partition.
1144 : */
1145 : bool
1146 1346 : hash_update_hash_key(HTAB *hashp,
1147 : void *existingEntry,
1148 : const void *newKeyPtr)
1149 : {
1150 1346 : HASHELEMENT *existingElement = ELEMENT_FROM_KEY(existingEntry);
1151 : uint32 newhashvalue;
1152 : Size keysize;
1153 : uint32 bucket;
1154 : uint32 newbucket;
1155 : HASHBUCKET currBucket;
1156 : HASHBUCKET *prevBucketPtr;
1157 : HASHBUCKET *oldPrevPtr;
1158 : HashCompareFunc match;
1159 :
1160 : #ifdef HASH_STATISTICS
1161 : hash_accesses++;
1162 : hashp->hctl->accesses++;
1163 : #endif
1164 :
1165 : /* disallow updates if frozen */
1166 1346 : if (hashp->frozen)
1167 0 : elog(ERROR, "cannot update in frozen hashtable \"%s\"",
1168 : hashp->tabname);
1169 :
1170 : /*
1171 : * Lookup the existing element using its saved hash value. We need to do
1172 : * this to be able to unlink it from its hash chain, but as a side benefit
1173 : * we can verify the validity of the passed existingEntry pointer.
1174 : */
1175 1346 : bucket = hash_initial_lookup(hashp, existingElement->hashvalue,
1176 : &prevBucketPtr);
1177 1346 : currBucket = *prevBucketPtr;
1178 :
1179 1348 : while (currBucket != NULL)
1180 : {
1181 1348 : if (currBucket == existingElement)
1182 1346 : break;
1183 2 : prevBucketPtr = &(currBucket->link);
1184 2 : currBucket = *prevBucketPtr;
1185 : }
1186 :
1187 1346 : if (currBucket == NULL)
1188 0 : elog(ERROR, "hash_update_hash_key argument is not in hashtable \"%s\"",
1189 : hashp->tabname);
1190 :
1191 1346 : oldPrevPtr = prevBucketPtr;
1192 :
1193 : /*
1194 : * Now perform the equivalent of a HASH_ENTER operation to locate the hash
1195 : * chain we want to put the entry into.
1196 : */
1197 1346 : newhashvalue = hashp->hash(newKeyPtr, hashp->keysize);
1198 1346 : newbucket = hash_initial_lookup(hashp, newhashvalue, &prevBucketPtr);
1199 1346 : currBucket = *prevBucketPtr;
1200 :
1201 : /*
1202 : * Follow collision chain looking for matching key
1203 : */
1204 1346 : match = hashp->match; /* save one fetch in inner loop */
1205 1346 : keysize = hashp->keysize; /* ditto */
1206 :
1207 1484 : while (currBucket != NULL)
1208 : {
1209 138 : if (currBucket->hashvalue == newhashvalue &&
1210 0 : match(ELEMENTKEY(currBucket), newKeyPtr, keysize) == 0)
1211 0 : break;
1212 138 : prevBucketPtr = &(currBucket->link);
1213 138 : currBucket = *prevBucketPtr;
1214 : #ifdef HASH_STATISTICS
1215 : hash_collisions++;
1216 : hashp->hctl->collisions++;
1217 : #endif
1218 : }
1219 :
1220 1346 : if (currBucket != NULL)
1221 0 : return false; /* collision with an existing entry */
1222 :
1223 1346 : currBucket = existingElement;
1224 :
1225 : /*
1226 : * If old and new hash values belong to the same bucket, we need not
1227 : * change any chain links, and indeed should not since this simplistic
1228 : * update will corrupt the list if currBucket is the last element. (We
1229 : * cannot fall out earlier, however, since we need to scan the bucket to
1230 : * check for duplicate keys.)
1231 : */
1232 1346 : if (bucket != newbucket)
1233 : {
1234 : /* OK to remove record from old hash bucket's chain. */
1235 1210 : *oldPrevPtr = currBucket->link;
1236 :
1237 : /* link into new hashbucket chain */
1238 1210 : *prevBucketPtr = currBucket;
1239 1210 : currBucket->link = NULL;
1240 : }
1241 :
1242 : /* copy new key into record */
1243 1346 : currBucket->hashvalue = newhashvalue;
1244 1346 : hashp->keycopy(ELEMENTKEY(currBucket), newKeyPtr, keysize);
1245 :
1246 : /* rest of record is untouched */
1247 :
1248 1346 : return true;
1249 : }
1250 :
1251 : /*
1252 : * Allocate a new hashtable entry if possible; return NULL if out of memory.
1253 : * (Or, if the underlying space allocator throws error for out-of-memory,
1254 : * we won't return at all.)
1255 : */
1256 : static HASHBUCKET
1257 59253290 : get_hash_entry(HTAB *hashp, int freelist_idx)
1258 : {
1259 59253290 : HASHHDR *hctl = hashp->hctl;
1260 : HASHBUCKET newElement;
1261 :
1262 : for (;;)
1263 : {
1264 : /* if partitioned, must lock to touch nentries and freeList */
1265 59702688 : if (IS_PARTITIONED(hctl))
1266 12077368 : SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1267 :
1268 : /* try to get an entry from the freelist */
1269 59702688 : newElement = hctl->freeList[freelist_idx].freeList;
1270 :
1271 59702688 : if (newElement != NULL)
1272 59253290 : break;
1273 :
1274 449398 : if (IS_PARTITIONED(hctl))
1275 2894 : SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1276 :
1277 : /*
1278 : * No free elements in this freelist. In a partitioned table, there
1279 : * might be entries in other freelists, but to reduce contention we
1280 : * prefer to first try to get another chunk of buckets from the main
1281 : * shmem allocator. If that fails, though, we *MUST* root through all
1282 : * the other freelists before giving up. There are multiple callers
1283 : * that assume that they can allocate every element in the initially
1284 : * requested table size, or that deleting an element guarantees they
1285 : * can insert a new element, even if shared memory is entirely full.
1286 : * Failing because the needed element is in a different freelist is
1287 : * not acceptable.
1288 : */
1289 449398 : if (!element_alloc(hashp, hctl->nelem_alloc, freelist_idx))
1290 : {
1291 : int borrow_from_idx;
1292 :
1293 0 : if (!IS_PARTITIONED(hctl))
1294 0 : return NULL; /* out of memory */
1295 :
1296 : /* try to borrow element from another freelist */
1297 0 : borrow_from_idx = freelist_idx;
1298 : for (;;)
1299 : {
1300 0 : borrow_from_idx = (borrow_from_idx + 1) % NUM_FREELISTS;
1301 0 : if (borrow_from_idx == freelist_idx)
1302 0 : break; /* examined all freelists, fail */
1303 :
1304 0 : SpinLockAcquire(&(hctl->freeList[borrow_from_idx].mutex));
1305 0 : newElement = hctl->freeList[borrow_from_idx].freeList;
1306 :
1307 0 : if (newElement != NULL)
1308 : {
1309 0 : hctl->freeList[borrow_from_idx].freeList = newElement->link;
1310 0 : SpinLockRelease(&(hctl->freeList[borrow_from_idx].mutex));
1311 :
1312 : /* careful: count the new element in its proper freelist */
1313 0 : SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1314 0 : hctl->freeList[freelist_idx].nentries++;
1315 0 : SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1316 :
1317 0 : return newElement;
1318 : }
1319 :
1320 0 : SpinLockRelease(&(hctl->freeList[borrow_from_idx].mutex));
1321 : }
1322 :
1323 : /* no elements available to borrow either, so out of memory */
1324 0 : return NULL;
1325 : }
1326 : }
1327 :
1328 : /* remove entry from freelist, bump nentries */
1329 59253290 : hctl->freeList[freelist_idx].freeList = newElement->link;
1330 59253290 : hctl->freeList[freelist_idx].nentries++;
1331 :
1332 59253290 : if (IS_PARTITIONED(hctl))
1333 12074474 : SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1334 :
1335 59253290 : return newElement;
1336 : }
1337 :
1338 : /*
1339 : * hash_get_num_entries -- get the number of entries in a hashtable
1340 : */
1341 : long
1342 119560 : hash_get_num_entries(HTAB *hashp)
1343 : {
1344 : int i;
1345 119560 : long sum = hashp->hctl->freeList[0].nentries;
1346 :
1347 : /*
1348 : * We currently don't bother with acquiring the mutexes; it's only
1349 : * sensible to call this function if you've got lock on all partitions of
1350 : * the table.
1351 : */
1352 119560 : if (IS_PARTITIONED(hashp->hctl))
1353 : {
1354 126656 : for (i = 1; i < NUM_FREELISTS; i++)
1355 122698 : sum += hashp->hctl->freeList[i].nentries;
1356 : }
1357 :
1358 119560 : return sum;
1359 : }
1360 :
1361 : /*
1362 : * hash_seq_init/_search/_term
1363 : * Sequentially search through hash table and return
1364 : * all the elements one by one, return NULL when no more.
1365 : *
1366 : * hash_seq_term should be called if and only if the scan is abandoned before
1367 : * completion; if hash_seq_search returns NULL then it has already done the
1368 : * end-of-scan cleanup.
1369 : *
1370 : * NOTE: caller may delete the returned element before continuing the scan.
1371 : * However, deleting any other element while the scan is in progress is
1372 : * UNDEFINED (it might be the one that curIndex is pointing at!). Also,
1373 : * if elements are added to the table while the scan is in progress, it is
1374 : * unspecified whether they will be visited by the scan or not.
1375 : *
1376 : * NOTE: it is possible to use hash_seq_init/hash_seq_search without any
1377 : * worry about hash_seq_term cleanup, if the hashtable is first locked against
1378 : * further insertions by calling hash_freeze.
1379 : *
1380 : * NOTE: to use this with a partitioned hashtable, caller had better hold
1381 : * at least shared lock on all partitions of the table throughout the scan!
1382 : * We can cope with insertions or deletions by our own backend, but *not*
1383 : * with concurrent insertions or deletions by another.
1384 : */
1385 : void
1386 4449618 : hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
1387 : {
1388 4449618 : status->hashp = hashp;
1389 4449618 : status->curBucket = 0;
1390 4449618 : status->curEntry = NULL;
1391 4449618 : status->hasHashvalue = false;
1392 4449618 : if (!hashp->frozen)
1393 4449618 : register_seq_scan(hashp);
1394 4449618 : }
1395 :
1396 : /*
1397 : * Same as above but scan by the given hash value.
1398 : * See also hash_seq_search().
1399 : *
1400 : * NOTE: the default hash functions don't match the syscache hash function,
1401 : * so to use this function in a syscache invalidation callback, the table
1402 : * must have been created with a custom hash function that does match.
1403 : * See relatt_cache_syshash() for an example.
1404 : */
1405 : void
1406 1436622 : hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status, HTAB *hashp,
1407 : uint32 hashvalue)
1408 : {
1409 : HASHBUCKET *bucketPtr;
1410 :
1411 1436622 : hash_seq_init(status, hashp);
1412 :
1413 1436622 : status->hasHashvalue = true;
1414 1436622 : status->hashvalue = hashvalue;
1415 :
1416 1436622 : status->curBucket = hash_initial_lookup(hashp, hashvalue, &bucketPtr);
1417 1436622 : status->curEntry = *bucketPtr;
1418 1436622 : }
1419 :
1420 : void *
1421 53397824 : hash_seq_search(HASH_SEQ_STATUS *status)
1422 : {
1423 : HTAB *hashp;
1424 : HASHHDR *hctl;
1425 : uint32 max_bucket;
1426 : long ssize;
1427 : long segment_num;
1428 : long segment_ndx;
1429 : HASHSEGMENT segp;
1430 : uint32 curBucket;
1431 : HASHELEMENT *curElem;
1432 :
1433 53397824 : if (status->hasHashvalue)
1434 : {
1435 : /*
1436 : * Scan entries only in the current bucket because only this bucket
1437 : * can contain entries with the given hash value.
1438 : */
1439 1633164 : while ((curElem = status->curEntry) != NULL)
1440 : {
1441 196542 : status->curEntry = curElem->link;
1442 196542 : if (status->hashvalue != curElem->hashvalue)
1443 187588 : continue;
1444 8954 : return (void *) ELEMENTKEY(curElem);
1445 : }
1446 :
1447 1436622 : hash_seq_term(status);
1448 1436622 : return NULL;
1449 : }
1450 :
1451 51952248 : if ((curElem = status->curEntry) != NULL)
1452 : {
1453 : /* Continuing scan of curBucket... */
1454 15206432 : status->curEntry = curElem->link;
1455 15206432 : if (status->curEntry == NULL) /* end of this bucket */
1456 10599250 : ++status->curBucket;
1457 15206432 : return ELEMENTKEY(curElem);
1458 : }
1459 :
1460 : /*
1461 : * Search for next nonempty bucket starting at curBucket.
1462 : */
1463 36745816 : curBucket = status->curBucket;
1464 36745816 : hashp = status->hashp;
1465 36745816 : hctl = hashp->hctl;
1466 36745816 : ssize = hashp->ssize;
1467 36745816 : max_bucket = hctl->max_bucket;
1468 :
1469 36745816 : if (curBucket > max_bucket)
1470 : {
1471 104756 : hash_seq_term(status);
1472 104756 : return NULL; /* search is done */
1473 : }
1474 :
1475 : /*
1476 : * first find the right segment in the table directory.
1477 : */
1478 36641060 : segment_num = curBucket >> hashp->sshift;
1479 36641060 : segment_ndx = MOD(curBucket, ssize);
1480 :
1481 36641060 : segp = hashp->dir[segment_num];
1482 :
1483 : /*
1484 : * Pick up the first item in this bucket's chain. If chain is not empty
1485 : * we can begin searching it. Otherwise we have to advance to find the
1486 : * next nonempty bucket. We try to optimize that case since searching a
1487 : * near-empty hashtable has to iterate this loop a lot.
1488 : */
1489 187833656 : while ((curElem = segp[segment_ndx]) == NULL)
1490 : {
1491 : /* empty bucket, advance to next */
1492 154065502 : if (++curBucket > max_bucket)
1493 : {
1494 2872906 : status->curBucket = curBucket;
1495 2872906 : hash_seq_term(status);
1496 2872906 : return NULL; /* search is done */
1497 : }
1498 151192596 : if (++segment_ndx >= ssize)
1499 : {
1500 279652 : segment_num++;
1501 279652 : segment_ndx = 0;
1502 279652 : segp = hashp->dir[segment_num];
1503 : }
1504 : }
1505 :
1506 : /* Begin scan of curBucket... */
1507 33768154 : status->curEntry = curElem->link;
1508 33768154 : if (status->curEntry == NULL) /* end of this bucket */
1509 23168534 : ++curBucket;
1510 33768154 : status->curBucket = curBucket;
1511 33768154 : return ELEMENTKEY(curElem);
1512 : }
1513 :
1514 : void
1515 4449598 : hash_seq_term(HASH_SEQ_STATUS *status)
1516 : {
1517 4449598 : if (!status->hashp->frozen)
1518 4449598 : deregister_seq_scan(status->hashp);
1519 4449598 : }
1520 :
1521 : /*
1522 : * hash_freeze
1523 : * Freeze a hashtable against future insertions (deletions are
1524 : * still allowed)
1525 : *
1526 : * The reason for doing this is that by preventing any more bucket splits,
1527 : * we no longer need to worry about registering hash_seq_search scans,
1528 : * and thus caller need not be careful about ensuring hash_seq_term gets
1529 : * called at the right times.
1530 : *
1531 : * Multiple calls to hash_freeze() are allowed, but you can't freeze a table
1532 : * with active scans (since hash_seq_term would then do the wrong thing).
1533 : */
1534 : void
1535 0 : hash_freeze(HTAB *hashp)
1536 : {
1537 0 : if (hashp->isshared)
1538 0 : elog(ERROR, "cannot freeze shared hashtable \"%s\"", hashp->tabname);
1539 0 : if (!hashp->frozen && has_seq_scans(hashp))
1540 0 : elog(ERROR, "cannot freeze hashtable \"%s\" because it has active scans",
1541 : hashp->tabname);
1542 0 : hashp->frozen = true;
1543 0 : }
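/*
 * Sketch of the pattern hash_freeze enables, with hypothetical names.
 * Because a frozen table's scans are never registered (consistent with the
 * !frozen check in hash_seq_term above), the loop below needs no matching
 * hash_seq_term, and an elog(ERROR) escape from the loop body cannot leak
 * a scan registration.
 */
#include "postgres.h"
#include "utils/hsearch.h"

static void
frozen_scan_example(HTAB *htab)	/* htab must be a non-shared table */
{
	HASH_SEQ_STATUS status;
	void	   *entry;

	hash_freeze(htab);			/* no more insertions allowed after this */
	hash_seq_init(&status, htab);
	while ((entry = hash_seq_search(&status)) != NULL)
	{
		/* ... process entry; may error out without scan cleanup ... */
	}
}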
1544 :
1545 :
1546 : /********************************* UTILITIES ************************/
1547 :
1548 : /*
1549 : * Expand the table by adding one more hash bucket.
1550 : */
1551 : static bool
1552 781150 : expand_table(HTAB *hashp)
1553 : {
1554 781150 : HASHHDR *hctl = hashp->hctl;
1555 : HASHSEGMENT old_seg,
1556 : new_seg;
1557 : long old_bucket,
1558 : new_bucket;
1559 : long new_segnum,
1560 : new_segndx;
1561 : long old_segnum,
1562 : old_segndx;
1563 : HASHBUCKET *oldlink,
1564 : *newlink;
1565 : HASHBUCKET currElement,
1566 : nextElement;
1567 :
1568 : Assert(!IS_PARTITIONED(hctl));
1569 :
1570 : #ifdef HASH_STATISTICS
1571 : hash_expansions++;
1572 : #endif
1573 :
1574 781150 : new_bucket = hctl->max_bucket + 1;
1575 781150 : new_segnum = new_bucket >> hashp->sshift;
1576 781150 : new_segndx = MOD(new_bucket, hashp->ssize);
1577 :
1578 781150 : if (new_segnum >= hctl->nsegs)
1579 : {
1580 : /* Allocate new segment if necessary -- could fail if dir full */
1581 2480 : if (new_segnum >= hctl->dsize)
1582 0 : if (!dir_realloc(hashp))
1583 0 : return false;
1584 2480 : if (!(hashp->dir[new_segnum] = seg_alloc(hashp)))
1585 0 : return false;
1586 2480 : hctl->nsegs++;
1587 : }
1588 :
1589 : /* OK, we created a new bucket */
1590 781150 : hctl->max_bucket++;
1591 :
1592 : /*
1593 : * *Before* changing masks, find old bucket corresponding to same hash
1594 : * values; values in that bucket may need to be relocated to new bucket.
1595 : * Note that new_bucket is certainly larger than low_mask at this point,
1596 : * so we can skip the first step of the regular hash mask calc.
1597 : */
1598 781150 : old_bucket = (new_bucket & hctl->low_mask);
1599 :
1600 : /*
1601 : * If we crossed a power of 2, readjust masks.
1602 : */
1603 781150 : if ((uint32) new_bucket > hctl->high_mask)
1604 : {
1605 4914 : hctl->low_mask = hctl->high_mask;
1606 4914 : hctl->high_mask = (uint32) new_bucket | hctl->low_mask;
1607 : }
1608 :
1609 : /*
1610 : * Relocate records to the new bucket. NOTE: because of the way the hash
1611 : * masking is done in calc_bucket, only one old bucket can need to be
1612 : * split at this point. With a different way of reducing the hash value,
1613 : * that might not be true!
1614 : */
1615 781150 : old_segnum = old_bucket >> hashp->sshift;
1616 781150 : old_segndx = MOD(old_bucket, hashp->ssize);
1617 :
1618 781150 : old_seg = hashp->dir[old_segnum];
1619 781150 : new_seg = hashp->dir[new_segnum];
1620 :
1621 781150 : oldlink = &old_seg[old_segndx];
1622 781150 : newlink = &new_seg[new_segndx];
1623 :
1624 1877696 : for (currElement = *oldlink;
1625 : currElement != NULL;
1626 1096546 : currElement = nextElement)
1627 : {
1628 1096546 : nextElement = currElement->link;
1629 1096546 : if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
1630 : {
1631 540780 : *oldlink = currElement;
1632 540780 : oldlink = &currElement->link;
1633 : }
1634 : else
1635 : {
1636 555766 : *newlink = currElement;
1637 555766 : newlink = &currElement->link;
1638 : }
1639 : }
1640 : /* don't forget to terminate the rebuilt hash chains... */
1641 781150 : *oldlink = NULL;
1642 781150 : *newlink = NULL;
1643 :
1644 781150 : return true;
1645 : }
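/*
 * Worked example of the split arithmetic above, with illustrative numbers:
 * suppose max_bucket was 11 before the call, low_mask = 7, high_mask = 15.
 * Then new_bucket = 12; since 12 <= high_mask the masks are unchanged, and
 * old_bucket = 12 & 7 = 4.  Every entry now belonging in bucket 12 must
 * previously have hashed to bucket 4 (calc_bucket reduced h & 15 == 12 to
 * 12 & 7 == 4 while bucket 12 didn't exist), so the relocation loop need
 * only walk bucket 4's chain: entries with h & 15 == 4 stay put, entries
 * with h & 15 == 12 move.  No third outcome is possible because both
 * buckets agree on the low-order bits.
 */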
1646 :
1647 :
1648 : static bool
1649 0 : dir_realloc(HTAB *hashp)
1650 : {
1651 : HASHSEGMENT *p;
1652 : HASHSEGMENT *old_p;
1653 : long new_dsize;
1654 : long old_dirsize;
1655 : long new_dirsize;
1656 :
1657 0 : if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
1658 0 : return false;
1659 :
1660 : /* Reallocate directory */
1661 0 : new_dsize = hashp->hctl->dsize << 1;
1662 0 : old_dirsize = hashp->hctl->dsize * sizeof(HASHSEGMENT);
1663 0 : new_dirsize = new_dsize * sizeof(HASHSEGMENT);
1664 :
1665 0 : old_p = hashp->dir;
1666 0 : CurrentDynaHashCxt = hashp->hcxt;
1667 0 : p = (HASHSEGMENT *) hashp->alloc((Size) new_dirsize);
1668 :
1669 0 : if (p != NULL)
1670 : {
1671 0 : memcpy(p, old_p, old_dirsize);
1672 0 : MemSet(((char *) p) + old_dirsize, 0, new_dirsize - old_dirsize);
1673 0 : hashp->dir = p;
1674 0 : hashp->hctl->dsize = new_dsize;
1675 :
1676 : /* XXX assume the allocator is palloc, so we know how to free */
1677 : Assert(hashp->alloc == DynaHashAlloc);
1678 0 : pfree(old_p);
1679 :
1680 0 : return true;
1681 : }
1682 :
1683 0 : return false;
1684 : }
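/*
 * Illustrative numbers for the doubling above, assuming 8-byte pointers:
 * with dsize = 256, old_dirsize = 2048 bytes and new_dirsize = 4096.  The
 * first 2048 bytes are copied from the old directory and the upper half is
 * zeroed, so unused segment slots read as NULL.
 */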
1685 :
1686 :
1687 : static HASHSEGMENT
1688 1130154 : seg_alloc(HTAB *hashp)
1689 : {
1690 : HASHSEGMENT segp;
1691 :
1692 1130154 : CurrentDynaHashCxt = hashp->hcxt;
1693 1130154 : segp = (HASHSEGMENT) hashp->alloc(sizeof(HASHBUCKET) * hashp->ssize);
1694 :
1695 1130154 : if (!segp)
1696 0 : return NULL;
1697 :
1698 1130154 : MemSet(segp, 0, sizeof(HASHBUCKET) * hashp->ssize);
1699 :
1700 1130154 : return segp;
1701 : }
1702 :
1703 : /*
1704 : * Allocate some new elements and link them into the indicated free list.
1705 : */
1706 : static bool
1707 1034550 : element_alloc(HTAB *hashp, int nelem, int freelist_idx)
1708 : {
1709 1034550 : HASHHDR *hctl = hashp->hctl;
1710 : Size elementSize;
1711 : HASHELEMENT *firstElement;
1712 : HASHELEMENT *tmpElement;
1713 : HASHELEMENT *prevElement;
1714 : int i;
1715 :
1716 1034550 : if (hashp->isfixed)
1717 0 : return false;
1718 :
1719 : /* Each element has a HASHELEMENT header plus user data. */
1720 1034550 : elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);
1721 :
1722 1034550 : CurrentDynaHashCxt = hashp->hcxt;
1723 1034550 : firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize);
1724 :
1725 1034550 : if (!firstElement)
1726 0 : return false;
1727 :
1728 : /* prepare to link all the new entries into the freelist */
1729 1034550 : prevElement = NULL;
1730 1034550 : tmpElement = firstElement;
1731 101712150 : for (i = 0; i < nelem; i++)
1732 : {
1733 100677600 : tmpElement->link = prevElement;
1734 100677600 : prevElement = tmpElement;
1735 100677600 : tmpElement = (HASHELEMENT *) (((char *) tmpElement) + elementSize);
1736 : }
1737 :
1738 : /* if partitioned, must lock to touch freeList */
1739 1034550 : if (IS_PARTITIONED(hctl))
1740 338894 : SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1741 :
1742 : /* freelist could be nonempty if two backends did this concurrently */
1743 1034550 : firstElement->link = hctl->freeList[freelist_idx].freeList;
1744 1034550 : hctl->freeList[freelist_idx].freeList = prevElement;
1745 :
1746 1034550 : if (IS_PARTITIONED(hctl))
1747 338894 : SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1748 :
1749 1034550 : return true;
1750 : }
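/*
 * Illustrative layout arithmetic for the allocation above, assuming a
 * typical 64-bit build (8-byte MAXALIGN, sizeof(HASHELEMENT) padding to
 * 16) and an entrysize of 20: entrysize rounds to 24, so elementSize = 40
 * and the nelem elements sit back to back in one 40 * nelem byte chunk.
 * The loop then chains them in reverse, so prevElement ends up pointing at
 * the last element in the chunk, which becomes the new freelist head.
 */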
1751 :
1752 : /*
1753 : * Do the initial lookup of a bucket for the given hash value, returning the
1754 : * bucket number and a pointer to the bucket's chain header.
1755 : */
1756 : static inline uint32
1757 364005218 : hash_initial_lookup(HTAB *hashp, uint32 hashvalue, HASHBUCKET **bucketptr)
1758 : {
1759 364005218 : HASHHDR *hctl = hashp->hctl;
1760 : HASHSEGMENT segp;
1761 : long segment_num;
1762 : long segment_ndx;
1763 : uint32 bucket;
1764 :
1765 364005218 : bucket = calc_bucket(hctl, hashvalue);
1766 :
1767 364005218 : segment_num = bucket >> hashp->sshift;
1768 364005218 : segment_ndx = MOD(bucket, hashp->ssize);
1769 :
1770 364005218 : segp = hashp->dir[segment_num];
1771 :
1772 364005218 : if (segp == NULL)
1773 0 : hash_corrupted(hashp);
1774 :
1775 364005218 : *bucketptr = &segp[segment_ndx];
1776 364005218 : return bucket;
1777 : }
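/*
 * Worked example of the addressing above, with illustrative parameters
 * ssize = 256 and sshift = 8: bucket 1000 lives in segment 1000 >> 8 = 3,
 * at index MOD(1000, 256) = 232 within that segment, so *bucketptr ends up
 * pointing at hashp->dir[3][232].
 */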
1778 :
1779 : /* complain when we have detected a corrupted hashtable */
1780 : static void
1781 0 : hash_corrupted(HTAB *hashp)
1782 : {
1783 : /*
1784 : * If the corruption is in a shared hashtable, we'd better force a
1785 : * systemwide restart. Otherwise, just shut down this one backend.
1786 : */
1787 0 : if (hashp->isshared)
1788 0 : elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
1789 : else
1790 0 : elog(FATAL, "hash table \"%s\" corrupted", hashp->tabname);
1791 : }
1792 :
1793 : /* calculate ceil(log base 2) of num */
1794 : int
1795 1227470 : my_log2(long num)
1796 : {
1797 : /*
1798 : * guard against too-large input, which would be invalid for
1799 : * pg_ceil_log2_*()
1800 : */
1801 1227470 : if (num > LONG_MAX / 2)
1802 0 : num = LONG_MAX / 2;
1803 :
1804 : #if SIZEOF_LONG < 8
1805 : return pg_ceil_log2_32(num);
1806 : #else
1807 1227470 : return pg_ceil_log2_64(num);
1808 : #endif
1809 : }
1810 :
1811 : /* calculate first power of 2 >= num, bounded to what will fit in a long */
1812 : static long
1813 108164 : next_pow2_long(long num)
1814 : {
1815 : /* my_log2's internal range check is sufficient */
1816 108164 : return 1L << my_log2(num);
1817 : }
1818 :
1819 : /* calculate first power of 2 >= num, bounded to what will fit in an int */
1820 : static int
1821 1079040 : next_pow2_int(long num)
1822 : {
1823 1079040 : if (num > INT_MAX / 2)
1824 0 : num = INT_MAX / 2;
1825 1079040 : return 1 << my_log2(num);
1826 : }
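/*
 * Illustrative values for the helpers above: my_log2(1000) == 10, since
 * 2^9 = 512 < 1000 <= 1024 = 2^10; hence next_pow2_long(1000) == 1024 and
 * next_pow2_int(1000) == 1024.  Inputs beyond the guards are clamped, so
 * neither helper ever tries to compute a power of 2 that would overflow
 * its result type.
 */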
1827 :
1828 :
1829 : /************************* SEQ SCAN TRACKING ************************/
1830 :
1831 : /*
1832 : * We track active hash_seq_search scans here. The need for this mechanism
1833 : * comes from the fact that a scan will get confused if a bucket split occurs
1834 : * while it's in progress: it might visit entries twice, or even miss some
1835 : * entirely (if it's partway through the same bucket that splits). Hence
1836 : * we want to inhibit bucket splits if there are any active scans on the
1837 : * table being inserted into. This is a fairly rare case in current usage,
1838 : * so just postponing the split until the next insertion seems sufficient.
1839 : *
1840 : * Given present usages of the function, only a few scans are likely to be
1841 : * open concurrently; so a finite-size stack of open scans seems sufficient,
1842 : * and we don't worry that linear search is too slow. Note that we do
1843 : * allow multiple scans of the same hashtable to be open concurrently.
1844 : *
1845 : * This mechanism can support concurrent scan and insertion in a shared
1846 : * hashtable if it's the same backend doing both. It would fail otherwise,
1847 : * but locking reasons seem to preclude any such scenario anyway, so we don't
1848 : * worry.
1849 : *
1850 : * This arrangement is reasonably robust if a transient hashtable is deleted
1851 : * without notifying us. The absolute worst case is we might inhibit splits
1852 : * in another table created later at exactly the same address. We will give
1853 : * a warning at transaction end for reference leaks, so any bugs leading to
1854 : * lack of notification should be easy to catch.
1855 : */
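/*
 * Concretely, the insert path consults has_seq_scans() before attempting a
 * split, as the matching execution counts for has_seq_scans() and
 * expand_table() in this listing suggest: a pending split is simply
 * skipped while any scan of the table is registered, and retried on a
 * later insertion.
 */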
1856 :
1857 : #define MAX_SEQ_SCANS 100
1858 :
1859 : static HTAB *seq_scan_tables[MAX_SEQ_SCANS]; /* tables being scanned */
1860 : static int seq_scan_level[MAX_SEQ_SCANS]; /* subtransaction nest level */
1861 : static int num_seq_scans = 0;
1862 :
1863 :
1864 : /* Register a table as having an active hash_seq_search scan */
1865 : static void
1866 4449618 : register_seq_scan(HTAB *hashp)
1867 : {
1868 4449618 : if (num_seq_scans >= MAX_SEQ_SCANS)
1869 0 : elog(ERROR, "too many active hash_seq_search scans, cannot start one on \"%s\"",
1870 : hashp->tabname);
1871 4449618 : seq_scan_tables[num_seq_scans] = hashp;
1872 4449618 : seq_scan_level[num_seq_scans] = GetCurrentTransactionNestLevel();
1873 4449618 : num_seq_scans++;
1874 4449618 : }
1875 :
1876 : /* Deregister an active scan */
1877 : static void
1878 4449598 : deregister_seq_scan(HTAB *hashp)
1879 : {
1880 : int i;
1881 :
1882 : /* Search backward since it's most likely at the stack top */
1883 4449598 : for (i = num_seq_scans - 1; i >= 0; i--)
1884 : {
1885 4449598 : if (seq_scan_tables[i] == hashp)
1886 : {
1887 4449598 : seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
1888 4449598 : seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
1889 4449598 : num_seq_scans--;
1890 4449598 : return;
1891 : }
1892 : }
1893 0 : elog(ERROR, "no hash_seq_search scan for hash table \"%s\"",
1894 : hashp->tabname);
1895 : }
1896 :
1897 : /* Check if a table has any active scan */
1898 : static bool
1899 781150 : has_seq_scans(HTAB *hashp)
1900 : {
1901 : int i;
1902 :
1903 781150 : for (i = 0; i < num_seq_scans; i++)
1904 : {
1905 0 : if (seq_scan_tables[i] == hashp)
1906 0 : return true;
1907 : }
1908 781150 : return false;
1909 : }
1910 :
1911 : /* Clean up any open scans at end of transaction */
1912 : void
1913 865834 : AtEOXact_HashTables(bool isCommit)
1914 : {
1915 : /*
1916 : * During abort cleanup, open scans are expected; just silently clean 'em
1917 : * out. An open scan at commit means someone forgot a hash_seq_term()
1918 : * call, so complain.
1919 : *
1920 : * Note: it's tempting to try to print the tabname here, but refrain for
1921 : * fear of touching deallocated memory. This isn't a user-facing message
1922 : * anyway, so it needn't be pretty.
1923 : */
1924 865834 : if (isCommit)
1925 : {
1926 : int i;
1927 :
1928 816906 : for (i = 0; i < num_seq_scans; i++)
1929 : {
1930 0 : elog(WARNING, "leaked hash_seq_search scan for hash table %p",
1931 : seq_scan_tables[i]);
1932 : }
1933 : }
1934 865834 : num_seq_scans = 0;
1935 865834 : }
1936 :
1937 : /* Clean up any open scans at end of subtransaction */
1938 : void
1939 20052 : AtEOSubXact_HashTables(bool isCommit, int nestDepth)
1940 : {
1941 : int i;
1942 :
1943 : /*
1944 : * Search backward to make cleanup easy. Note we must check all entries,
1945 : * not only those at the end of the array, because the deletion technique
1946 : * doesn't keep them in order.
1947 : */
1948 20052 : for (i = num_seq_scans - 1; i >= 0; i--)
1949 : {
1950 0 : if (seq_scan_level[i] >= nestDepth)
1951 : {
1952 0 : if (isCommit)
1953 0 : elog(WARNING, "leaked hash_seq_search scan for hash table %p",
1954 : seq_scan_tables[i]);
1955 0 : seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
1956 0 : seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
1957 0 : num_seq_scans--;
1958 : }
1959 : }
1960 20052 : }
|