Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * aset.c
4 : * Allocation set definitions.
5 : *
6 : * AllocSet is our standard implementation of the abstract MemoryContext
7 : * type.
8 : *
9 : *
10 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
11 : * Portions Copyright (c) 1994, Regents of the University of California
12 : *
13 : * IDENTIFICATION
14 : * src/backend/utils/mmgr/aset.c
15 : *
16 : * NOTE:
17 : * This is a new (Feb. 05, 1999) implementation of the allocation set
18 : * routines. AllocSet...() does not use OrderedSet...() any more.
19 : * Instead it manages allocations in a block pool by itself, combining
20 : * many small allocations in a few bigger blocks. AllocSetFree() normally
 * doesn't free() memory really. It just adds the free'd area to some
22 : * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23 : * at once on AllocSetReset(), which happens when the memory context gets
24 : * destroyed.
25 : * Jan Wieck
26 : *
27 : * Performance improvement from Tom Lane, 8/99: for extremely large request
28 : * sizes, we do want to be able to give the memory back to free() as soon
29 : * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30 : * freelist entries that might never be usable. This is specially needed
31 : * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32 : * the previous instances of the block were guaranteed to be wasted until
33 : * AllocSetReset() under the old way.
34 : *
35 : * Further improvement 12/00: as the code stood, request sizes in the
36 : * midrange between "small" and "large" were handled very inefficiently,
37 : * because any sufficiently large free chunk would be used to satisfy a
38 : * request, even if it was much larger than necessary. This led to more
39 : * and more wasted space in allocated chunks over time. To fix, get rid
40 : * of the midrange behavior: we now handle only "small" power-of-2-size
41 : * chunks as chunks. Anything "large" is passed off to malloc(). Change
42 : * the number of freelists to change the small/large boundary.
43 : *
44 : *-------------------------------------------------------------------------
45 : */
46 :
47 : #include "postgres.h"
48 :
49 : #include "port/pg_bitutils.h"
50 : #include "utils/memdebug.h"
51 : #include "utils/memutils.h"
52 : #include "utils/memutils_internal.h"
53 : #include "utils/memutils_memorychunk.h"
54 :
55 : /*--------------------
56 : * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57 : * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58 : *
59 : * Note that all chunks in the freelists have power-of-2 sizes. This
60 : * improves recyclability: we may waste some space, but the wasted space
61 : * should stay pretty constant as requests are made and released.
62 : *
63 : * A request too large for the last freelist is handled by allocating a
64 : * dedicated block from malloc(). The block still has a block header and
65 : * chunk header, but when the chunk is freed we'll return the whole block
66 : * to malloc(), not put it on our freelists.
67 : *
68 : * CAUTION: ALLOC_MINBITS must be large enough so that
69 : * 1<<ALLOC_MINBITS is at least MAXALIGN,
70 : * or we may fail to align the smallest chunks adequately.
71 : * 8-byte alignment is enough on all currently known machines. This 8-byte
72 : * minimum also allows us to store a pointer to the next freelist item within
73 : * the chunk of memory itself.
74 : *
75 : * With the current parameters, request sizes up to 8K are treated as chunks,
76 : * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
77 : * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
78 : * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
79 : * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
80 : *--------------------
81 : */
82 :
#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for (8K bytes, given the
 * parameters above) */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */

/* ALLOC_CHUNK_LIMIT must be equal to ALLOCSET_SEPARATE_THRESHOLD */
StaticAssertDecl(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
				 "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
93 :
94 : /*--------------------
95 : * The first block allocated for an allocset has size initBlockSize.
96 : * Each time we have to allocate another block, we double the block size
97 : * (if possible, and without exceeding maxBlockSize), so as to reduce
98 : * the bookkeeping load on malloc().
99 : *
100 : * Blocks allocated to hold oversize chunks do not follow this rule, however;
101 : * they are just however big they need to be to hold that single chunk.
102 : *
103 : * Also, if a minContextSize is specified, the first block has that size,
104 : * and then initBlockSize is used for the next one.
105 : *--------------------
106 : */
107 :
#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(MemoryChunk)
/* Combined size of the context header and the keeper block's header; this is
 * the prefix of the initial malloc chunk that survives a context reset. */
#define FIRST_BLOCKHDRSZ	(MAXALIGN(sizeof(AllocSetContext)) + \
							 ALLOC_BLOCKHDRSZ)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;

/*
 * AllocFreeListLink
 *		When pfreeing memory, if we maintain a freelist for the given chunk's
 *		size then we use a AllocFreeListLink to point to the current item in
 *		the AllocSetContext's freelist and then set the given freelist element
 *		to point to the chunk being freed.
 */
typedef struct AllocFreeListLink
{
	MemoryChunk *next;
} AllocFreeListLink;

/*
 * Obtain a AllocFreeListLink for the given chunk.  Allocation sizes are
 * always at least sizeof(AllocFreeListLink), so we reuse the pointer's memory
 * itself to store the freelist link.
 */
#define GetFreeListLink(chkptr) \
	(AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)

/* Validate a freelist index retrieved from a chunk header */
#define FreeListIdxIsValid(fidx) \
	((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)

/* Determine the size of the chunk based on the freelist index */
#define GetChunkSizeFromFreeListIdx(fidx) \
	((((Size) 1) << ALLOC_MINBITS) << (fidx))
148 :
/*
 * AllocSetContext is our standard implementation of MemoryContext.
 *
 * Note: header.isReset means there is nothing for AllocSetReset to do.
 * This is different from the aset being physically empty (empty blocks list)
 * because we will still have a keeper block.  It's also different from the set
 * being logically empty, because we don't attempt to detect pfree'ing the
 * last active chunk.
 */
typedef struct AllocSetContext
{
	MemoryContextData header;	/* Standard memory-context fields */
	/* Info about storage allocated in this context: */
	AllocBlock	blocks;			/* head of list of blocks in this set */
	MemoryChunk *freelist[ALLOCSET_NUM_FREELISTS];	/* free chunk lists */
	/* Allocation parameters for this context: */
	uint32		initBlockSize;	/* initial block size */
	uint32		maxBlockSize;	/* maximum block size */
	uint32		nextBlockSize;	/* next block size to allocate */
	uint32		allocChunkLimit;	/* effective chunk size limit */
	/* freelist this context could be put in, or -1 if not a candidate: */
	int			freeListIndex;	/* index in context_freelists[], or -1 */
} AllocSetContext;

typedef AllocSetContext *AllocSet;
174 :
/*
 * AllocBlock
 *		An AllocBlock is the unit of memory that is obtained by aset.c
 *		from malloc().  It contains one or more MemoryChunks, which are
 *		the units requested by palloc() and freed by pfree().  MemoryChunks
 *		cannot be returned to malloc() individually, instead they are put
 *		on freelists by pfree() and re-used by the next palloc() that has
 *		a matching request size.
 *
 * AllocBlockData is the header data for a block --- the usable space
 * within the block begins at the next alignment boundary.
 */
typedef struct AllocBlockData
{
	AllocSet	aset;			/* aset that owns this block */
	AllocBlock	prev;			/* prev block in aset's blocks list, if any */
	AllocBlock	next;			/* next block in aset's blocks list, if any */
	char	   *freeptr;		/* start of free space in this block */
	char	   *endptr;			/* end of space in this block */
} AllocBlockData;
195 :
/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) \
	((set) && IsA(set, AllocSetContext))

/*
 * AllocBlockIsValid
 *		True iff block is valid block of allocation set.
 */
#define AllocBlockIsValid(block) \
	((block) && AllocSetIsValid((block)->aset))

/*
 * We always store external chunks on a dedicated block.  This makes fetching
 * the block from an external chunk easy since it's always the first and only
 * chunk on the block.
 */
#define ExternalChunkGetBlock(chunk) \
	(AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)

/*
 * Rather than repeatedly creating and deleting memory contexts, we keep some
 * freed contexts in freelists so that we can hand them out again with little
 * work.  Before putting a context in a freelist, we reset it so that it has
 * only its initial malloc chunk and no others.  To be a candidate for a
 * freelist, a context must have the same minContextSize/initBlockSize as
 * other contexts in the list; but its maxBlockSize is irrelevant since that
 * doesn't affect the size of the initial chunk.
 *
 * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
 * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
 * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
 *
 * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
 * hopes of improving locality of reference.  But if there get to be too
 * many contexts in the list, we'd prefer to drop the most-recently-created
 * contexts in hopes of keeping the process memory map compact.
 * We approximate that by simply deleting all existing entries when the list
 * overflows, on the assumption that queries that allocate a lot of contexts
 * will probably free them in more or less reverse order of allocation.
 *
 * Contexts in a freelist are chained via their nextchild pointers.
 */
#define MAX_FREE_CONTEXTS 100	/* arbitrary limit on freelist length */

/* Obtain the keeper block for an allocation set */
#define KeeperBlock(set) \
	((AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext))))

/* Check if the block is the keeper block of the given allocation set */
#define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set)))

typedef struct AllocSetFreeList
{
	int			num_free;		/* current list length */
	AllocSetContext *first_free;	/* list header */
} AllocSetFreeList;

/* context_freelists[0] is for default params, [1] for small params */
static AllocSetFreeList context_freelists[2] =
{
	{
		0, NULL
	},
	{
		0, NULL
	}
};
266 :
267 :
268 : /* ----------
269 : * AllocSetFreeIndex -
270 : *
271 : * Depending on the size of an allocation compute which freechunk
272 : * list of the alloc set it belongs to. Caller must have verified
273 : * that size <= ALLOC_CHUNK_LIMIT.
274 : * ----------
275 : */
276 : static inline int
277 717184313 : AllocSetFreeIndex(Size size)
278 : {
279 : int idx;
280 :
281 717184313 : if (size > (1 << ALLOC_MINBITS))
282 : {
283 : /*----------
284 : * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
285 : * This is the same as
286 : * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
287 : * or equivalently
288 : * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
289 : *
290 : * However, for platforms without intrinsic support, we duplicate the
291 : * logic here, allowing an additional optimization. It's reasonable
292 : * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
293 : * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
294 : * the last two bytes.
295 : *
296 : * Yes, this function is enough of a hot-spot to make it worth this
297 : * much trouble.
298 : *----------
299 : */
300 : #ifdef HAVE_BITSCAN_REVERSE
301 620702123 : idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
302 : #else
303 : uint32 t,
304 : tsize;
305 :
306 : /* Statically assert that we only have a 16-bit input value. */
307 : StaticAssertDecl(ALLOC_CHUNK_LIMIT < (1 << 16),
308 : "ALLOC_CHUNK_LIMIT must be less than 64kB");
309 :
310 : tsize = size - 1;
311 : t = tsize >> 8;
312 : idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
313 : idx -= ALLOC_MINBITS - 1;
314 : #endif
315 :
316 : Assert(idx < ALLOCSET_NUM_FREELISTS);
317 : }
318 : else
319 96482190 : idx = 0;
320 :
321 717184313 : return idx;
322 : }
323 :
324 :
325 : /*
326 : * Public routines
327 : */
328 :
329 :
/*
 * AllocSetContextCreateInternal
 *		Create a new AllocSet context.
 *
 * parent: parent context, or NULL if top-level context
 * name: name of context (must be statically allocated)
 * minContextSize: minimum context size
 * initBlockSize: initial allocation block size
 * maxBlockSize: maximum allocation block size
 *
 * Returns the new context, recycled from context_freelists[] when possible.
 *
 * Most callers should abstract the context size parameters using a macro
 * such as ALLOCSET_DEFAULT_SIZES.
 *
 * Note: don't call this directly; go through the wrapper macro
 * AllocSetContextCreate.
 */
MemoryContext
AllocSetContextCreateInternal(MemoryContext parent,
							  const char *name,
							  Size minContextSize,
							  Size initBlockSize,
							  Size maxBlockSize)
{
	int			freeListIndex;
	Size		firstBlockSize;
	AllocSet	set;
	AllocBlock	block;

	/* ensure MemoryChunk's size is properly maxaligned */
	StaticAssertDecl(ALLOC_CHUNKHDRSZ == MAXALIGN(ALLOC_CHUNKHDRSZ),
					 "sizeof(MemoryChunk) is not maxaligned");
	/* check we have enough space to store the freelist link */
	StaticAssertDecl(sizeof(AllocFreeListLink) <= (1 << ALLOC_MINBITS),
					 "sizeof(AllocFreeListLink) larger than minimum allocation size");

	/*
	 * First, validate allocation parameters.  Once these were regular runtime
	 * tests and elog's, but in practice Asserts seem sufficient because
	 * nobody varies their parameters at runtime.  We somewhat arbitrarily
	 * enforce a minimum 1K block size.  We restrict the maximum block size to
	 * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
	 * regards to addressing the offset between the chunk and the block that
	 * the chunk is stored on.  We would be unable to store the offset between
	 * the chunk and block for any chunks that were beyond
	 * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
	 * larger than this.
	 */
	Assert(initBlockSize == MAXALIGN(initBlockSize) &&
		   initBlockSize >= 1024);
	Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
		   maxBlockSize >= initBlockSize &&
		   AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
	Assert(minContextSize == 0 ||
		   (minContextSize == MAXALIGN(minContextSize) &&
			minContextSize >= 1024 &&
			minContextSize <= maxBlockSize));
	Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);

	/*
	 * Check whether the parameters match either available freelist.  We do
	 * not need to demand a match of maxBlockSize.
	 */
	if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
		initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
		freeListIndex = 0;
	else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
			 initBlockSize == ALLOCSET_SMALL_INITSIZE)
		freeListIndex = 1;
	else
		freeListIndex = -1;

	/*
	 * If a suitable freelist entry exists, just recycle that context.
	 */
	if (freeListIndex >= 0)
	{
		AllocSetFreeList *freelist = &context_freelists[freeListIndex];

		if (freelist->first_free != NULL)
		{
			/* Remove entry from freelist */
			set = freelist->first_free;
			freelist->first_free = (AllocSet) set->header.nextchild;
			freelist->num_free--;

			/* Update its maxBlockSize; everything else should be OK */
			set->maxBlockSize = maxBlockSize;

			/* Reinitialize its header, installing correct name and parent */
			MemoryContextCreate((MemoryContext) set,
								T_AllocSetContext,
								MCTX_ASET_ID,
								parent,
								name);

			/* Only the keeper block remains after a freelisted reset */
			((MemoryContext) set)->mem_allocated =
				KeeperBlock(set)->endptr - ((char *) set);

			return (MemoryContext) set;
		}
	}

	/* Determine size of initial block */
	firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
		ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
	if (minContextSize != 0)
		firstBlockSize = Max(firstBlockSize, minContextSize);
	else
		firstBlockSize = Max(firstBlockSize, initBlockSize);

	/*
	 * Allocate the initial block.  Unlike other aset.c blocks, it starts with
	 * the context header and its block header follows that.
	 */
	set = (AllocSet) malloc(firstBlockSize);
	if (set == NULL)
	{
		if (TopMemoryContext)
			MemoryContextStats(TopMemoryContext);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of memory"),
				 errdetail("Failed while creating memory context \"%s\".",
						   name)));
	}

	/*
	 * Avoid writing code that can fail between here and MemoryContextCreate;
	 * we'd leak the header/initial block if we ereport in this stretch.
	 */

	/* Create a vpool associated with the context */
	VALGRIND_CREATE_MEMPOOL(set, 0, false);

	/*
	 * Create a vchunk covering both the AllocSetContext struct and the keeper
	 * block's header.  (Perhaps it would be more sensible for these to be two
	 * separate vchunks, but doing that seems to tickle bugs in some versions
	 * of Valgrind.)  We must have these vchunks, and also a vchunk for each
	 * subsequently-added block header, so that Valgrind considers the
	 * pointers within them while checking for leaked memory.  Note that
	 * Valgrind doesn't distinguish between these vchunks and those created by
	 * mcxt.c for the user-accessible-data chunks we allocate.
	 */
	VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);

	/* Fill in the initial block's block header */
	block = KeeperBlock(set);
	block->aset = set;
	block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
	block->endptr = ((char *) set) + firstBlockSize;
	block->prev = NULL;
	block->next = NULL;

	/* Mark unallocated space NOACCESS; leave the block header alone. */
	VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);

	/* Remember block as part of block list */
	set->blocks = block;

	/* Finish filling in aset-specific parts of the context header */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));

	set->initBlockSize = (uint32) initBlockSize;
	set->maxBlockSize = (uint32) maxBlockSize;
	set->nextBlockSize = (uint32) initBlockSize;
	set->freeListIndex = freeListIndex;

	/*
	 * Compute the allocation chunk size limit for this context.  It can't be
	 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
	 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
	 * even a significant fraction of it, should be treated as large chunks
	 * too.  For the typical case of maxBlockSize a power of 2, the chunk size
	 * limit will be at most 1/8th maxBlockSize, so that given a stream of
	 * requests that are all the maximum chunk size we will waste at most
	 * 1/8th of the allocated space.
	 *
	 * Determine the maximum size that a chunk can be before we allocate an
	 * entire AllocBlock dedicated for that chunk.  We set the absolute limit
	 * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
	 * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
	 * sized block.  (We opt to keep allocChunkLimit a power-of-2 value
	 * primarily for legacy reasons rather than calculating it so that exactly
	 * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
	 */
	set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
	while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
		   (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
		set->allocChunkLimit >>= 1;

	/* Finally, do the type-independent part of context creation */
	MemoryContextCreate((MemoryContext) set,
						T_AllocSetContext,
						MCTX_ASET_ID,
						parent,
						name);

	((MemoryContext) set)->mem_allocated = firstBlockSize;

	return (MemoryContext) set;
}
532 :
/*
 * AllocSetReset
 *		Frees all memory which is allocated in the given set.
 *
 * Actually, this routine has some discretion about what to do.
 * It should mark all allocated chunks freed, but it need not necessarily
 * give back all the resources the set owns.  Our actual implementation is
 * that we give back all but the "keeper" block (which we must keep, since
 * it shares a malloc chunk with the context header).  In this way, we don't
 * thrash malloc() when a context is repeatedly reset after small allocations,
 * which is typical behavior for per-tuple contexts.
 */
void
AllocSetReset(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	Size		keepersize PG_USED_FOR_ASSERTS_ONLY;

	Assert(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Remember keeper block size for Assert below */
	keepersize = KeeperBlock(set)->endptr - ((char *) set);

	/* Clear chunk freelists */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));

	block = set->blocks;

	/* New blocks list will be just the keeper block */
	set->blocks = KeeperBlock(set);

	while (block != NULL)
	{
		AllocBlock	next = block->next;

		if (IsKeeperBlock(set, block))
		{
			/* Reset the block, but don't return it to malloc */
			char	   *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;

#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(datastart, block->freeptr - datastart);
#else
			/* wipe_mem() would have done this */
			VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif
			block->freeptr = datastart;
			block->prev = NULL;
			block->next = NULL;
		}
		else
		{
			/* Normal case, release the block */
			context->mem_allocated -= block->endptr - ((char *) block);

#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(block, block->freeptr - ((char *) block));
#endif

			/*
			 * We need to free the block header's vchunk explicitly, although
			 * the user-data vchunks within will go away in the TRIM below.
			 * Otherwise Valgrind complains about leaked allocations.
			 */
			VALGRIND_MEMPOOL_FREE(set, block);

			free(block);
		}
		block = next;
	}

	/* After releasing all non-keeper blocks, only the keeper remains */
	Assert(context->mem_allocated == keepersize);

	/*
	 * Instruct Valgrind to throw away all the vchunks associated with this
	 * context, except for the one covering the AllocSetContext and
	 * keeper-block header.  This gets rid of the vchunks for whatever user
	 * data is getting discarded by the context reset.
	 */
	VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);

	/* Reset block size allocation sequence, too */
	set->nextBlockSize = set->initBlockSize;
}
623 :
/*
 * AllocSetDelete
 *		Frees all memory which is allocated in the given set,
 *		in preparation for deletion of the set.
 *
 * Unlike AllocSetReset, this *must* free all resources of the set.
 * However, a context created with standard parameters may instead be
 * reset and stashed on a context_freelists[] entry for later reuse.
 */
void
AllocSetDelete(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block = set->blocks;
	Size		keepersize PG_USED_FOR_ASSERTS_ONLY;

	Assert(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Remember keeper block size for Assert below */
	keepersize = KeeperBlock(set)->endptr - ((char *) set);

	/*
	 * If the context is a candidate for a freelist, put it into that freelist
	 * instead of destroying it.
	 */
	if (set->freeListIndex >= 0)
	{
		AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];

		/*
		 * Reset the context, if it needs it, so that we aren't hanging on to
		 * more than the initial malloc chunk.
		 */
		if (!context->isReset)
			MemoryContextResetOnly(context);

		/*
		 * If the freelist is full, just discard what's already in it.  See
		 * comments with context_freelists[].
		 */
		if (freelist->num_free >= MAX_FREE_CONTEXTS)
		{
			while (freelist->first_free != NULL)
			{
				AllocSetContext *oldset = freelist->first_free;

				freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
				freelist->num_free--;

				/* Destroy the context's vpool --- see notes below */
				VALGRIND_DESTROY_MEMPOOL(oldset);

				/* All that remains is to free the header/initial block */
				free(oldset);
			}
			Assert(freelist->num_free == 0);
		}

		/* Now add the just-deleted context to the freelist. */
		set->header.nextchild = (MemoryContext) freelist->first_free;
		freelist->first_free = set;
		freelist->num_free++;

		return;
	}

	/* Free all blocks, except the keeper which is part of context header */
	while (block != NULL)
	{
		AllocBlock	next = block->next;

		if (!IsKeeperBlock(set, block))
			context->mem_allocated -= block->endptr - ((char *) block);

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif

		if (!IsKeeperBlock(set, block))
		{
			/* As in AllocSetReset, free block-header vchunks explicitly */
			VALGRIND_MEMPOOL_FREE(set, block);
			free(block);
		}

		block = next;
	}

	Assert(context->mem_allocated == keepersize);

	/*
	 * Destroy the vpool.  We don't seem to need to explicitly free the
	 * initial block's header vchunk, nor any user-data vchunks that Valgrind
	 * still knows about; they'll all go away automatically.
	 */
	VALGRIND_DESTROY_MEMPOOL(set);

	/* Finally, free the context header, including the keeper block */
	free(set);
}
727 :
/*
 * Helper for AllocSetAlloc() that allocates an entire block for the chunk.
 *
 * Used for requests exceeding set->allocChunkLimit: the chunk gets its own
 * dedicated malloc'd block, marked "external" so pfree can return the whole
 * block to malloc() immediately.
 *
 * AllocSetAlloc()'s comment explains why this is separate.
 */
pg_noinline
static void *
AllocSetAllocLarge(MemoryContext context, Size size, int flags)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	MemoryChunk *chunk;
	Size		chunk_size;
	Size		blksize;

	/* validate 'size' is within the limits for the given 'flags' */
	MemoryContextCheckSize(context, size, flags);

#ifdef MEMORY_CONTEXT_CHECKING
	/* ensure there's always space for the sentinel byte */
	chunk_size = MAXALIGN(size + 1);
#else
	chunk_size = MAXALIGN(size);
#endif

	blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
	block = (AllocBlock) malloc(blksize);
	if (block == NULL)
		return MemoryContextAllocationFailure(context, size, flags);

	/* Make a vchunk covering the new block's header */
	VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);

	context->mem_allocated += blksize;

	block->aset = set;
	/* the dedicated block is entirely used by this one chunk */
	block->freeptr = block->endptr = ((char *) block) + blksize;

	chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);

	/* mark the MemoryChunk as externally managed */
	MemoryChunkSetHdrMaskExternal(chunk, MCTX_ASET_ID);

#ifdef MEMORY_CONTEXT_CHECKING
	chunk->requested_size = size;
	/* set mark to catch clobber of "unused" space */
	Assert(size < chunk_size);
	set_sentinel(MemoryChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
#endif

	/*
	 * Stick the new block underneath the active allocation block, if any, so
	 * that we don't lose the use of the space remaining therein.
	 */
	if (set->blocks != NULL)
	{
		block->prev = set->blocks;
		block->next = set->blocks->next;
		if (block->next)
			block->next->prev = block;
		set->blocks->next = block;
	}
	else
	{
		block->prev = NULL;
		block->next = NULL;
		set->blocks = block;
	}

	/* Ensure any padding bytes are marked NOACCESS. */
	VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
							   chunk_size - size);

	/* Disallow access to the chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

	return MemoryChunkGetPointer(chunk);
}
810 :
/*
 * Small helper for allocating a new chunk from a chunk, to avoid duplicating
 * the code between AllocSetAlloc() and AllocSetAllocFromNewBlock().
 *
 * Carves a chunk of 'chunk_size' bytes (plus header) off the free space at
 * block->freeptr; caller must have verified the block has room.  'fidx' is
 * the freelist index matching chunk_size, stored in the chunk header so
 * pfree knows which freelist the chunk belongs on.
 */
static inline void *
AllocSetAllocChunkFromBlock(MemoryContext context, AllocBlock block,
							Size size, Size chunk_size, int fidx)
{
	MemoryChunk *chunk;

	chunk = (MemoryChunk *) (block->freeptr);

	/* Prepare to initialize the chunk header. */
	VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

	block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
	Assert(block->freeptr <= block->endptr);

	/* store the free list index in the value field */
	MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);

#ifdef MEMORY_CONTEXT_CHECKING
	chunk->requested_size = size;
	/* set mark to catch clobber of "unused" space */
	if (size < chunk_size)
		set_sentinel(MemoryChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
#endif

	/* Ensure any padding bytes are marked NOACCESS. */
	VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
							   chunk_size - size);

	/* Disallow access to the chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

	return MemoryChunkGetPointer(chunk);
}
852 :
/*
 * Helper for AllocSetAlloc() that allocates a new block and returns a chunk
 * allocated from it.
 *
 * AllocSetAlloc()'s comment explains why this is separate.
 *
 * Returns a pointer to chunk_size (as implied by 'fidx') usable bytes, or
 * delegates to MemoryContextAllocationFailure() if no block can be
 * malloc'd; 'flags' determines whether that raises an ERROR or returns
 * NULL (MCXT_ALLOC_NO_OOM).
 */
pg_noinline
static void *
AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags,
						  int fidx)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	Size		availspace;
	Size		blksize;
	Size		required_size;
	Size		chunk_size;

	/* due to the keeper block set->blocks should always be valid */
	Assert(set->blocks != NULL);
	block = set->blocks;
	availspace = block->endptr - block->freeptr;

	/*
	 * The existing active (top) block does not have enough room for the
	 * requested allocation, but it might still have a useful amount of space
	 * in it.  Once we push it down in the block list, we'll never try to
	 * allocate more space from it. So, before we do that, carve up its free
	 * space into chunks that we can put on the set's freelists.
	 *
	 * Because we can only get here when there's less than ALLOC_CHUNK_LIMIT
	 * left in the block, this loop cannot iterate more than
	 * ALLOCSET_NUM_FREELISTS-1 times.
	 */
	while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
	{
		AllocFreeListLink *link;
		MemoryChunk *chunk;
		Size		availchunk = availspace - ALLOC_CHUNKHDRSZ;
		int			a_fidx = AllocSetFreeIndex(availchunk);

		/*
		 * In most cases, we'll get back the index of the next larger freelist
		 * than the one we need to put this chunk on.  The exception is when
		 * availchunk is exactly a power of 2.
		 */
		if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
		{
			/* round *down* to the largest freelist size that fits */
			a_fidx--;
			Assert(a_fidx >= 0);
			availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
		}

		chunk = (MemoryChunk *) (block->freeptr);

		/* Prepare to initialize the chunk header. */
		VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
		block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
		availspace -= (availchunk + ALLOC_CHUNKHDRSZ);

		/* store the freelist index in the value field */
		MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
#ifdef MEMORY_CONTEXT_CHECKING
		chunk->requested_size = InvalidAllocSize;	/* mark it free */
#endif
		/* push this chunk onto the free list */
		link = GetFreeListLink(chunk);

		VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
		link->next = set->freelist[a_fidx];
		VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));

		set->freelist[a_fidx] = chunk;
	}

	/*
	 * The first such block has size initBlockSize, and we double the space in
	 * each succeeding block, but not more than maxBlockSize.
	 */
	blksize = set->nextBlockSize;
	set->nextBlockSize <<= 1;
	if (set->nextBlockSize > set->maxBlockSize)
		set->nextBlockSize = set->maxBlockSize;

	/* Choose the actual chunk size to allocate */
	chunk_size = GetChunkSizeFromFreeListIdx(fidx);
	Assert(chunk_size >= size);

	/*
	 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
	 * space... but try to keep it a power of 2.
	 */
	required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
	while (blksize < required_size)
		blksize <<= 1;

	/* Try to allocate it */
	block = (AllocBlock) malloc(blksize);

	/*
	 * We could be asking for pretty big blocks here, so cope if malloc fails.
	 * But give up if there's less than 1 MB or so available...
	 */
	while (block == NULL && blksize > 1024 * 1024)
	{
		blksize >>= 1;
		if (blksize < required_size)
			break;
		block = (AllocBlock) malloc(blksize);
	}

	if (block == NULL)
		return MemoryContextAllocationFailure(context, size, flags);

	/* Make a vchunk covering the new block's header */
	VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);

	context->mem_allocated += blksize;

	block->aset = set;
	block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
	block->endptr = ((char *) block) + blksize;

	/* Mark unallocated space NOACCESS. */
	VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
							   blksize - ALLOC_BLOCKHDRSZ);

	/* Make the new block the active (top) block of the set */
	block->prev = NULL;
	block->next = set->blocks;
	if (block->next)
		block->next->prev = block;
	set->blocks = block;

	return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
}
988 :
/*
 * AllocSetAlloc
 *		Returns a pointer to allocated memory of given size or raises an ERROR
 *		on allocation failure, or returns NULL when flags contains
 *		MCXT_ALLOC_NO_OOM.
 *
 * No request may exceed:
 *		MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
 * All callers use a much-lower limit.
 *
 * Note: when using valgrind, it doesn't matter how the returned allocation
 * is marked, as mcxt.c will set it to UNDEFINED.  In some paths we will
 * return space that is marked NOACCESS - AllocSetRealloc has to beware!
 *
 * This function should only contain the most common code paths.  Everything
 * else should be in pg_noinline helper functions, thus avoiding the overhead
 * of creating a stack frame for the common cases.  Allocating memory is often
 * a bottleneck in many workloads, so avoiding stack frame setup is
 * worthwhile.  Helper functions should always directly return the newly
 * allocated memory so that we can just return that address directly as a tail
 * call.
 */
void *
AllocSetAlloc(MemoryContext context, Size size, int flags)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	MemoryChunk *chunk;
	int			fidx;
	Size		chunk_size;
	Size		availspace;

	Assert(AllocSetIsValid(set));

	/* due to the keeper block set->blocks should never be NULL */
	Assert(set->blocks != NULL);

	/*
	 * If requested size exceeds maximum for chunks we hand the request off to
	 * AllocSetAllocLarge().
	 */
	if (size > set->allocChunkLimit)
		return AllocSetAllocLarge(context, size, flags);

	/*
	 * Request is small enough to be treated as a chunk.  Look in the
	 * corresponding free list to see if there is a free chunk we could reuse.
	 * If one is found, remove it from the free list, make it again a member
	 * of the alloc set and return its data address.
	 *
	 * Note that we don't attempt to ensure there's space for the sentinel
	 * byte here.  We expect a large proportion of allocations to be for sizes
	 * which are already a power of 2.  If we were to always make space for a
	 * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
	 * doubling the memory requirements for such allocations.
	 */
	fidx = AllocSetFreeIndex(size);
	chunk = set->freelist[fidx];
	if (chunk != NULL)
	{
		AllocFreeListLink *link = GetFreeListLink(chunk);

		/* Allow access to the chunk header. */
		VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);

		Assert(fidx == MemoryChunkGetValue(chunk));

		/* pop this chunk off the freelist */
		VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
		set->freelist[fidx] = link->next;
		VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));

#ifdef MEMORY_CONTEXT_CHECKING
		chunk->requested_size = size;
		/* set mark to catch clobber of "unused" space */
		if (size < GetChunkSizeFromFreeListIdx(fidx))
			set_sentinel(MemoryChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
#endif

		/* Ensure any padding bytes are marked NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
								   GetChunkSizeFromFreeListIdx(fidx) - size);

		/* Disallow access to the chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

		return MemoryChunkGetPointer(chunk);
	}

	/*
	 * Choose the actual chunk size to allocate.
	 */
	chunk_size = GetChunkSizeFromFreeListIdx(fidx);
	Assert(chunk_size >= size);

	block = set->blocks;
	availspace = block->endptr - block->freeptr;

	/*
	 * If there is enough room in the active allocation block, we will put the
	 * chunk into that block.  Else must start a new one.
	 */
	if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
		return AllocSetAllocFromNewBlock(context, size, flags, fidx);

	/* There's enough space on the current block, so allocate from that */
	return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
}
1101 :
/*
 * AllocSetFree
 *		Frees allocated memory; memory is removed from the set.
 *
 * External (single-chunk) blocks are returned to the OS immediately via
 * free(); normal chunks are merely pushed onto the owning set's freelist
 * for reuse by a later AllocSetAlloc().
 */
void
AllocSetFree(void *pointer)
{
	AllocSet	set;
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);

	/* Allow access to the chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);

	if (MemoryChunkIsExternal(chunk))
	{
		/* Release single-chunk block. */
		AllocBlock	block = ExternalChunkGetBlock(chunk);

		/*
		 * Try to verify that we have a sane block pointer: the block header
		 * should reference an aset and the freeptr should match the endptr.
		 */
		if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
			elog(ERROR, "could not find block containing chunk %p", chunk);

		set = block->aset;

#ifdef MEMORY_CONTEXT_CHECKING
		{
			/* Test for someone scribbling on unused space in chunk */
			Assert(chunk->requested_size < (block->endptr - (char *) pointer));
			if (!sentinel_ok(pointer, chunk->requested_size))
				elog(WARNING, "detected write past chunk end in %s %p",
					 set->header.name, chunk);
		}
#endif

		/*
		 * OK, remove block from aset's list and free it.  An external block
		 * is never the list head, since the keeper block stays at the head
		 * and new single-chunk blocks are inserted after it; the else branch
		 * is defensive.
		 */
		if (block->prev)
			block->prev->next = block->next;
		else
			set->blocks = block->next;
		if (block->next)
			block->next->prev = block->prev;

		set->header.mem_allocated -= block->endptr - ((char *) block);

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif

		/* As in AllocSetReset, free block-header vchunks explicitly */
		VALGRIND_MEMPOOL_FREE(set, block);

		free(block);
	}
	else
	{
		AllocBlock	block = MemoryChunkGetBlock(chunk);
		int			fidx;
		AllocFreeListLink *link;

		/*
		 * In this path, for speed reasons we just Assert that the referenced
		 * block is good.  We can also Assert that the value field is sane.
		 * Future field experience may show that these Asserts had better
		 * become regular runtime test-and-elog checks.
		 */
		Assert(AllocBlockIsValid(block));
		set = block->aset;

		fidx = MemoryChunkGetValue(chunk);
		Assert(FreeListIdxIsValid(fidx));
		link = GetFreeListLink(chunk);

#ifdef MEMORY_CONTEXT_CHECKING
		/* Test for someone scribbling on unused space in chunk */
		if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
			if (!sentinel_ok(pointer, chunk->requested_size))
				elog(WARNING, "detected write past chunk end in %s %p",
					 set->header.name, chunk);
#endif

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
#endif
		/* push this chunk onto the top of the free list */
		VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
		link->next = set->freelist[fidx];
		VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
		set->freelist[fidx] = chunk;

#ifdef MEMORY_CONTEXT_CHECKING

		/*
		 * Reset requested_size to InvalidAllocSize in chunks that are on free
		 * list.
		 */
		chunk->requested_size = InvalidAllocSize;
#endif
	}
}
1204 :
/*
 * AllocSetRealloc
 *		Returns new pointer to allocated memory of given size or NULL if
 *		request could not be completed; this memory is added to the set.
 *		Memory associated with given pointer is copied into the new memory,
 *		and the old memory is freed.
 *
 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size.  This
 * makes our Valgrind client requests less-precise, hazarding false negatives.
 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
 * request size.)
 *
 * Three cases are handled: (1) the chunk is an external single-chunk block,
 * which is resized in place with realloc(); (2) the new size still fits in
 * the existing power-of-2 chunk, so only bookkeeping changes; (3) the chunk
 * must grow, which is done by allocating a new chunk and copying.
 */
void *
AllocSetRealloc(void *pointer, Size size, int flags)
{
	AllocBlock	block;
	AllocSet	set;
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
	Size		oldchksize;
	int			fidx;

	/* Allow access to the chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);

	if (MemoryChunkIsExternal(chunk))
	{
		/*
		 * The chunk must have been allocated as a single-chunk block.  Use
		 * realloc() to make the containing block bigger, or smaller, with
		 * minimum space wastage.
		 */
		AllocBlock	newblock;
		Size		chksize;
		Size		blksize;
		Size		oldblksize;

		block = ExternalChunkGetBlock(chunk);

		/*
		 * Try to verify that we have a sane block pointer: the block header
		 * should reference an aset and the freeptr should match the endptr.
		 */
		if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
			elog(ERROR, "could not find block containing chunk %p", chunk);

		set = block->aset;

		/* only check size in paths where the limits could be hit */
		MemoryContextCheckSize((MemoryContext) set, size, flags);

		oldchksize = block->endptr - (char *) pointer;

#ifdef MEMORY_CONTEXT_CHECKING
		/* Test for someone scribbling on unused space in chunk */
		Assert(chunk->requested_size < oldchksize);
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

#ifdef MEMORY_CONTEXT_CHECKING
		/* ensure there's always space for the sentinel byte */
		chksize = MAXALIGN(size + 1);
#else
		chksize = MAXALIGN(size);
#endif

		/* Do the realloc */
		blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		oldblksize = block->endptr - ((char *) block);

		newblock = (AllocBlock) realloc(block, blksize);
		if (newblock == NULL)
		{
			/* realloc failed: old block is still intact and usable */
			/* Disallow access to the chunk header. */
			VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
			return MemoryContextAllocationFailure(&set->header, size, flags);
		}

		/*
		 * Move the block-header vchunk explicitly.  (mcxt.c will take care of
		 * moving the vchunk for the user data.)
		 */
		VALGRIND_MEMPOOL_CHANGE(set, block, newblock, ALLOC_BLOCKHDRSZ);
		block = newblock;

		/* updated separately, not to underflow when (oldblksize > blksize) */
		set->header.mem_allocated -= oldblksize;
		set->header.mem_allocated += blksize;

		block->freeptr = block->endptr = ((char *) block) + blksize;

		/* Update pointers since block has likely been moved */
		chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
		pointer = MemoryChunkGetPointer(chunk);
		if (block->prev)
			block->prev->next = block;
		else
			set->blocks = block;
		if (block->next)
			block->next->prev = block;

#ifdef MEMORY_CONTEXT_CHECKING
#ifdef RANDOMIZE_ALLOCATED_MEMORY

		/*
		 * We can only randomize the extra space if we know the prior request.
		 * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
		 */
		if (size > chunk->requested_size)
			randomize_mem((char *) pointer + chunk->requested_size,
						  size - chunk->requested_size);
#else

		/*
		 * If this is an increase, realloc() will have marked any
		 * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
		 * also need to adjust trailing bytes from the old allocation (from
		 * chunk->requested_size to oldchksize) as they are marked NOACCESS.
		 * Make sure not to mark too many bytes in case chunk->requested_size
		 * < size < oldchksize.
		 */
#ifdef USE_VALGRIND
		if (Min(size, oldchksize) > chunk->requested_size)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
										Min(size, oldchksize) - chunk->requested_size);
#endif
#endif

		chunk->requested_size = size;
		/* set mark to catch clobber of "unused" space */
		Assert(size < chksize);
		set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We may need to adjust marking of bytes from the old allocation as
		 * some of them may be marked NOACCESS.  We don't know how much of the
		 * old chunk size was the requested size; it could have been as small
		 * as one byte.  We have to be conservative and just mark the entire
		 * old portion DEFINED.  Make sure not to mark memory beyond the new
		 * allocation in case it's smaller than the old one.
		 */
		VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
#endif

		/* Ensure any padding bytes are marked NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);

		/* Disallow access to the chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

		return pointer;
	}

	block = MemoryChunkGetBlock(chunk);

	/*
	 * In this path, for speed reasons we just Assert that the referenced
	 * block is good.  We can also Assert that the value field is sane.  Future
	 * field experience may show that these Asserts had better become regular
	 * runtime test-and-elog checks.
	 */
	Assert(AllocBlockIsValid(block));
	set = block->aset;

	fidx = MemoryChunkGetValue(chunk);
	Assert(FreeListIdxIsValid(fidx));
	oldchksize = GetChunkSizeFromFreeListIdx(fidx);

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < oldchksize)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	/*
	 * Chunk sizes are aligned to power of 2 in AllocSetAlloc().  Maybe the
	 * allocated area already is >= the new size.  (In particular, we will
	 * fall out here if the requested size is a decrease.)
	 */
	if (oldchksize >= size)
	{
#ifdef MEMORY_CONTEXT_CHECKING
		Size		oldrequest = chunk->requested_size;

#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		if (size > oldrequest)
			randomize_mem((char *) pointer + oldrequest,
						  size - oldrequest);
#endif

		chunk->requested_size = size;

		/*
		 * If this is an increase, mark any newly-available part UNDEFINED.
		 * Otherwise, mark the obsolete part NOACCESS.
		 */
		if (size > oldrequest)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
										size - oldrequest);
		else
			VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
									   oldchksize - size);

		/* set mark to catch clobber of "unused" space */
		if (size < oldchksize)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't have the information to determine whether we're growing
		 * the old request or shrinking it, so we conservatively mark the
		 * entire new allocation DEFINED.
		 */
		VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
		VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif

		/* Disallow access to the chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

		return pointer;
	}
	else
	{
		/*
		 * Enlarge-a-small-chunk case.  We just do this by brute force, ie,
		 * allocate a new chunk and copy the data.  Since we know the existing
		 * data isn't huge, this won't involve any great memcpy expense, so
		 * it's not worth being smarter.  (At one time we tried to avoid
		 * memcpy when it was possible to enlarge the chunk in-place, but that
		 * turns out to misbehave unpleasantly for repeated cycles of
		 * palloc/repalloc/pfree: the eventually freed chunks go into the
		 * wrong freelist for the next initial palloc request, and so we leak
		 * memory indefinitely.  See pgsql-hackers archives for 2007-08-11.)
		 */
		AllocPointer newPointer;
		Size		oldsize;

		/* allocate new chunk (this also checks size is valid) */
		newPointer = AllocSetAlloc((MemoryContext) set, size, flags);

		/* leave immediately if request was not completed */
		if (newPointer == NULL)
		{
			/* Disallow access to the chunk header. */
			VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
			return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
		}

		/*
		 * AllocSetAlloc() may have returned a region that is still NOACCESS.
		 * Change it to UNDEFINED for the moment; memcpy() will then transfer
		 * definedness from the old allocation to the new.  If we know the old
		 * allocation, copy just that much.  Otherwise, make the entire old
		 * chunk defined to avoid errors as we copy the currently-NOACCESS
		 * trailing bytes.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
		oldsize = chunk->requested_size;
#else
		oldsize = oldchksize;
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* transfer existing data (certain to fit) */
		memcpy(newPointer, pointer, oldsize);

		/* free old chunk */
		AllocSetFree(pointer);

		return newPointer;
	}
}
1484 :
1485 : /*
1486 : * AllocSetGetChunkContext
1487 : * Return the MemoryContext that 'pointer' belongs to.
1488 : */
1489 : MemoryContext
1490 1923183 : AllocSetGetChunkContext(void *pointer)
1491 : {
1492 1923183 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1493 : AllocBlock block;
1494 : AllocSet set;
1495 :
1496 : /* Allow access to the chunk header. */
1497 : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1498 :
1499 1923183 : if (MemoryChunkIsExternal(chunk))
1500 0 : block = ExternalChunkGetBlock(chunk);
1501 : else
1502 1923183 : block = (AllocBlock) MemoryChunkGetBlock(chunk);
1503 :
1504 : /* Disallow access to the chunk header. */
1505 : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1506 :
1507 : Assert(AllocBlockIsValid(block));
1508 1923183 : set = block->aset;
1509 :
1510 1923183 : return &set->header;
1511 : }
1512 :
/*
 * AllocSetGetChunkSpace
 *		Given a currently-allocated chunk, determine the total space
 *		it occupies (including all memory-allocation overhead).
 */
Size
AllocSetGetChunkSpace(void *pointer)
{
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
	int			fidx;

	/* Allow access to the chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);

	if (MemoryChunkIsExternal(chunk))
	{
		/* External chunk: it occupies its dedicated block by itself. */
		AllocBlock	block = ExternalChunkGetBlock(chunk);

		/* Disallow access to the chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

		Assert(AllocBlockIsValid(block));

		/* everything from the chunk header through the block's end */
		return block->endptr - (char *) chunk;
	}

	/* Normal chunk: size is implied by its freelist index. */
	fidx = MemoryChunkGetValue(chunk);
	Assert(FreeListIdxIsValid(fidx));

	/* Disallow access to the chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

	return GetChunkSizeFromFreeListIdx(fidx) + ALLOC_CHUNKHDRSZ;
}
1547 :
1548 : /*
1549 : * AllocSetIsEmpty
1550 : * Is an allocset empty of any allocated space?
1551 : */
1552 : bool
1553 5385 : AllocSetIsEmpty(MemoryContext context)
1554 : {
1555 : Assert(AllocSetIsValid(context));
1556 :
1557 : /*
1558 : * For now, we say "empty" only if the context is new or just reset. We
1559 : * could examine the freelists to determine if all space has been freed,
1560 : * but it's not really worth the trouble for present uses of this
1561 : * functionality.
1562 : */
1563 5385 : if (context->isReset)
1564 5370 : return true;
1565 15 : return false;
1566 : }
1567 :
/*
 * AllocSetStats
 *		Compute stats about memory consumption of an allocset.
 *
 * printfunc: if not NULL, pass a human-readable stats string to this.
 * passthru: pass this pointer through to printfunc.
 * totals: if not NULL, add stats about this context into *totals.
 * print_to_stderr: print stats to stderr if true, elog otherwise.
 */
void
AllocSetStats(MemoryContext context,
			  MemoryStatsPrintFunc printfunc, void *passthru,
			  MemoryContextCounters *totals, bool print_to_stderr)
{
	AllocSet	set = (AllocSet) context;
	Size		nblocks = 0;
	Size		freechunks = 0;
	Size		totalspace;
	Size		freespace = 0;
	AllocBlock	block;
	int			fidx;

	Assert(AllocSetIsValid(set));

	/* Include context header in totalspace */
	totalspace = MAXALIGN(sizeof(AllocSetContext));

	/* Sum up block space; untouched block tails count as free. */
	for (block = set->blocks; block != NULL; block = block->next)
	{
		nblocks++;
		totalspace += block->endptr - ((char *) block);
		freespace += block->endptr - block->freeptr;
	}

	/* Add in chunks sitting on the freelists. */
	for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
	{
		Size		chksz = GetChunkSizeFromFreeListIdx(fidx);
		MemoryChunk *chunk = set->freelist[fidx];

		while (chunk != NULL)
		{
			AllocFreeListLink *link = GetFreeListLink(chunk);

			/* Allow access to the chunk header. */
			VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
			Assert(MemoryChunkGetValue(chunk) == fidx);
			VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

			freechunks++;
			freespace += chksz + ALLOC_CHUNKHDRSZ;

			VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
			chunk = link->next;
			VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
		}
	}

	if (printfunc)
	{
		char		stats_string[200];

		snprintf(stats_string, sizeof(stats_string),
				 "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
				 totalspace, nblocks, freespace, freechunks,
				 totalspace - freespace);
		printfunc(context, passthru, stats_string, print_to_stderr);
	}

	if (totals)
	{
		totals->nblocks += nblocks;
		totals->freechunks += freechunks;
		totals->totalspace += totalspace;
		totals->freespace += freespace;
	}
}
1643 :
1644 :
1645 : #ifdef MEMORY_CONTEXT_CHECKING
1646 :
1647 : /*
1648 : * AllocSetCheck
1649 : * Walk through chunks and check consistency of memory.
1650 : *
1651 : * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1652 : * find yourself in an infinite loop when trouble occurs, because this
1653 : * routine will be entered again when elog cleanup tries to release memory!
1654 : */
1655 : void
1656 : AllocSetCheck(MemoryContext context)
1657 : {
1658 : AllocSet set = (AllocSet) context;
1659 : const char *name = set->header.name;
1660 : AllocBlock prevblock;
1661 : AllocBlock block;
1662 : Size total_allocated = 0; /* running sum, cross-checked against context->mem_allocated at end */
1663 :
1664 : for (prevblock = NULL, block = set->blocks;
1665 : block != NULL;
1666 : prevblock = block, block = block->next)
1667 : {
1668 : char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ; /* walk pointer: first chunk header in block */
1669 : Size blk_used = block->freeptr - bpoz; /* bytes consumed by chunk headers + chunk data */
1670 : Size blk_data = 0; /* accumulated chunk data sizes (excl. headers) */
1671 : Size nchunks = 0;
1672 : bool has_external_chunk = false; /* an external chunk must be alone on its block; see check below */
1673 :
1674 : if (IsKeeperBlock(set, block))
1675 : total_allocated += block->endptr - ((char *) set); /* keeper block is embedded in the set header allocation */
1676 : else
1677 : total_allocated += block->endptr - ((char *) block);
1678 :
1679 : /*
1680 : * Empty block - empty can be keeper-block only
1681 : */
1682 : if (!blk_used)
1683 : {
1684 : if (!IsKeeperBlock(set, block))
1685 : elog(WARNING, "problem in alloc set %s: empty block %p",
1686 : name, block);
1687 : }
1688 :
1689 : /*
1690 : * Check block header fields
1691 : */
1692 : if (block->aset != set ||
1693 : block->prev != prevblock ||
1694 : block->freeptr < bpoz ||
1695 : block->freeptr > block->endptr)
1696 : elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1697 : name, block);
1698 :
1699 : /*
1700 : * Chunk walker
1701 : */
1702 : while (bpoz < block->freeptr)
1703 : {
1704 : MemoryChunk *chunk = (MemoryChunk *) bpoz;
1705 : Size chsize,
1706 : dsize;
1707 :
1708 : /* Allow access to the chunk header. */
1709 : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1710 :
1711 : if (MemoryChunkIsExternal(chunk))
1712 : {
1713 : chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
1714 : has_external_chunk = true;
1715 :
1716 : /* make sure this chunk consumes the entire block */
1717 : if (chsize + ALLOC_CHUNKHDRSZ != blk_used)
1718 : elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1719 : name, chunk, block);
1720 : }
1721 : else
1722 : {
1723 : int fidx = MemoryChunkGetValue(chunk); /* non-external chunks store their freelist index */
1724 :
1725 : if (!FreeListIdxIsValid(fidx))
1726 : elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
1727 : name, chunk, block);
1728 :
1729 : chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */
1730 :
1731 : /*
1732 : * Check the stored block offset correctly references this
1733 : * block.
1734 : */
1735 : if (block != MemoryChunkGetBlock(chunk))
1736 : elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
1737 : name, chunk, block);
1738 : }
1739 : dsize = chunk->requested_size; /* real data */
1740 :
1741 : /* an allocated chunk's requested size must be <= the chsize */
1742 : if (dsize != InvalidAllocSize && dsize > chsize)
1743 : elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1744 : name, chunk, block);
1745 :
1746 : /* chsize must not be smaller than the first freelist's size */
1747 : if (chsize < (1 << ALLOC_MINBITS))
1748 : elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1749 : name, chsize, chunk, block);
1750 :
1751 : /*
1752 : * Check for overwrite of padding space in an allocated chunk.
1753 : */
1754 : if (dsize != InvalidAllocSize && dsize < chsize &&
1755 : !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
1756 : elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1757 : name, block, chunk);
1758 :
1759 : /* if chunk is allocated, disallow access to the chunk header */
1760 : if (dsize != InvalidAllocSize)
1761 : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1762 :
1763 : blk_data += chsize;
1764 : nchunks++;
1765 :
1766 : bpoz += ALLOC_CHUNKHDRSZ + chsize; /* advance to the next chunk header */
1767 : }
1768 :
1769 : if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used) /* chunk walk must account for every used byte */
1770 : elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1771 : name, block);
1772 :
1773 : if (has_external_chunk && nchunks > 1)
1774 : elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
1775 : name, block);
1776 : }
1777 :
1778 : Assert(total_allocated == context->mem_allocated);
1779 : }
1780 :
1781 : #endif /* MEMORY_CONTEXT_CHECKING */
|