Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * slab.c
4 : * SLAB allocator definitions.
5 : *
6 : * SLAB is a MemoryContext implementation designed for cases where large
7 : * numbers of equally-sized objects can be allocated and freed efficiently
8 : * with minimal memory wastage and fragmentation.
9 : *
10 : *
11 : * Portions Copyright (c) 2017-2025, PostgreSQL Global Development Group
12 : *
13 : * IDENTIFICATION
14 : * src/backend/utils/mmgr/slab.c
15 : *
16 : *
17 : * NOTE:
18 : * The constant allocation size allows significant simplification and various
19 : * optimizations over more general purpose allocators. The blocks are carved
20 : * into chunks of exactly the right size, wasting only the space required to
21 : * MAXALIGN the allocated chunks.
22 : *
23 : * Slab can also help reduce memory fragmentation in cases where longer-lived
24 : * chunks remain stored on blocks while most of the other chunks have already
25 : * been pfree'd. We give priority to putting new allocations into the
26 : * "fullest" block. This helps avoid having too many sparsely used blocks
27 : * around and allows blocks to more easily become completely unused, which
28 : * allows them to eventually be free'd.
29 : *
30 : * We identify the "fullest" block to put new allocations on by using a block
31 : * from the lowest populated element of the context's "blocklist" array.
32 : * This is an array of dlists containing blocks which we partition by the
33 : * number of free chunks each block has. Blocks with fewer free chunks are
34 : * stored in a lower indexed dlist array slot. Full blocks go on the 0th
35 : * element of the blocklist array. So that we don't need too many
36 : * elements in the array, each dlist in the array is responsible for a range
37 : * of free chunks. When a chunk is palloc'd or pfree'd we may need to move
38 : * the block onto another dlist if the number of free chunks crosses the
39 : * range boundary that the current list is responsible for. Having just a
40 : * few blocklist elements reduces the number of times we must move the block
41 : * onto another dlist element.
42 : *
43 : * We keep track of free chunks within each block by using a block-level free
44 : * list. We consult this list when we allocate a new chunk in the block.
45 : * The free list is a linked list, the head of which is pointed to by
46 : * SlabBlock's freehead field. Each subsequent list item is stored in the
47 : * free chunk's memory. We ensure chunks are large enough to store this
48 : * address.
49 : *
50 : * When we allocate a new block, technically all chunks are free; however, to
51 : * avoid having to write out the entire block to set the linked list for the
52 : * free chunks for every chunk in the block, we instead store a pointer to
53 : * the next "unused" chunk on the block and keep track of how many of these
54 : * unused chunks there are. When a new block is malloc'd, all chunks are
55 : * unused. The unused pointer starts at the first chunk on the block and
56 : * as chunks are allocated, the unused pointer is incremented. As chunks are
57 : * pfree'd, the unused pointer never goes backwards. The unused pointer can
58 : * be thought of as a high watermark for the maximum number of chunks in the
59 : * block which have been in use concurrently. When a chunk is pfree'd the
60 : * chunk is put onto the head of the free list and the unused pointer is not
61 : * changed. We only consume more unused chunks if we run out of free chunks
62 : * on the free list. This method effectively gives priority to using
63 : * previously used chunks over previously unused chunks, which should perform
64 : * better due to CPU caching effects.
65 : *
66 : *-------------------------------------------------------------------------
67 : */
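
/*
 * To illustrate the two techniques described above (the intrusive free list
 * stored in the freed chunks themselves, and the "unused" high watermark),
 * here is a minimal sketch of the same scheme using hypothetical names.
 * This is a simplified model for illustration only, not the code below:
 *
 *	char	   *freehead = NULL;		// head of the intrusive free list
 *	char	   *unused = first_chunk;	// next never-yet-used chunk
 *	int			nunused = nchunks;		// never-yet-used chunks remaining
 *	char	   *chunk;
 *
 *	// allocate: prefer recycled chunks, which are hotter in CPU caches
 *	if (freehead != NULL)
 *	{
 *		chunk = freehead;
 *		freehead = *(char **) chunk;	// next-link lives in the chunk
 *	}
 *	else
 *	{
 *		chunk = unused;
 *		unused += full_chunk_size;		// watermark only moves forwards
 *		nunused--;
 *	}
 *
 *	// free: push onto the list head; "unused" stays where it is
 *	*(char **) chunk = freehead;
 *	freehead = chunk;
 */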
68 :
69 : #include "postgres.h"
70 :
71 : #include "lib/ilist.h"
72 : #include "utils/memdebug.h"
73 : #include "utils/memutils.h"
74 : #include "utils/memutils_internal.h"
75 : #include "utils/memutils_memorychunk.h"
76 :
77 : #define Slab_BLOCKHDRSZ MAXALIGN(sizeof(SlabBlock))
78 :
79 : #ifdef MEMORY_CONTEXT_CHECKING
80 : /*
81 : * Size of the memory required to store the SlabContext.
82 : * MEMORY_CONTEXT_CHECKING builds need some extra memory for the isChunkFree
83 : * array.
84 : */
85 : #define Slab_CONTEXT_HDRSZ(chunksPerBlock) \
86 : (sizeof(SlabContext) + ((chunksPerBlock) * sizeof(bool)))
87 : #else
88 : #define Slab_CONTEXT_HDRSZ(chunksPerBlock) sizeof(SlabContext)
89 : #endif
90 :
91 : /*
92 : * The number of partitions to divide the blocklist into, based on the
93 : * number of free chunks each block has. There must be at least 2.
94 : */
95 : #define SLAB_BLOCKLIST_COUNT 3
96 :
97 : /* The maximum number of completely empty blocks to keep around for reuse. */
98 : #define SLAB_MAXIMUM_EMPTY_BLOCKS 10
99 :
100 : /*
101 : * SlabContext is a specialized implementation of MemoryContext.
102 : */
103 : typedef struct SlabContext
104 : {
105 : MemoryContextData header; /* Standard memory-context fields */
106 : /* Allocation parameters for this context: */
107 : uint32 chunkSize; /* the requested (non-aligned) chunk size */
108 : uint32 fullChunkSize; /* chunk size with chunk header and alignment */
109 : uint32 blockSize; /* the size to make each block of chunks */
110 : int32 chunksPerBlock; /* number of chunks that fit in 1 block */
111 : int32 curBlocklistIndex; /* index of the blocklist[] element
112 : * containing the fullest blocks */
113 : #ifdef MEMORY_CONTEXT_CHECKING
114 : bool *isChunkFree; /* array to mark free chunks in a block during
115 : * SlabCheck */
116 : #endif
117 :
118 : int32 blocklist_shift; /* number of bits to shift the nfree count
119 : * by to get the index into blocklist[] */
120 : dclist_head emptyblocks; /* empty blocks to use up first instead of
121 : * mallocing new blocks */
122 :
123 : /*
124 : * Blocks with free space, grouped by the number of free chunks they
125 : * contain. Completely full blocks are stored in the 0th element.
126 : * Completely empty blocks are stored in emptyblocks or free'd if we have
127 : * enough empty blocks already.
128 : */
129 : dlist_head blocklist[SLAB_BLOCKLIST_COUNT];
130 : } SlabContext;
131 :
132 : /*
133 : * SlabBlock
134 : * Structure of a single slab block.
135 : *
136 : * slab: pointer back to the owning MemoryContext
137 : * nfree: number of chunks on the block which are unallocated
138 : * nunused: number of chunks on the block that are unallocated and not on
139 : * the block's freelist.
140 : * freehead: linked-list header storing a pointer to the first free chunk on
141 : * the block. Subsequent pointers are stored in the chunk's memory. NULL
142 : * indicates the end of the list.
143 : * unused: pointer to the next chunk which has yet to be used.
144 : * node: doubly-linked list node for the context's blocklist
145 : */
146 : typedef struct SlabBlock
147 : {
148 : SlabContext *slab; /* owning context */
149 : int32 nfree; /* number of free chunks (free list + unused) */
150 : int32 nunused; /* number of unused chunks */
151 : MemoryChunk *freehead; /* pointer to the first free chunk */
152 : MemoryChunk *unused; /* pointer to the next unused chunk */
153 : dlist_node node; /* doubly-linked list for blocklist[] */
154 : } SlabBlock;
155 :
156 :
157 : #define Slab_CHUNKHDRSZ sizeof(MemoryChunk)
158 : #define SlabChunkGetPointer(chk) \
159 : ((void *) (((char *) (chk)) + sizeof(MemoryChunk)))
160 :
161 : /*
162 : * SlabBlockGetChunk
163 : * Obtain a pointer to the nth (0-based) chunk in the block
164 : */
165 : #define SlabBlockGetChunk(slab, block, n) \
166 : ((MemoryChunk *) ((char *) (block) + Slab_BLOCKHDRSZ \
167 : + ((n) * (slab)->fullChunkSize)))
168 :
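/*
 * For example, with hypothetical sizes Slab_BLOCKHDRSZ = 48 and
 * fullChunkSize = 24, chunk 0 starts at block + 48, chunk 1 at block + 72,
 * and chunk n at block + 48 + n * 24. Each chunk is a MemoryChunk header
 * followed by the MAXALIGNed user data.
 */
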
169 : #if defined(MEMORY_CONTEXT_CHECKING) || defined(USE_ASSERT_CHECKING)
170 :
171 : /*
172 : * SlabChunkIndex
173 : * Get the 0-based index of the given chunk within the block, i.e. how
174 : * many chunks into the block it is.
175 : */
176 : #define SlabChunkIndex(slab, block, chunk) \
177 : (((char *) (chunk) - (char *) SlabBlockGetChunk(slab, block, 0)) / \
178 : (slab)->fullChunkSize)
179 :
180 : /*
181 : * SlabChunkMod
182 : * A MemoryChunk should always be at an address which is a multiple of
183 : * fullChunkSize starting from the 0th chunk position. This will return
184 : * non-zero if it's not.
185 : */
186 : #define SlabChunkMod(slab, block, chunk) \
187 : (((char *) (chunk) - (char *) SlabBlockGetChunk(slab, block, 0)) % \
188 : (slab)->fullChunkSize)
189 :
190 : #endif
191 :
192 : /*
193 : * SlabIsValid
194 : * True iff set is a valid slab allocation set.
195 : */
196 : #define SlabIsValid(set) (PointerIsValid(set) && IsA(set, SlabContext))
197 :
198 : /*
199 : * SlabBlockIsValid
200 : * True iff block is a valid block of a slab allocation set.
201 : */
202 : #define SlabBlockIsValid(block) \
203 : (PointerIsValid(block) && SlabIsValid((block)->slab))
204 :
205 : /*
206 : * SlabBlocklistIndex
207 : * Determine the blocklist index that a block should be in for the given
208 : * number of free chunks.
209 : */
210 : static inline int32
211 11925370 : SlabBlocklistIndex(SlabContext *slab, int nfree)
212 : {
213 : int32 index;
214 11925370 : int32 blocklist_shift = slab->blocklist_shift;
215 :
216 : Assert(nfree >= 0 && nfree <= slab->chunksPerBlock);
217 :
218 : /*
219 : * Determine the blocklist index based on the number of free chunks. We
220 : * must ensure that 0 free chunks is dedicated to index 0. Everything
221 : * else must be >= 1 and < SLAB_BLOCKLIST_COUNT.
222 : *
223 : * To make this as efficient as possible, we exploit some two's complement
224 : * arithmetic where we reverse the sign before bit shifting. This results
225 : * in an nfree of 0 using index 0 and anything non-zero staying non-zero.
226 : * This is exploiting 0 and -0 being the same in two's complement. When
227 : * we're done, we just need to flip the sign back over again for a
228 : * positive index.
229 : */
230 11925370 : index = -((-nfree) >> blocklist_shift);
231 :
232 : if (nfree == 0)
233 : Assert(index == 0);
234 : else
235 : Assert(index >= 1 && index < SLAB_BLOCKLIST_COUNT);
236 :
237 11925370 : return index;
238 : }
239 :
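/*
 * A worked example of the above, using hypothetical sizes: with
 * SLAB_BLOCKLIST_COUNT = 3 and chunksPerBlock = 96, SlabContextCreate()
 * computes blocklist_shift = 6, the smallest shift for which
 * 96 >> shift < SLAB_BLOCKLIST_COUNT - 1. The expression in
 * SlabBlocklistIndex() then evaluates to ceil(nfree / 64):
 *
 *	nfree = 0   ->  -(( 0) >> 6) = 0	(full blocks go to index 0)
 *	nfree = 1   ->  -((-1) >> 6) = 1	(arithmetic shift of -1 gives -1)
 *	nfree = 64  ->  -((-64) >> 6) = 1
 *	nfree = 65  ->  -((-65) >> 6) = 2	(-65 >> 6 rounds down to -2)
 *	nfree = 96  ->  -((-96) >> 6) = 2
 */
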
240 : /*
241 : * SlabFindNextBlockListIndex
242 : * Search the blocklist array for blocks which have free chunks and return
243 : * the index of the first blocklist element found to contain at least 1
244 : * block with free chunks. If no such block can be found, we return 0.
245 : *
246 : * Note: We give priority to fuller blocks so that these are filled before
247 : * emptier blocks. This is done to increase the chances that mostly-empty
248 : * blocks will eventually become completely empty so they can be free'd.
249 : */
250 : static int32
251 199524 : SlabFindNextBlockListIndex(SlabContext *slab)
252 : {
253 : /* start at 1 as blocklist[0] is for full blocks. */
254 343742 : for (int i = 1; i < SLAB_BLOCKLIST_COUNT; i++)
255 : {
256 : /* return the first found non-empty index */
257 277226 : if (!dlist_is_empty(&slab->blocklist[i]))
258 133008 : return i;
259 : }
260 :
261 : /* no blocks with free space */
262 66516 : return 0;
263 : }
264 :
265 : /*
266 : * SlabGetNextFreeChunk
267 : * Return the next free chunk in block and update the block to account
268 : * for the returned chunk now being used.
269 : */
270 : static inline MemoryChunk *
271 3893128 : SlabGetNextFreeChunk(SlabContext *slab, SlabBlock *block)
272 : {
273 : MemoryChunk *chunk;
274 :
275 : Assert(block->nfree > 0);
276 :
277 3893128 : if (block->freehead != NULL)
278 : {
279 3508548 : chunk = block->freehead;
280 :
281 : /*
282 : * Pop the chunk from the linked list of free chunks. The pointer to
283 : * the next free chunk is stored in the chunk itself.
284 : */
285 : VALGRIND_MAKE_MEM_DEFINED(SlabChunkGetPointer(chunk), sizeof(MemoryChunk *));
286 3508548 : block->freehead = *(MemoryChunk **) SlabChunkGetPointer(chunk);
287 :
288 : /* check nothing stomped on the free chunk's memory */
289 : Assert(block->freehead == NULL ||
290 : (block->freehead >= SlabBlockGetChunk(slab, block, 0) &&
291 : block->freehead <= SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1) &&
292 : SlabChunkMod(slab, block, block->freehead) == 0));
293 : }
294 : else
295 : {
296 : Assert(block->nunused > 0);
297 :
298 384580 : chunk = block->unused;
299 384580 : block->unused = (MemoryChunk *) (((char *) block->unused) + slab->fullChunkSize);
300 384580 : block->nunused--;
301 : }
302 :
303 3893128 : block->nfree--;
304 :
305 3893128 : return chunk;
306 : }
307 :
308 : /*
309 : * SlabContextCreate
310 : * Create a new Slab context.
311 : *
312 : * parent: parent context, or NULL if top-level context
313 : * name: name of context (must be statically allocated)
314 : * blockSize: allocation block size
315 : * chunkSize: allocation chunk size
316 : *
317 : * Slab_CHUNKHDRSZ + MAXALIGN(chunkSize + 1) may not exceed
318 : * MEMORYCHUNK_MAX_VALUE.
319 : * 'blockSize' may not exceed MEMORYCHUNK_MAX_BLOCKOFFSET.
320 : */
321 : MemoryContext
322 1139900 : SlabContextCreate(MemoryContext parent,
323 : const char *name,
324 : Size blockSize,
325 : Size chunkSize)
326 : {
327 : int chunksPerBlock;
328 : Size fullChunkSize;
329 : SlabContext *slab;
330 : int i;
331 :
332 : /* ensure MemoryChunk's size is properly maxaligned */
333 : StaticAssertDecl(Slab_CHUNKHDRSZ == MAXALIGN(Slab_CHUNKHDRSZ),
334 : "sizeof(MemoryChunk) is not maxaligned");
335 : Assert(blockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
336 :
337 : /*
338 : * Ensure there's enough space to store the pointer to the next free chunk
339 : * in the memory of the (otherwise) unused allocation.
340 : */
341 1139900 : if (chunkSize < sizeof(MemoryChunk *))
342 0 : chunkSize = sizeof(MemoryChunk *);
343 :
344 : /* length of the maxaligned chunk including the chunk header */
345 : #ifdef MEMORY_CONTEXT_CHECKING
346 : /* ensure there's always space for the sentinel byte */
347 : fullChunkSize = Slab_CHUNKHDRSZ + MAXALIGN(chunkSize + 1);
348 : #else
349 1139900 : fullChunkSize = Slab_CHUNKHDRSZ + MAXALIGN(chunkSize);
350 : #endif
351 :
352 : Assert(fullChunkSize <= MEMORYCHUNK_MAX_VALUE);
353 :
354 : /* compute the number of chunks that will fit on each block */
355 1139900 : chunksPerBlock = (blockSize - Slab_BLOCKHDRSZ) / fullChunkSize;
356 :
357 : /* Make sure the block can store at least one chunk. */
358 1139900 : if (chunksPerBlock == 0)
359 0 : elog(ERROR, "block size %zu for slab is too small for %zu-byte chunks",
360 : blockSize, chunkSize);
361 :
362 :
363 :
364 1139900 : slab = (SlabContext *) malloc(Slab_CONTEXT_HDRSZ(chunksPerBlock));
365 1139900 : if (slab == NULL)
366 : {
367 0 : MemoryContextStats(TopMemoryContext);
368 0 : ereport(ERROR,
369 : (errcode(ERRCODE_OUT_OF_MEMORY),
370 : errmsg("out of memory"),
371 : errdetail("Failed while creating memory context \"%s\".",
372 : name)));
373 : }
374 :
375 : /*
376 : * Avoid writing code that can fail between here and MemoryContextCreate;
377 : * we'd leak the header if we ereport in this stretch.
378 : */
379 :
380 : /* See comments about Valgrind interactions in aset.c */
381 : VALGRIND_CREATE_MEMPOOL(slab, 0, false);
382 : /* This vchunk covers the SlabContext only */
383 : VALGRIND_MEMPOOL_ALLOC(slab, slab, sizeof(SlabContext));
384 :
385 : /* Fill in SlabContext-specific header fields */
386 1139900 : slab->chunkSize = (uint32) chunkSize;
387 1139900 : slab->fullChunkSize = (uint32) fullChunkSize;
388 1139900 : slab->blockSize = (uint32) blockSize;
389 1139900 : slab->chunksPerBlock = chunksPerBlock;
390 1139900 : slab->curBlocklistIndex = 0;
391 :
392 : /*
393 : * Compute a shift that guarantees that shifting chunksPerBlock with it is
394 : * < SLAB_BLOCKLIST_COUNT - 1. The reason that we subtract 1 from
395 : * SLAB_BLOCKLIST_COUNT in this calculation is that we reserve the 0th
396 : * blocklist element for blocks which have no free chunks.
397 : *
398 : * We calculate the number of bits to shift by rather than a divisor to
399 : * divide by as performing division each time we need to find the
400 : * blocklist index would be much slower.
401 : */
402 1139900 : slab->blocklist_shift = 0;
403 7293642 : while ((slab->chunksPerBlock >> slab->blocklist_shift) >= (SLAB_BLOCKLIST_COUNT - 1))
404 6153742 : slab->blocklist_shift++;
405 :
406 : /* initialize the list to store empty blocks to be reused */
407 1139900 : dclist_init(&slab->emptyblocks);
408 :
409 : /* initialize each blocklist slot */
410 4559600 : for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
411 3419700 : dlist_init(&slab->blocklist[i]);
412 :
413 : #ifdef MEMORY_CONTEXT_CHECKING
414 : /* set the isChunkFree pointer right after the end of the context */
415 : slab->isChunkFree = (bool *) ((char *) slab + sizeof(SlabContext));
416 : #endif
417 :
418 : /* Finally, do the type-independent part of context creation */
419 1139900 : MemoryContextCreate((MemoryContext) slab,
420 : T_SlabContext,
421 : MCTX_SLAB_ID,
422 : parent,
423 : name);
424 :
425 1139900 : return (MemoryContext) slab;
426 : }
427 :
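/*
 * A minimal usage sketch of this context type (the caller, MyNode and the
 * context name are hypothetical; SLAB_DEFAULT_BLOCK_SIZE comes from
 * memutils.h):
 *
 *	MemoryContext slabcxt;
 *	MyNode	   *node;
 *
 *	slabcxt = SlabContextCreate(CurrentMemoryContext,
 *								"MyNode slab",
 *								SLAB_DEFAULT_BLOCK_SIZE,
 *								sizeof(MyNode));
 *
 *	node = (MyNode *) MemoryContextAlloc(slabcxt, sizeof(MyNode));
 *	...
 *	pfree(node);					// back onto its block's free list
 *	MemoryContextDelete(slabcxt);	// releases all blocks at once
 */
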
428 : /*
429 : * SlabReset
430 : * Frees all memory which is allocated in the given set.
431 : *
432 : * The code simply frees all the blocks in the context - we don't keep any
433 : * keeper blocks or anything like that.
434 : */
435 : void
436 1139088 : SlabReset(MemoryContext context)
437 : {
438 1139088 : SlabContext *slab = (SlabContext *) context;
439 : dlist_mutable_iter miter;
440 : int i;
441 :
442 : Assert(SlabIsValid(slab));
443 :
444 : #ifdef MEMORY_CONTEXT_CHECKING
445 : /* Check for corruption and leaks before freeing */
446 : SlabCheck(context);
447 : #endif
448 :
449 : /* release any retained empty blocks */
450 1141854 : dclist_foreach_modify(miter, &slab->emptyblocks)
451 : {
452 2766 : SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
453 :
454 2766 : dclist_delete_from(&slab->emptyblocks, miter.cur);
455 :
456 : #ifdef CLOBBER_FREED_MEMORY
457 : wipe_mem(block, slab->blockSize);
458 : #endif
459 :
460 : /* As in aset.c, free block-header vchunks explicitly */
461 : VALGRIND_MEMPOOL_FREE(slab, block);
462 :
463 2766 : free(block);
464 2766 : context->mem_allocated -= slab->blockSize;
465 : }
466 :
467 : /* walk over blocklist and free the blocks */
468 4556352 : for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
469 : {
470 3644562 : dlist_foreach_modify(miter, &slab->blocklist[i])
471 : {
472 227298 : SlabBlock *block = dlist_container(SlabBlock, node, miter.cur);
473 :
474 227298 : dlist_delete(miter.cur);
475 :
476 : #ifdef CLOBBER_FREED_MEMORY
477 : wipe_mem(block, slab->blockSize);
478 : #endif
479 :
480 : /* As in aset.c, free block-header vchunks explicitly */
481 : VALGRIND_MEMPOOL_FREE(slab, block);
482 :
483 227298 : free(block);
484 227298 : context->mem_allocated -= slab->blockSize;
485 : }
486 : }
487 :
488 : /*
489 : * Instruct Valgrind to throw away all the vchunks associated with this
490 : * context, except for the one covering the SlabContext. This gets rid of
491 : * the vchunks for whatever user data is getting discarded by the context
492 : * reset.
493 : */
494 : VALGRIND_MEMPOOL_TRIM(slab, slab, sizeof(SlabContext));
495 :
496 1139088 : slab->curBlocklistIndex = 0;
497 :
498 : Assert(context->mem_allocated == 0);
499 1139088 : }
500 :
501 : /*
502 : * SlabDelete
503 : * Free all memory which is allocated in the given context.
504 : */
505 : void
506 1139088 : SlabDelete(MemoryContext context)
507 : {
508 : /* Reset to release all the SlabBlocks */
509 1139088 : SlabReset(context);
510 :
511 : /* Destroy the vpool -- see notes in aset.c */
512 : VALGRIND_DESTROY_MEMPOOL(context);
513 :
514 : /* And free the context header */
515 1139088 : free(context);
516 1139088 : }
517 :
518 : /*
519 : * Small helper for allocating a new chunk from a block, to avoid duplicating
520 : * the code between SlabAlloc() and SlabAllocFromNewBlock().
521 : */
522 : static inline void *
523 4127042 : SlabAllocSetupNewChunk(MemoryContext context, SlabBlock *block,
524 : MemoryChunk *chunk, Size size)
525 : {
526 4127042 : SlabContext *slab = (SlabContext *) context;
527 :
528 : /*
529 : * Check that the chunk pointer is actually somewhere on the block and is
530 : * aligned as expected.
531 : */
532 : Assert(chunk >= SlabBlockGetChunk(slab, block, 0));
533 : Assert(chunk <= SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1));
534 : Assert(SlabChunkMod(slab, block, chunk) == 0);
535 :
536 : /* Prepare to initialize the chunk header. */
537 : VALGRIND_MAKE_MEM_UNDEFINED(chunk, Slab_CHUNKHDRSZ);
538 :
539 4127042 : MemoryChunkSetHdrMask(chunk, block, MAXALIGN(slab->chunkSize), MCTX_SLAB_ID);
540 :
541 : #ifdef MEMORY_CONTEXT_CHECKING
542 : /* slab mark to catch clobber of "unused" space */
543 : Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
544 : set_sentinel(MemoryChunkGetPointer(chunk), size);
545 : VALGRIND_MAKE_MEM_NOACCESS(((char *) chunk) + Slab_CHUNKHDRSZ +
546 : slab->chunkSize,
547 : slab->fullChunkSize -
548 : (slab->chunkSize + Slab_CHUNKHDRSZ));
549 : #endif
550 :
551 : #ifdef RANDOMIZE_ALLOCATED_MEMORY
552 : /* fill the allocated space with junk */
553 : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
554 : #endif
555 :
556 : /* Disallow access to the chunk header. */
557 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
558 :
559 4127042 : return MemoryChunkGetPointer(chunk);
560 : }
561 :
562 : pg_noinline
563 : static void *
564 293842 : SlabAllocFromNewBlock(MemoryContext context, Size size, int flags)
565 : {
566 293842 : SlabContext *slab = (SlabContext *) context;
567 : SlabBlock *block;
568 : MemoryChunk *chunk;
569 : dlist_head *blocklist;
570 : int blocklist_idx;
571 :
572 : /* to save allocating a new one, first check the empty blocks list */
573 293842 : if (dclist_count(&slab->emptyblocks) > 0)
574 : {
575 59928 : dlist_node *node = dclist_pop_head_node(&slab->emptyblocks);
576 :
577 59928 : block = dlist_container(SlabBlock, node, node);
578 :
579 : /*
580 : * SlabFree() should have left this block in a valid state with all
581 : * chunks free. Ensure that's the case.
582 : */
583 : Assert(block->nfree == slab->chunksPerBlock);
584 :
585 : /* fetch the next chunk from this block */
586 59928 : chunk = SlabGetNextFreeChunk(slab, block);
587 : }
588 : else
589 : {
590 233914 : block = (SlabBlock *) malloc(slab->blockSize);
591 :
592 233914 : if (unlikely(block == NULL))
593 0 : return MemoryContextAllocationFailure(context, size, flags);
594 :
595 : /* Make a vchunk covering the new block's header */
596 : VALGRIND_MEMPOOL_ALLOC(slab, block, Slab_BLOCKHDRSZ);
597 :
598 233914 : block->slab = slab;
599 233914 : context->mem_allocated += slab->blockSize;
600 :
601 : /* use the first chunk in the new block */
602 233914 : chunk = SlabBlockGetChunk(slab, block, 0);
603 :
604 233914 : block->nfree = slab->chunksPerBlock - 1;
605 233914 : block->unused = SlabBlockGetChunk(slab, block, 1);
606 233914 : block->freehead = NULL;
607 233914 : block->nunused = slab->chunksPerBlock - 1;
608 : }
609 :
610 : /* find the blocklist element for storing blocks with 1 used chunk */
611 293842 : blocklist_idx = SlabBlocklistIndex(slab, block->nfree);
612 293842 : blocklist = &slab->blocklist[blocklist_idx];
613 :
614 : /* this had better be empty; we only get here when no blocks have free chunks */
615 : Assert(dlist_is_empty(blocklist));
616 :
617 293842 : dlist_push_head(blocklist, &block->node);
618 :
619 293842 : slab->curBlocklistIndex = blocklist_idx;
620 :
621 293842 : return SlabAllocSetupNewChunk(context, block, chunk, size);
622 : }
623 :
624 : /*
625 : * SlabAllocInvalidSize
626 : * Handle raising an ERROR for an invalid size request. We don't do this
627 : * in SlabAlloc() as calling the elog functions would force the compiler
628 : * to set up the stack frame in SlabAlloc(). For performance reasons, we
629 : * want to avoid that.
630 : */
631 : pg_noinline
632 : pg_noreturn
633 : static void
634 0 : SlabAllocInvalidSize(MemoryContext context, Size size)
635 : {
636 0 : SlabContext *slab = (SlabContext *) context;
637 :
638 0 : elog(ERROR, "unexpected alloc chunk size %zu (expected %u)", size,
639 : slab->chunkSize);
640 : }
641 :
642 : /*
643 : * SlabAlloc
644 : * Returns a pointer to a newly allocated memory chunk or raises an ERROR
645 : * on allocation failure, or returns NULL when flags contains
646 : * MCXT_ALLOC_NO_OOM. 'size' must be the same size as was specified
647 : * during SlabContextCreate().
648 : *
649 : * This function should only contain the most common code paths. Everything
650 : * else should be in pg_noinline helper functions, thus avoiding the overhead
651 : * of creating a stack frame for the common cases. Allocating memory is often
652 : * a bottleneck in many workloads, so avoiding stack frame setup is
653 : * worthwhile. Helper functions should always directly return the newly
654 : * allocated memory so that we can just return that address directly as a tail
655 : * call.
656 : */
657 : void *
658 4127042 : SlabAlloc(MemoryContext context, Size size, int flags)
659 : {
660 4127042 : SlabContext *slab = (SlabContext *) context;
661 : SlabBlock *block;
662 : MemoryChunk *chunk;
663 :
664 : Assert(SlabIsValid(slab));
665 :
666 : /* sanity check that this is pointing to a valid blocklist */
667 : Assert(slab->curBlocklistIndex >= 0);
668 : Assert(slab->curBlocklistIndex <= SlabBlocklistIndex(slab, slab->chunksPerBlock));
669 :
670 : /*
671 : * Make sure we only allow the correct request size. This doubles as the
672 : * MemoryContextCheckSize check.
673 : */
674 4127042 : if (unlikely(size != slab->chunkSize))
675 0 : SlabAllocInvalidSize(context, size);
676 :
677 4127042 : if (unlikely(slab->curBlocklistIndex == 0))
678 : {
679 : /*
680 : * Handle the case when there are no partially filled blocks
681 : * available. This happens either when the last allocation took the
682 : * last chunk in the block, or when SlabFree() free'd the final block.
683 : */
684 293842 : return SlabAllocFromNewBlock(context, size, flags);
685 : }
686 : else
687 : {
688 3833200 : dlist_head *blocklist = &slab->blocklist[slab->curBlocklistIndex];
689 : int new_blocklist_idx;
690 :
691 : Assert(!dlist_is_empty(blocklist));
692 :
693 : /* grab the block from the blocklist */
694 3833200 : block = dlist_head_element(SlabBlock, node, blocklist);
695 :
696 : /* make sure we actually got a valid block, with matching nfree */
697 : Assert(block != NULL);
698 : Assert(slab->curBlocklistIndex == SlabBlocklistIndex(slab, block->nfree));
699 : Assert(block->nfree > 0);
700 :
701 : /* fetch the next chunk from this block */
702 3833200 : chunk = SlabGetNextFreeChunk(slab, block);
703 :
704 : /* get the new blocklist index based on the new free chunk count */
705 3833200 : new_blocklist_idx = SlabBlocklistIndex(slab, block->nfree);
706 :
707 : /*
708 : * Handle the case where the blocklist index changes. This also deals
709 : * with blocks becoming full as only full blocks go at index 0.
710 : */
711 3833200 : if (unlikely(slab->curBlocklistIndex != new_blocklist_idx))
712 : {
713 87920 : dlist_delete_from(blocklist, &block->node);
714 87920 : dlist_push_head(&slab->blocklist[new_blocklist_idx], &block->node);
715 :
716 87920 : if (dlist_is_empty(blocklist))
717 83244 : slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
718 : }
719 : }
720 :
721 3833200 : return SlabAllocSetupNewChunk(context, block, chunk, size);
722 : }
723 :
724 : /*
725 : * SlabFree
726 : * Frees allocated memory; memory is removed from the slab.
727 : */
728 : void
729 3899164 : SlabFree(void *pointer)
730 : {
731 3899164 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
732 : SlabBlock *block;
733 : SlabContext *slab;
734 : int curBlocklistIdx;
735 : int newBlocklistIdx;
736 :
737 : /* Allow access to the chunk header. */
738 : VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
739 :
740 3899164 : block = MemoryChunkGetBlock(chunk);
741 :
742 : /*
743 : * For speed reasons we just Assert that the referenced block is good.
744 : * Future field experience may show that this Assert had better become a
745 : * regular runtime test-and-elog check.
746 : */
747 : Assert(SlabBlockIsValid(block));
748 3899164 : slab = block->slab;
749 :
750 : #ifdef MEMORY_CONTEXT_CHECKING
751 : /* Test for someone scribbling on unused space in chunk */
752 : Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
753 : if (!sentinel_ok(pointer, slab->chunkSize))
754 : elog(WARNING, "detected write past chunk end in %s %p",
755 : slab->header.name, chunk);
756 : #endif
757 :
758 : /* push this chunk onto the head of the block's free list */
759 3899164 : *(MemoryChunk **) pointer = block->freehead;
760 3899164 : block->freehead = chunk;
761 :
762 3899164 : block->nfree++;
763 :
764 : Assert(block->nfree > 0);
765 : Assert(block->nfree <= slab->chunksPerBlock);
766 :
767 : #ifdef CLOBBER_FREED_MEMORY
768 : /* don't wipe the free list MemoryChunk pointer stored in the chunk */
769 : wipe_mem((char *) pointer + sizeof(MemoryChunk *),
770 : slab->chunkSize - sizeof(MemoryChunk *));
771 : #endif
772 :
773 3899164 : curBlocklistIdx = SlabBlocklistIndex(slab, block->nfree - 1);
774 3899164 : newBlocklistIdx = SlabBlocklistIndex(slab, block->nfree);
775 :
776 : /*
777 : * Check if the block needs to be moved to another element on the
778 : * blocklist based on it now having 1 more free chunk.
779 : */
780 3899164 : if (unlikely(curBlocklistIdx != newBlocklistIdx))
781 : {
782 : /* do the move */
783 87914 : dlist_delete_from(&slab->blocklist[curBlocklistIdx], &block->node);
784 87914 : dlist_push_head(&slab->blocklist[newBlocklistIdx], &block->node);
785 :
786 : /*
787 : * The blocklist[curBlocklistIdx] may now be empty or we may now be
788 : * able to use a lower-element blocklist. We'll need to redetermine
789 : * what the slab->curBlocklistIndex is if the current blocklist was
790 : * changed or if a lower-element one was changed. We must ensure we
791 : * use the list with the fullest block(s).
792 : */
793 87914 : if (slab->curBlocklistIndex >= curBlocklistIdx)
794 : {
795 87914 : slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
796 :
797 : /*
798 : * We know there must be a block with at least 1 free chunk as
799 : * we just pfree'd one. Ensure curBlocklistIndex reflects this.
800 : */
801 : Assert(slab->curBlocklistIndex > 0);
802 : }
803 : }
804 :
805 : /* Handle when a block becomes completely empty */
806 3899164 : if (unlikely(block->nfree == slab->chunksPerBlock))
807 : {
808 : /* remove the block */
809 66456 : dlist_delete_from(&slab->blocklist[newBlocklistIdx], &block->node);
810 :
811 : /*
812 : * To avoid thrashing malloc/free, we keep a list of empty blocks that
813 : * we can reuse instead of having to malloc a new one.
814 : */
815 66456 : if (dclist_count(&slab->emptyblocks) < SLAB_MAXIMUM_EMPTY_BLOCKS)
816 63534 : dclist_push_head(&slab->emptyblocks, &block->node);
817 : else
818 : {
819 : /*
820 : * When we have enough empty blocks stored already, we actually
821 : * free the block.
822 : */
823 : #ifdef CLOBBER_FREED_MEMORY
824 : wipe_mem(block, slab->blockSize);
825 : #endif
826 :
827 : /* As in aset.c, free block-header vchunks explicitly */
828 : VALGRIND_MEMPOOL_FREE(slab, block);
829 :
830 2922 : free(block);
831 2922 : slab->header.mem_allocated -= slab->blockSize;
832 : }
833 :
834 : /*
835 : * Check if we need to reset the blocklist index. This is required
836 : * when the blocklist this block is on has become completely empty.
837 : */
838 100502 : if (slab->curBlocklistIndex == newBlocklistIdx &&
839 34046 : dlist_is_empty(&slab->blocklist[newBlocklistIdx]))
840 28366 : slab->curBlocklistIndex = SlabFindNextBlockListIndex(slab);
841 : }
842 3899164 : }
843 :
844 : /*
845 : * SlabRealloc
846 : * Change the allocated size of a chunk.
847 : *
848 : * As Slab is designed for allocating equally-sized chunks of memory, it can't
849 : * do an actual chunk size change. We try to be gentle and allow calls with
850 : * exactly the same size, as in that case we can simply return the same
851 : * chunk. When the size differs, we throw an error.
852 : *
853 : * We could also allow requests with size < chunkSize. That however seems
854 : * rather pointless - Slab is meant for chunks of constant size, and moreover
855 : * realloc is usually used to enlarge the chunk.
856 : */
857 : void *
858 0 : SlabRealloc(void *pointer, Size size, int flags)
859 : {
860 0 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
861 : SlabBlock *block;
862 : SlabContext *slab;
863 :
864 : /* Allow access to the chunk header. */
865 : VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
866 :
867 0 : block = MemoryChunkGetBlock(chunk);
868 :
869 : /* Disallow access to the chunk header. */
870 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
871 :
872 : /*
873 : * Try to verify that we have a sane block pointer: the block header
874 : * should reference a slab context. (We use a test-and-elog, not just
875 : * Assert, because it seems highly likely that we're here in error in the
876 : * first place.)
877 : */
878 0 : if (!SlabBlockIsValid(block))
879 0 : elog(ERROR, "could not find block containing chunk %p", chunk);
880 0 : slab = block->slab;
881 :
882 : /* can't do actual realloc with slab, but let's try to be gentle */
883 0 : if (size == slab->chunkSize)
884 0 : return pointer;
885 :
886 0 : elog(ERROR, "slab allocator does not support realloc()");
887 : return NULL; /* keep compiler quiet */
888 : }
889 :
890 : /*
891 : * SlabGetChunkContext
892 : * Return the MemoryContext that 'pointer' belongs to.
893 : */
894 : MemoryContext
895 0 : SlabGetChunkContext(void *pointer)
896 : {
897 0 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
898 : SlabBlock *block;
899 :
900 : /* Allow access to the chunk header. */
901 : VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
902 :
903 0 : block = MemoryChunkGetBlock(chunk);
904 :
905 : /* Disallow access to the chunk header. */
906 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
907 :
908 : Assert(SlabBlockIsValid(block));
909 :
910 0 : return &block->slab->header;
911 : }
912 :
913 : /*
914 : * SlabGetChunkSpace
915 : * Given a currently-allocated chunk, determine the total space
916 : * it occupies (including all memory-allocation overhead).
917 : */
918 : Size
919 0 : SlabGetChunkSpace(void *pointer)
920 : {
921 0 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
922 : SlabBlock *block;
923 : SlabContext *slab;
924 :
925 : /* Allow access to the chunk header. */
926 : VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
927 :
928 0 : block = MemoryChunkGetBlock(chunk);
929 :
930 : /* Disallow access to the chunk header. */
931 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
932 :
933 : Assert(SlabBlockIsValid(block));
934 0 : slab = block->slab;
935 :
936 0 : return slab->fullChunkSize;
937 : }
938 :
939 : /*
940 : * SlabIsEmpty
941 : * Is the slab empty of any allocated space?
942 : */
943 : bool
944 0 : SlabIsEmpty(MemoryContext context)
945 : {
946 : Assert(SlabIsValid((SlabContext *) context));
947 :
948 0 : return (context->mem_allocated == 0);
949 : }
950 :
951 : /*
952 : * SlabStats
953 : * Compute stats about memory consumption of a Slab context.
954 : *
955 : * printfunc: if not NULL, pass a human-readable stats string to this.
956 : * passthru: pass this pointer through to printfunc.
957 : * totals: if not NULL, add stats about this context into *totals.
958 : * print_to_stderr: print stats to stderr if true, elog otherwise.
959 : */
960 : void
961 0 : SlabStats(MemoryContext context,
962 : MemoryStatsPrintFunc printfunc, void *passthru,
963 : MemoryContextCounters *totals,
964 : bool print_to_stderr)
965 : {
966 0 : SlabContext *slab = (SlabContext *) context;
967 0 : Size nblocks = 0;
968 0 : Size freechunks = 0;
969 : Size totalspace;
970 0 : Size freespace = 0;
971 : int i;
972 :
973 : Assert(SlabIsValid(slab));
974 :
975 : /* Include context header in totalspace */
976 0 : totalspace = Slab_CONTEXT_HDRSZ(slab->chunksPerBlock);
977 :
978 : /* Add the space consumed by blocks in the emptyblocks list */
979 0 : totalspace += dclist_count(&slab->emptyblocks) * slab->blockSize;
980 :
981 0 : for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
982 : {
983 : dlist_iter iter;
984 :
985 0 : dlist_foreach(iter, &slab->blocklist[i])
986 : {
987 0 : SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
988 :
989 0 : nblocks++;
990 0 : totalspace += slab->blockSize;
991 0 : freespace += slab->fullChunkSize * block->nfree;
992 0 : freechunks += block->nfree;
993 : }
994 : }
995 :
996 0 : if (printfunc)
997 : {
998 : char stats_string[200];
999 :
1000 : /* XXX should we include free chunks on empty blocks? */
1001 0 : snprintf(stats_string, sizeof(stats_string),
1002 : "%zu total in %zu blocks; %u empty blocks; %zu free (%zu chunks); %zu used",
1003 0 : totalspace, nblocks, dclist_count(&slab->emptyblocks),
1004 : freespace, freechunks, totalspace - freespace);
1005 0 : printfunc(context, passthru, stats_string, print_to_stderr);
1006 : }
1007 :
1008 0 : if (totals)
1009 : {
1010 0 : totals->nblocks += nblocks;
1011 0 : totals->freechunks += freechunks;
1012 0 : totals->totalspace += totalspace;
1013 0 : totals->freespace += freespace;
1014 : }
1015 0 : }
1016 :
1017 :
1018 : #ifdef MEMORY_CONTEXT_CHECKING
1019 :
1020 : /*
1021 : * SlabCheck
1022 : * Walk through all blocks looking for inconsistencies.
1023 : *
1024 : * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1025 : * find yourself in an infinite loop when trouble occurs, because this
1026 : * routine will be entered again when elog cleanup tries to release memory!
1027 : */
1028 : void
1029 : SlabCheck(MemoryContext context)
1030 : {
1031 : SlabContext *slab = (SlabContext *) context;
1032 : int i;
1033 : int nblocks = 0;
1034 : const char *name = slab->header.name;
1035 : dlist_iter iter;
1036 :
1037 : Assert(SlabIsValid(slab));
1038 : Assert(slab->chunksPerBlock > 0);
1039 :
1040 : /*
1041 : * Have a look at the empty blocks. These should have all their chunks
1042 : * marked as free. Ensure that's the case.
1043 : */
1044 : dclist_foreach(iter, &slab->emptyblocks)
1045 : {
1046 : SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
1047 :
1048 : if (block->nfree != slab->chunksPerBlock)
1049 : elog(WARNING, "problem in slab %s: empty block %p should have %d free chunks but has %d chunks free",
1050 : name, block, slab->chunksPerBlock, block->nfree);
1051 : }
1052 :
1053 : /* walk the non-empty block lists */
1054 : for (i = 0; i < SLAB_BLOCKLIST_COUNT; i++)
1055 : {
1056 : int j,
1057 : nfree;
1058 :
1059 : /* walk all blocks on this blocklist */
1060 : dlist_foreach(iter, &slab->blocklist[i])
1061 : {
1062 : SlabBlock *block = dlist_container(SlabBlock, node, iter.cur);
1063 : MemoryChunk *cur_chunk;
1064 :
1065 : /*
1066 : * Make sure the number of free chunks (in the block header)
1067 : * matches the position in the blocklist.
1068 : */
1069 : if (SlabBlocklistIndex(slab, block->nfree) != i)
1070 : elog(WARNING, "problem in slab %s: block %p is on blocklist %d but should be on blocklist %d",
1071 : name, block, i, SlabBlocklistIndex(slab, block->nfree));
1072 :
1073 : /* make sure the block is not empty */
1074 : if (block->nfree >= slab->chunksPerBlock)
1075 : elog(WARNING, "problem in slab %s: empty block %p incorrectly stored on blocklist element %d",
1076 : name, block, i);
1077 :
1078 : /* make sure the slab pointer correctly points to this context */
1079 : if (block->slab != slab)
1080 : elog(WARNING, "problem in slab %s: bogus slab link in block %p",
1081 : name, block);
1082 :
1083 : /* reset the array of free chunks for this block */
1084 : memset(slab->isChunkFree, 0, (slab->chunksPerBlock * sizeof(bool)));
1085 : nfree = 0;
1086 :
1087 : /* walk through the block's free list chunks */
1088 : cur_chunk = block->freehead;
1089 : while (cur_chunk != NULL)
1090 : {
1091 : int chunkidx = SlabChunkIndex(slab, block, cur_chunk);
1092 :
1093 : /*
1094 : * Ensure the free list link points to something on the block
1095 : * at an address aligned according to the full chunk size.
1096 : */
1097 : if (cur_chunk < SlabBlockGetChunk(slab, block, 0) ||
1098 : cur_chunk > SlabBlockGetChunk(slab, block, slab->chunksPerBlock - 1) ||
1099 : SlabChunkMod(slab, block, cur_chunk) != 0)
1100 : elog(WARNING, "problem in slab %s: bogus free list link %p in block %p",
1101 : name, cur_chunk, block);
1102 :
1103 : /* count the chunk and mark it free on the free chunk array */
1104 : nfree++;
1105 : slab->isChunkFree[chunkidx] = true;
1106 :
1107 : /* read pointer of the next free chunk */
1108 : VALGRIND_MAKE_MEM_DEFINED(MemoryChunkGetPointer(cur_chunk), sizeof(MemoryChunk *));
1109 : cur_chunk = *(MemoryChunk **) SlabChunkGetPointer(cur_chunk);
1110 : }
1111 :
1112 : /* check that the unused pointer matches what nunused claims */
1113 : if (SlabBlockGetChunk(slab, block, slab->chunksPerBlock - block->nunused) !=
1114 : block->unused)
1115 : elog(WARNING, "problem in slab %s: mismatch detected between nunused chunks and unused pointer in block %p",
1116 : name, block);
1117 :
1118 : /*
1119 : * count the remaining free chunks that have yet to make it onto
1120 : * the block's free list.
1121 : */
1122 : cur_chunk = block->unused;
1123 : for (j = 0; j < block->nunused; j++)
1124 : {
1125 : int chunkidx = SlabChunkIndex(slab, block, cur_chunk);
1126 :
1127 :
1128 : /* count the chunk as free and mark it as such in the array */
1129 : nfree++;
1130 : if (chunkidx < slab->chunksPerBlock)
1131 : slab->isChunkFree[chunkidx] = true;
1132 :
1133 : /* move forward 1 chunk */
1134 : cur_chunk = (MemoryChunk *) (((char *) cur_chunk) + slab->fullChunkSize);
1135 : }
1136 :
1137 : for (j = 0; j < slab->chunksPerBlock; j++)
1138 : {
1139 : if (!slab->isChunkFree[j])
1140 : {
1141 : MemoryChunk *chunk = SlabBlockGetChunk(slab, block, j);
1142 : SlabBlock *chunkblock;
1143 :
1144 : /* Allow access to the chunk header. */
1145 : VALGRIND_MAKE_MEM_DEFINED(chunk, Slab_CHUNKHDRSZ);
1146 :
1147 : chunkblock = (SlabBlock *) MemoryChunkGetBlock(chunk);
1148 :
1149 : /* Disallow access to the chunk header. */
1150 : VALGRIND_MAKE_MEM_NOACCESS(chunk, Slab_CHUNKHDRSZ);
1151 :
1152 : /*
1153 : * check the chunk's blockoffset correctly points back to
1154 : * the block
1155 : */
1156 : if (chunkblock != block)
1157 : elog(WARNING, "problem in slab %s: bogus block link in block %p, chunk %p",
1158 : name, block, chunk);
1159 :
1160 : /* check the sentinel byte is intact */
1161 : Assert(slab->chunkSize < (slab->fullChunkSize - Slab_CHUNKHDRSZ));
1162 : if (!sentinel_ok(chunk, Slab_CHUNKHDRSZ + slab->chunkSize))
1163 : elog(WARNING, "problem in slab %s: detected write past chunk end in block %p, chunk %p",
1164 : name, block, chunk);
1165 : }
1166 : }
1167 :
1168 : /*
1169 : * Make sure we got the expected number of free chunks (as tracked
1170 : * in the block header).
1171 : */
1172 : if (nfree != block->nfree)
1173 : elog(WARNING, "problem in slab %s: nfree in block %p is %d but %d chunks were found as free",
1174 : name, block, block->nfree, nfree);
1175 :
1176 : nblocks++;
1177 : }
1178 : }
1179 :
1180 : /* the stored empty blocks are tracked in mem_allocated too */
1181 : nblocks += dclist_count(&slab->emptyblocks);
1182 :
1183 : Assert(nblocks * slab->blockSize == context->mem_allocated);
1184 : }
1185 :
1186 : #endif /* MEMORY_CONTEXT_CHECKING */