Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * mcxt.c
4 : * POSTGRES memory context management code.
5 : *
6 : * This module handles context management operations that are independent
7 : * of the particular kind of context being operated on. It calls
8 : * context-type-specific operations via the function pointers in a
9 : * context's MemoryContextMethods struct.
10 : *
11 : * A note about Valgrind support: when USE_VALGRIND is defined, we provide
12 : * support for memory leak tracking at the allocation-unit level. Valgrind
13 : * does leak detection by tracking allocated "chunks", which can be grouped
14 : * into "pools". The "chunk" terminology is overloaded, since we use that
15 : * word for our allocation units, and it's sometimes important to distinguish
16 : * those from the Valgrind objects that describe them. To reduce confusion,
17 : * let's use the terms "vchunk" and "vpool" for the Valgrind objects.
18 : *
19 : * We use a separate vpool for each memory context. The context-type-specific
20 : * code is responsible for creating and deleting the vpools, and also for
21 : * creating vchunks to cover its management data structures such as block
22 : * headers. (There must be a vchunk that includes every pointer we want
23 : * Valgrind to consider for leak-tracking purposes.) This module creates
24 : * and deletes the vchunks that cover the caller-visible allocated chunks.
25 : * However, the context-type-specific code must handle cleaning up those
26 : * vchunks too during memory context reset operations.
27 : *
28 : *
29 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
30 : * Portions Copyright (c) 1994, Regents of the University of California
31 : *
32 : *
33 : * IDENTIFICATION
34 : * src/backend/utils/mmgr/mcxt.c
35 : *
36 : *-------------------------------------------------------------------------
37 : */
38 :
39 : #include "postgres.h"
40 :
41 : #include "common/int.h"
42 : #include "mb/pg_wchar.h"
43 : #include "miscadmin.h"
44 : #include "utils/memdebug.h"
45 : #include "utils/memutils.h"
46 : #include "utils/memutils_internal.h"
47 : #include "utils/memutils_memorychunk.h"
48 :
49 :
/* Error-trapping stubs installed for invalid/unused method IDs (see below) */
static void BogusFree(void *pointer);
static void *BogusRealloc(void *pointer, Size size, int flags);
static MemoryContext BogusGetChunkContext(void *pointer);
static Size BogusGetChunkSpace(void *pointer);

/*****************************************************************************
 *	  GLOBAL MEMORY															 *
 *****************************************************************************/

/*
 * Fill an mcxt_methods[] slot for a reserved/unused method ID with the
 * Bogus* stubs, so that a chunk header carrying such an ID fails cleanly
 * rather than crashing.  Only the methods that can be reached via
 * MCXT_METHOD() on a bare pointer need entries here.
 */
#define BOGUS_MCTX(id) \
	[id].free_p = BogusFree, \
	[id].realloc = BogusRealloc, \
	[id].get_chunk_context = BogusGetChunkContext, \
	[id].get_chunk_space = BogusGetChunkSpace
63 :
/*
 * Dispatch table mapping MemoryContextMethodID (stored in each chunk's
 * header word) to the context-type-specific implementation functions.
 * Indexed by method ID via C99 designated initializers, so slot order
 * need not match declaration order.
 */
static const MemoryContextMethods mcxt_methods[] = {
	/* aset.c */
	[MCTX_ASET_ID].alloc = AllocSetAlloc,
	[MCTX_ASET_ID].free_p = AllocSetFree,
	[MCTX_ASET_ID].realloc = AllocSetRealloc,
	[MCTX_ASET_ID].reset = AllocSetReset,
	[MCTX_ASET_ID].delete_context = AllocSetDelete,
	[MCTX_ASET_ID].get_chunk_context = AllocSetGetChunkContext,
	[MCTX_ASET_ID].get_chunk_space = AllocSetGetChunkSpace,
	[MCTX_ASET_ID].is_empty = AllocSetIsEmpty,
	[MCTX_ASET_ID].stats = AllocSetStats,
#ifdef MEMORY_CONTEXT_CHECKING
	[MCTX_ASET_ID].check = AllocSetCheck,
#endif

	/* generation.c */
	[MCTX_GENERATION_ID].alloc = GenerationAlloc,
	[MCTX_GENERATION_ID].free_p = GenerationFree,
	[MCTX_GENERATION_ID].realloc = GenerationRealloc,
	[MCTX_GENERATION_ID].reset = GenerationReset,
	[MCTX_GENERATION_ID].delete_context = GenerationDelete,
	[MCTX_GENERATION_ID].get_chunk_context = GenerationGetChunkContext,
	[MCTX_GENERATION_ID].get_chunk_space = GenerationGetChunkSpace,
	[MCTX_GENERATION_ID].is_empty = GenerationIsEmpty,
	[MCTX_GENERATION_ID].stats = GenerationStats,
#ifdef MEMORY_CONTEXT_CHECKING
	[MCTX_GENERATION_ID].check = GenerationCheck,
#endif

	/* slab.c */
	[MCTX_SLAB_ID].alloc = SlabAlloc,
	[MCTX_SLAB_ID].free_p = SlabFree,
	[MCTX_SLAB_ID].realloc = SlabRealloc,
	[MCTX_SLAB_ID].reset = SlabReset,
	[MCTX_SLAB_ID].delete_context = SlabDelete,
	[MCTX_SLAB_ID].get_chunk_context = SlabGetChunkContext,
	[MCTX_SLAB_ID].get_chunk_space = SlabGetChunkSpace,
	[MCTX_SLAB_ID].is_empty = SlabIsEmpty,
	[MCTX_SLAB_ID].stats = SlabStats,
#ifdef MEMORY_CONTEXT_CHECKING
	[MCTX_SLAB_ID].check = SlabCheck,
#endif

	/* alignedalloc.c */
	[MCTX_ALIGNED_REDIRECT_ID].alloc = NULL,	/* not required */
	[MCTX_ALIGNED_REDIRECT_ID].free_p = AlignedAllocFree,
	[MCTX_ALIGNED_REDIRECT_ID].realloc = AlignedAllocRealloc,
	[MCTX_ALIGNED_REDIRECT_ID].reset = NULL,	/* not required */
	[MCTX_ALIGNED_REDIRECT_ID].delete_context = NULL,	/* not required */
	[MCTX_ALIGNED_REDIRECT_ID].get_chunk_context = AlignedAllocGetChunkContext,
	[MCTX_ALIGNED_REDIRECT_ID].get_chunk_space = AlignedAllocGetChunkSpace,
	[MCTX_ALIGNED_REDIRECT_ID].is_empty = NULL, /* not required */
	[MCTX_ALIGNED_REDIRECT_ID].stats = NULL,	/* not required */
#ifdef MEMORY_CONTEXT_CHECKING
	[MCTX_ALIGNED_REDIRECT_ID].check = NULL,	/* not required */
#endif

	/* bump.c */
	[MCTX_BUMP_ID].alloc = BumpAlloc,
	[MCTX_BUMP_ID].free_p = BumpFree,
	[MCTX_BUMP_ID].realloc = BumpRealloc,
	[MCTX_BUMP_ID].reset = BumpReset,
	[MCTX_BUMP_ID].delete_context = BumpDelete,
	[MCTX_BUMP_ID].get_chunk_context = BumpGetChunkContext,
	[MCTX_BUMP_ID].get_chunk_space = BumpGetChunkSpace,
	[MCTX_BUMP_ID].is_empty = BumpIsEmpty,
	[MCTX_BUMP_ID].stats = BumpStats,
#ifdef MEMORY_CONTEXT_CHECKING
	[MCTX_BUMP_ID].check = BumpCheck,
#endif


	/*
	 * Reserved and unused IDs should have dummy entries here.  This allows us
	 * to fail cleanly if a bogus pointer is passed to pfree or the like.  It
	 * seems sufficient to provide routines for the methods that might get
	 * invoked from inspection of a chunk (see MCXT_METHOD calls below).
	 */
	BOGUS_MCTX(MCTX_1_RESERVED_GLIBC_ID),
	BOGUS_MCTX(MCTX_2_RESERVED_GLIBC_ID),
	BOGUS_MCTX(MCTX_8_UNUSED_ID),
	BOGUS_MCTX(MCTX_9_UNUSED_ID),
	BOGUS_MCTX(MCTX_10_UNUSED_ID),
	BOGUS_MCTX(MCTX_11_UNUSED_ID),
	BOGUS_MCTX(MCTX_12_UNUSED_ID),
	BOGUS_MCTX(MCTX_13_UNUSED_ID),
	BOGUS_MCTX(MCTX_14_UNUSED_ID),
	BOGUS_MCTX(MCTX_0_RESERVED_UNUSEDMEM_ID),
	BOGUS_MCTX(MCTX_15_RESERVED_WIPEDMEM_ID)
};
154 :
#undef BOGUS_MCTX

/*
 * CurrentMemoryContext
 *		Default memory context for allocations.
 */
MemoryContext CurrentMemoryContext = NULL;

/*
 * Standard top-level contexts. For a description of the purpose of each
 * of these contexts, refer to src/backend/utils/mmgr/README
 */
MemoryContext TopMemoryContext = NULL;
MemoryContext ErrorContext = NULL;
MemoryContext PostmasterContext = NULL;
MemoryContext CacheMemoryContext = NULL;
MemoryContext MessageContext = NULL;
MemoryContext TopTransactionContext = NULL;
MemoryContext CurTransactionContext = NULL;

/* This is a transient link to the active portal's memory context: */
MemoryContext PortalContext = NULL;

/* Is memory context logging currently in progress? */
static bool LogMemoryContextInProgress = false;

/* Forward declarations of local routines */
static void MemoryContextDeleteOnly(MemoryContext context);
static void MemoryContextCallResetCallbacks(MemoryContext context);
static void MemoryContextStatsInternal(MemoryContext context, int level,
									   int max_level, int max_children,
									   MemoryContextCounters *totals,
									   bool print_to_stderr);
static void MemoryContextStatsPrint(MemoryContext context, void *passthru,
									const char *stats_string,
									bool print_to_stderr);
pg_noreturn static pg_noinline void add_size_error(Size s1, Size s2);
pg_noreturn static pg_noinline void mul_size_error(Size s1, Size s2);

/*
 * You should not do memory allocations within a critical section, because
 * an out-of-memory error will be escalated to a PANIC. To enforce that
 * rule, the allocation functions Assert that.
 */
#define AssertNotInCriticalSection(context) \
	Assert(CritSectionCount == 0 || (context)->allowInCritSection)

/*
 * Call the given function in the MemoryContextMethods for the memory context
 * type that 'pointer' belongs to.
 */
#define MCXT_METHOD(pointer, method) \
	mcxt_methods[GetMemoryChunkMethodID(pointer)].method
207 :
/*
 * GetMemoryChunkMethodID
 *		Return the MemoryContextMethodID from the uint64 chunk header which
 *		directly precedes 'pointer'.
 */
static inline MemoryContextMethodID
GetMemoryChunkMethodID(const void *pointer)
{
	uint64		header;

	/*
	 * Try to detect bogus pointers handed to us, poorly though we can.
	 * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
	 * allocated chunk.
	 */
	Assert(pointer == (const void *) MAXALIGN(pointer));

	/* Allow access to the uint64 header */
	VALGRIND_MAKE_MEM_DEFINED((char *) pointer - sizeof(uint64), sizeof(uint64));

	header = *((const uint64 *) ((const char *) pointer - sizeof(uint64)));

	/* Disallow access to the uint64 header */
	VALGRIND_MAKE_MEM_NOACCESS((char *) pointer - sizeof(uint64), sizeof(uint64));

	/* The method ID occupies the low-order bits of the header word */
	return (MemoryContextMethodID) (header & MEMORY_CONTEXT_METHODID_MASK);
}
235 :
/*
 * GetMemoryChunkHeader
 *		Return the uint64 chunk header which directly precedes 'pointer'.
 *
 * This is only used after GetMemoryChunkMethodID, so no need for error checks.
 * (Used by the Bogus* error reporters below to include the raw header word
 * in their messages.)
 */
static inline uint64
GetMemoryChunkHeader(const void *pointer)
{
	uint64		header;

	/* Allow access to the uint64 header */
	VALGRIND_MAKE_MEM_DEFINED((char *) pointer - sizeof(uint64), sizeof(uint64));

	header = *((const uint64 *) ((const char *) pointer - sizeof(uint64)));

	/* Disallow access to the uint64 header */
	VALGRIND_MAKE_MEM_NOACCESS((char *) pointer - sizeof(uint64), sizeof(uint64));

	return header;
}
257 :
258 : /*
259 : * MemoryContextTraverseNext
260 : * Helper function to traverse all descendants of a memory context
261 : * without recursion.
262 : *
263 : * Recursion could lead to out-of-stack errors with deep context hierarchies,
264 : * which would be unpleasant in error cleanup code paths.
265 : *
266 : * To process 'context' and all its descendants, use a loop like this:
267 : *
268 : * <process 'context'>
269 : * for (MemoryContext curr = context->firstchild;
270 : * curr != NULL;
271 : * curr = MemoryContextTraverseNext(curr, context))
272 : * {
273 : * <process 'curr'>
274 : * }
275 : *
276 : * This visits all the contexts in pre-order, that is a node is visited
277 : * before its children.
278 : */
279 : static MemoryContext
280 1421563 : MemoryContextTraverseNext(MemoryContext curr, MemoryContext top)
281 : {
282 : /* After processing a node, traverse to its first child if any */
283 1421563 : if (curr->firstchild != NULL)
284 0 : return curr->firstchild;
285 :
286 : /*
287 : * After processing a childless node, traverse to its next sibling if
288 : * there is one. If there isn't, traverse back up to the parent (which
289 : * has already been visited, and now so have all its descendants). We're
290 : * done if that is "top", otherwise traverse to its next sibling if any,
291 : * otherwise repeat moving up.
292 : */
293 1421563 : while (curr->nextchild == NULL)
294 : {
295 533652 : curr = curr->parent;
296 533652 : if (curr == top)
297 533652 : return NULL;
298 : }
299 887911 : return curr->nextchild;
300 : }
301 :
/*
 * Support routines to trap use of invalid memory context method IDs
 * (from calling pfree or the like on a bogus pointer).  As a possible
 * aid in debugging, we report the header word along with the pointer
 * address (if we got here, there must be an accessible header word).
 *
 * Each of these simply raises ERROR; the post-elog return statements
 * exist only to keep compilers happy about non-void return types.
 */
static void
BogusFree(void *pointer)
{
	elog(ERROR, "pfree called with invalid pointer %p (header 0x%016" PRIx64 ")",
		 pointer, GetMemoryChunkHeader(pointer));
}

static void *
BogusRealloc(void *pointer, Size size, int flags)
{
	elog(ERROR, "repalloc called with invalid pointer %p (header 0x%016" PRIx64 ")",
		 pointer, GetMemoryChunkHeader(pointer));
	return NULL;				/* keep compiler quiet */
}

static MemoryContext
BogusGetChunkContext(void *pointer)
{
	elog(ERROR, "GetMemoryChunkContext called with invalid pointer %p (header 0x%016" PRIx64 ")",
		 pointer, GetMemoryChunkHeader(pointer));
	return NULL;				/* keep compiler quiet */
}

static Size
BogusGetChunkSpace(void *pointer)
{
	elog(ERROR, "GetMemoryChunkSpace called with invalid pointer %p (header 0x%016" PRIx64 ")",
		 pointer, GetMemoryChunkHeader(pointer));
	return 0;					/* keep compiler quiet */
}
338 :
339 :
340 : /*****************************************************************************
341 : * EXPORTED ROUTINES *
342 : *****************************************************************************/
343 :
344 :
/*
 * MemoryContextInit
 *		Start up the memory-context subsystem.
 *
 * This must be called before creating contexts or allocating memory in
 * contexts.  TopMemoryContext and ErrorContext are initialized here;
 * other contexts must be created afterwards.
 *
 * In normal multi-backend operation, this is called once during
 * postmaster startup, and not at all by individual backend startup
 * (since the backends inherit an already-initialized context subsystem
 * by virtue of being forked off the postmaster).  But in an EXEC_BACKEND
 * build, each process must do this for itself.
 *
 * In a standalone backend this must be called during backend startup.
 */
void
MemoryContextInit(void)
{
	Assert(TopMemoryContext == NULL);

	/*
	 * First, initialize TopMemoryContext, which is the parent of all others.
	 */
	TopMemoryContext = AllocSetContextCreate((MemoryContext) NULL,
											 "TopMemoryContext",
											 ALLOCSET_DEFAULT_SIZES);

	/*
	 * Not having any other place to point CurrentMemoryContext, make it point
	 * to TopMemoryContext.  Caller should change this soon!
	 */
	CurrentMemoryContext = TopMemoryContext;

	/*
	 * Initialize ErrorContext as an AllocSetContext with slow growth rate ---
	 * we don't really expect much to be allocated in it.  More to the point,
	 * require it to contain at least 8K at all times.  This is the only case
	 * where retained memory in a context is *essential* --- we want to be
	 * sure ErrorContext still has some memory even if we've run out
	 * elsewhere!  Also, allow allocations in ErrorContext within a critical
	 * section.  Otherwise a PANIC will cause an assertion failure in the error
	 * reporting code, before printing out the real cause of the failure.
	 *
	 * This should be the last step in this function, as elog.c assumes memory
	 * management works once ErrorContext is non-null.
	 */
	ErrorContext = AllocSetContextCreate(TopMemoryContext,
										 "ErrorContext",
										 8 * 1024,
										 8 * 1024,
										 8 * 1024);
	MemoryContextAllowInCriticalSection(ErrorContext, true);
}
399 :
400 : /*
401 : * MemoryContextReset
402 : * Release all space allocated within a context and delete all its
403 : * descendant contexts (but not the named context itself).
404 : */
405 : void
406 227781402 : MemoryContextReset(MemoryContext context)
407 : {
408 : Assert(MemoryContextIsValid(context));
409 :
410 : /* save a function call in common case where there are no children */
411 227781402 : if (context->firstchild != NULL)
412 468667 : MemoryContextDeleteChildren(context);
413 :
414 : /* save a function call if no pallocs since startup or last reset */
415 227781402 : if (!context->isReset)
416 30927416 : MemoryContextResetOnly(context);
417 227781402 : }
418 :
419 : /*
420 : * MemoryContextResetOnly
421 : * Release all space allocated within a context.
422 : * Nothing is done to the context's descendant contexts.
423 : */
424 : void
425 34811621 : MemoryContextResetOnly(MemoryContext context)
426 : {
427 : Assert(MemoryContextIsValid(context));
428 :
429 : /* Nothing to do if no pallocs since startup or last reset */
430 34811621 : if (!context->isReset)
431 : {
432 34810801 : MemoryContextCallResetCallbacks(context);
433 :
434 : /*
435 : * If context->ident points into the context's memory, it will become
436 : * a dangling pointer. We could prevent that by setting it to NULL
437 : * here, but that would break valid coding patterns that keep the
438 : * ident elsewhere, e.g. in a parent context. So for now we assume
439 : * the programmer got it right.
440 : */
441 :
442 34810801 : context->methods->reset(context);
443 34810801 : context->isReset = true;
444 : }
445 34811621 : }
446 :
447 : /*
448 : * MemoryContextResetChildren
449 : * Release all space allocated within a context's descendants,
450 : * but don't delete the contexts themselves. The named context
451 : * itself is not touched.
452 : */
453 : void
454 0 : MemoryContextResetChildren(MemoryContext context)
455 : {
456 : Assert(MemoryContextIsValid(context));
457 :
458 0 : for (MemoryContext curr = context->firstchild;
459 0 : curr != NULL;
460 0 : curr = MemoryContextTraverseNext(curr, context))
461 : {
462 0 : MemoryContextResetOnly(curr);
463 : }
464 0 : }
465 :
/*
 * MemoryContextDelete
 *		Delete a context and its descendants, and release all space
 *		allocated therein.
 *
 * The type-specific delete routine removes all storage for the context,
 * but we have to deal with descendant nodes here.
 */
void
MemoryContextDelete(MemoryContext context)
{
	MemoryContext curr;

	Assert(MemoryContextIsValid(context));

	/*
	 * Delete subcontexts from the bottom up.
	 *
	 * Note: Do not use recursion here.  A "stack depth limit exceeded" error
	 * would be unpleasant if we're already in the process of cleaning up from
	 * transaction abort.  We also cannot use MemoryContextTraverseNext() here
	 * because we modify the tree as we go.
	 */
	curr = context;
	for (;;)
	{
		MemoryContext parent;

		/* Descend down until we find a leaf context with no children */
		while (curr->firstchild != NULL)
			curr = curr->firstchild;

		/*
		 * We're now at a leaf with no children. Free it and continue from the
		 * parent.  Or if this was the original node, we're all done.
		 * (MemoryContextDeleteOnly delinks curr from parent, so the next
		 * descent from parent finds the remaining children, if any.)
		 */
		parent = curr->parent;
		MemoryContextDeleteOnly(curr);

		if (curr == context)
			break;
		curr = parent;
	}
}
510 :
/*
 * Subroutine of MemoryContextDelete,
 *		to delete a context that has no children.
 * We must also delink the context from its parent, if it has one.
 */
static void
MemoryContextDeleteOnly(MemoryContext context)
{
	Assert(MemoryContextIsValid(context));
	/* We had better not be deleting TopMemoryContext ... */
	Assert(context != TopMemoryContext);
	/* And not CurrentMemoryContext, either */
	Assert(context != CurrentMemoryContext);
	/* All the children should've been deleted already */
	Assert(context->firstchild == NULL);

	/*
	 * It's not entirely clear whether 'tis better to do this before or after
	 * delinking the context; but an error in a callback will likely result in
	 * leaking the whole context (if it's not a root context) if we do it
	 * after, so let's do it before.
	 */
	MemoryContextCallResetCallbacks(context);

	/*
	 * We delink the context from its parent before deleting it, so that if
	 * there's an error we won't have deleted/busted contexts still attached
	 * to the context tree.  Better a leak than a crash.
	 */
	MemoryContextSetParent(context, NULL);

	/*
	 * Also reset the context's ident pointer, in case it points into the
	 * context.  This would only matter if someone tries to get stats on the
	 * (already unlinked) context, which is unlikely, but let's be safe.
	 */
	context->ident = NULL;

	/* Finally, let the context type release all its storage */
	context->methods->delete_context(context);
}
551 :
552 : /*
553 : * MemoryContextDeleteChildren
554 : * Delete all the descendants of the named context and release all
555 : * space allocated therein. The named context itself is not touched.
556 : */
557 : void
558 741132 : MemoryContextDeleteChildren(MemoryContext context)
559 : {
560 : Assert(MemoryContextIsValid(context));
561 :
562 : /*
563 : * MemoryContextDelete will delink the child from me, so just iterate as
564 : * long as there is a child.
565 : */
566 1270538 : while (context->firstchild != NULL)
567 529406 : MemoryContextDelete(context->firstchild);
568 741132 : }
569 :
570 : /*
571 : * MemoryContextRegisterResetCallback
572 : * Register a function to be called before next context reset/delete.
573 : * Such callbacks will be called in reverse order of registration.
574 : *
575 : * The caller is responsible for allocating a MemoryContextCallback struct
576 : * to hold the info about this callback request, and for filling in the
577 : * "func" and "arg" fields in the struct to show what function to call with
578 : * what argument. Typically the callback struct should be allocated within
579 : * the specified context, since that means it will automatically be freed
580 : * when no longer needed.
581 : *
582 : * Note that callers can assume this cannot fail.
583 : */
584 : void
585 66866 : MemoryContextRegisterResetCallback(MemoryContext context,
586 : MemoryContextCallback *cb)
587 : {
588 : Assert(MemoryContextIsValid(context));
589 :
590 : /* Push onto head so this will be called before older registrants. */
591 66866 : cb->next = context->reset_cbs;
592 66866 : context->reset_cbs = cb;
593 : /* Mark the context as non-reset (it probably is already). */
594 66866 : context->isReset = false;
595 66866 : }
596 :
597 : /*
598 : * MemoryContextUnregisterResetCallback
599 : * Undo the effects of MemoryContextRegisterResetCallback.
600 : *
601 : * This can be used if a callback's effects are no longer required
602 : * at some point before the context has been reset/deleted. It is the
603 : * caller's responsibility to pfree the callback struct (if needed).
604 : *
605 : * An assertion failure occurs if the callback was not registered.
606 : * We could alternatively define that case as a no-op, but that seems too
607 : * likely to mask programming errors such as passing the wrong context.
608 : */
609 : void
610 13930 : MemoryContextUnregisterResetCallback(MemoryContext context,
611 : MemoryContextCallback *cb)
612 : {
613 : MemoryContextCallback *prev,
614 : *cur;
615 :
616 : Assert(MemoryContextIsValid(context));
617 :
618 13962 : for (prev = NULL, cur = context->reset_cbs; cur != NULL;
619 32 : prev = cur, cur = cur->next)
620 : {
621 13962 : if (cur != cb)
622 32 : continue;
623 13930 : if (prev)
624 32 : prev->next = cur->next;
625 : else
626 13898 : context->reset_cbs = cur->next;
627 13930 : return;
628 : }
629 : Assert(false);
630 : }
631 :
632 : /*
633 : * MemoryContextCallResetCallbacks
634 : * Internal function to call all registered callbacks for context.
635 : */
636 : static void
637 42163638 : MemoryContextCallResetCallbacks(MemoryContext context)
638 : {
639 : MemoryContextCallback *cb;
640 :
641 : /*
642 : * We pop each callback from the list before calling. That way, if an
643 : * error occurs inside the callback, we won't try to call it a second time
644 : * in the likely event that we reset or delete the context later.
645 : */
646 42216647 : while ((cb = context->reset_cbs) != NULL)
647 : {
648 53009 : context->reset_cbs = cb->next;
649 53009 : cb->func(cb->arg);
650 : }
651 42163638 : }
652 :
/*
 * MemoryContextSetIdentifier
 *		Set the identifier string for a memory context.
 *
 * An identifier can be provided to help distinguish among different contexts
 * of the same kind in memory context stats dumps.  The identifier string
 * must live at least as long as the context it is for; typically it is
 * allocated inside that context, so that it automatically goes away on
 * context deletion.  Pass id = NULL to forget any old identifier.
 */
void
MemoryContextSetIdentifier(MemoryContext context, const char *id)
{
	Assert(MemoryContextIsValid(context));
	/* We just store the pointer; lifetime management is the caller's job */
	context->ident = id;
}
669 :
/*
 * MemoryContextSetParent
 *		Change a context to belong to a new parent (or no parent).
 *
 * We provide this as an API function because it is sometimes useful to
 * change a context's lifespan after creation.  For example, a context
 * might be created underneath a transient context, filled with data,
 * and then reparented underneath CacheMemoryContext to make it long-lived.
 * In this way no special effort is needed to get rid of the context in case
 * a failure occurs before its contents are completely set up.
 *
 * Callers often assume that this function cannot fail, so don't put any
 * elog(ERROR) calls in it.
 *
 * A possible caller error is to reparent a context under itself, creating
 * a loop in the context graph.  We assert here that context != new_parent,
 * but checking for multi-level loops seems more trouble than it's worth.
 */
void
MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
{
	Assert(MemoryContextIsValid(context));
	Assert(context != new_parent);

	/* Fast path if it's got correct parent already */
	if (new_parent == context->parent)
		return;

	/* Delink from existing parent, if any */
	if (context->parent)
	{
		MemoryContext parent = context->parent;

		/* Splice our predecessor (or the parent's child-list head) past us */
		if (context->prevchild != NULL)
			context->prevchild->nextchild = context->nextchild;
		else
		{
			Assert(parent->firstchild == context);
			parent->firstchild = context->nextchild;
		}

		if (context->nextchild != NULL)
			context->nextchild->prevchild = context->prevchild;
	}

	/* And relink */
	if (new_parent)
	{
		Assert(MemoryContextIsValid(new_parent));
		/* Push onto the head of the new parent's doubly-linked child list */
		context->parent = new_parent;
		context->prevchild = NULL;
		context->nextchild = new_parent->firstchild;
		if (new_parent->firstchild != NULL)
			new_parent->firstchild->prevchild = context;
		new_parent->firstchild = context;
	}
	else
	{
		/* Becoming a root context: clear all tree links */
		context->parent = NULL;
		context->prevchild = NULL;
		context->nextchild = NULL;
	}
}
733 :
/*
 * MemoryContextAllowInCriticalSection
 *		Allow/disallow allocations in this memory context within a critical
 *		section.
 *
 * Normally, memory allocations are not allowed within a critical section,
 * because a failure would lead to PANIC.  There are a few exceptions to
 * that, like allocations related to debugging code that is not supposed to
 * be enabled in production.  This function can be used to exempt specific
 * memory contexts from the assertion in palloc()
 * (see AssertNotInCriticalSection above).
 */
void
MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
{
	Assert(MemoryContextIsValid(context));

	context->allowInCritSection = allow;
}
752 :
/*
 * GetMemoryChunkContext
 *		Given a currently-allocated chunk, determine the MemoryContext that
 *		the chunk belongs to.
 *
 * Dispatches via the method ID stored in the chunk's header word; an
 * invalid ID lands in BogusGetChunkContext and raises ERROR.
 */
MemoryContext
GetMemoryChunkContext(void *pointer)
{
	return MCXT_METHOD(pointer, get_chunk_context) (pointer);
}
763 :
/*
 * GetMemoryChunkSpace
 *		Given a currently-allocated chunk, determine the total space
 *		it occupies (including all memory-allocation overhead).
 *
 * This is useful for measuring the total space occupied by a set of
 * allocated chunks.  Dispatches via the chunk's header method ID; an
 * invalid ID lands in BogusGetChunkSpace and raises ERROR.
 */
Size
GetMemoryChunkSpace(void *pointer)
{
	return MCXT_METHOD(pointer, get_chunk_space) (pointer);
}
777 :
/*
 * MemoryContextGetParent
 *		Get the parent context (if any) of the specified context.
 *		Returns NULL for a root context such as TopMemoryContext.
 */
MemoryContext
MemoryContextGetParent(MemoryContext context)
{
	Assert(MemoryContextIsValid(context));

	return context->parent;
}
789 :
790 : /*
791 : * MemoryContextIsEmpty
792 : * Is a memory context empty of any allocated space?
793 : */
794 : bool
795 5625 : MemoryContextIsEmpty(MemoryContext context)
796 : {
797 : Assert(MemoryContextIsValid(context));
798 :
799 : /*
800 : * For now, we consider a memory context nonempty if it has any children;
801 : * perhaps this should be changed later.
802 : */
803 5625 : if (context->firstchild != NULL)
804 2 : return false;
805 : /* Otherwise use the type-specific inquiry */
806 5623 : return context->methods->is_empty(context);
807 : }
808 :
809 : /*
810 : * Find the memory allocated to blocks for this memory context. If recurse is
811 : * true, also include children.
812 : */
813 : Size
814 1260486 : MemoryContextMemAllocated(MemoryContext context, bool recurse)
815 : {
816 1260486 : Size total = context->mem_allocated;
817 :
818 : Assert(MemoryContextIsValid(context));
819 :
820 1260486 : if (recurse)
821 : {
822 1260486 : for (MemoryContext curr = context->firstchild;
823 2682029 : curr != NULL;
824 1421543 : curr = MemoryContextTraverseNext(curr, context))
825 : {
826 1421543 : total += curr->mem_allocated;
827 : }
828 : }
829 :
830 1260486 : return total;
831 : }
832 :
833 : /*
834 : * Return the memory consumption statistics about the given context and its
835 : * children.
836 : */
837 : void
838 20 : MemoryContextMemConsumed(MemoryContext context,
839 : MemoryContextCounters *consumed)
840 : {
841 : Assert(MemoryContextIsValid(context));
842 :
843 20 : memset(consumed, 0, sizeof(*consumed));
844 :
845 : /* Examine the context itself */
846 20 : context->methods->stats(context, NULL, NULL, consumed, false);
847 :
848 : /* Examine children, using iteration not recursion */
849 20 : for (MemoryContext curr = context->firstchild;
850 20 : curr != NULL;
851 0 : curr = MemoryContextTraverseNext(curr, context))
852 : {
853 0 : curr->methods->stats(curr, NULL, NULL, consumed, false);
854 : }
855 20 : }
856 :
/*
 * MemoryContextStats
 *		Print statistics about the named context and all its descendants.
 *
 * This is just a debugging utility, so it's not very fancy.  However, we do
 * make some effort to summarize when the output would otherwise be very long.
 * The statistics are sent to stderr.
 */
void
MemoryContextStats(MemoryContext context)
{
	/* Hard-wired limits are usually good enough */
	MemoryContextStatsDetail(context, 100, 100, true);
}
871 :
/*
 * MemoryContextStatsDetail
 *
 * Entry point for use if you want to vary the number of child contexts shown.
 *
 * If print_to_stderr is true, print statistics about the memory contexts
 * with fprintf(stderr), otherwise use ereport().
 */
void
MemoryContextStatsDetail(MemoryContext context,
						 int max_level, int max_children,
						 bool print_to_stderr)
{
	MemoryContextCounters grand_totals;

	memset(&grand_totals, 0, sizeof(grand_totals));

	/* Recursively print per-context stats, accumulating grand totals */
	MemoryContextStatsInternal(context, 1, max_level, max_children,
							   &grand_totals, print_to_stderr);

	if (print_to_stderr)
		fprintf(stderr,
				"Grand total: %zu bytes in %zu blocks; %zu free (%zu chunks); %zu used\n",
				grand_totals.totalspace, grand_totals.nblocks,
				grand_totals.freespace, grand_totals.freechunks,
				grand_totals.totalspace - grand_totals.freespace);
	else
	{
		/*
		 * Use LOG_SERVER_ONLY to prevent the memory contexts from being sent
		 * to the connected client.
		 *
		 * We don't buffer the information about all memory contexts in a
		 * backend into StringInfo and log it as one message. That would
		 * require the buffer to be enlarged, risking an OOM as there could be
		 * a large number of memory contexts in a backend. Instead, we log
		 * one message per memory context.
		 */
		ereport(LOG_SERVER_ONLY,
				(errhidestmt(true),
				 errhidecontext(true),
				 errmsg_internal("Grand total: %zu bytes in %zu blocks; %zu free (%zu chunks); %zu used",
								 grand_totals.totalspace, grand_totals.nblocks,
								 grand_totals.freespace, grand_totals.freechunks,
								 grand_totals.totalspace - grand_totals.freespace)));
	}
}
919 :
/*
 * MemoryContextStatsInternal
 *		One recursion level for MemoryContextStats
 *
 * Print stats for this context if possible, but in any case accumulate counts
 * into *totals (if not NULL).
 */
static void
MemoryContextStatsInternal(MemoryContext context, int level,
						   int max_level, int max_children,
						   MemoryContextCounters *totals,
						   bool print_to_stderr)
{
	MemoryContext child;
	int			ichild;

	Assert(MemoryContextIsValid(context));

	/* Examine the context itself */
	context->methods->stats(context,
							MemoryContextStatsPrint,
							&level,
							totals, print_to_stderr);

	/*
	 * Examine children.
	 *
	 * If we are past the recursion depth limit or already running low on
	 * stack, do not print them explicitly but just summarize them. Similarly,
	 * if there are more than max_children of them, we do not print the rest
	 * explicitly, but just summarize them.
	 */
	child = context->firstchild;
	ichild = 0;
	if (level <= max_level && !stack_is_too_deep())
	{
		for (; child != NULL && ichild < max_children;
			 child = child->nextchild, ichild++)
		{
			MemoryContextStatsInternal(child, level + 1,
									   max_level, max_children,
									   totals,
									   print_to_stderr);
		}
	}

	/* If we stopped early, 'child' is the first context not yet printed */
	if (child != NULL)
	{
		/* Summarize the rest of the children, avoiding recursion. */
		MemoryContextCounters local_totals;

		memset(&local_totals, 0, sizeof(local_totals));

		ichild = 0;
		while (child != NULL)
		{
			child->methods->stats(child, NULL, NULL, &local_totals, false);
			ichild++;
			/* iterative traversal also covers the remaining descendants */
			child = MemoryContextTraverseNext(child, context);
		}

		if (print_to_stderr)
		{
			for (int i = 0; i < level; i++)
				fprintf(stderr, " ");
			fprintf(stderr,
					"%d more child contexts containing %zu total in %zu blocks; %zu free (%zu chunks); %zu used\n",
					ichild,
					local_totals.totalspace,
					local_totals.nblocks,
					local_totals.freespace,
					local_totals.freechunks,
					local_totals.totalspace - local_totals.freespace);
		}
		else
			ereport(LOG_SERVER_ONLY,
					(errhidestmt(true),
					 errhidecontext(true),
					 errmsg_internal("level: %d; %d more child contexts containing %zu total in %zu blocks; %zu free (%zu chunks); %zu used",
									 level,
									 ichild,
									 local_totals.totalspace,
									 local_totals.nblocks,
									 local_totals.freespace,
									 local_totals.freechunks,
									 local_totals.totalspace - local_totals.freespace)));

		/* Fold the summarized children into the caller's running totals */
		if (totals)
		{
			totals->nblocks += local_totals.nblocks;
			totals->freechunks += local_totals.freechunks;
			totals->totalspace += local_totals.totalspace;
			totals->freespace += local_totals.freespace;
		}
	}
}
1016 :
/*
 * MemoryContextStatsPrint
 *		Print callback used by MemoryContextStatsInternal
 *
 * For now, the passthru pointer just points to "int level"; later we might
 * make that more complicated.
 */
static void
MemoryContextStatsPrint(MemoryContext context, void *passthru,
						const char *stats_string,
						bool print_to_stderr)
{
	int			level = *(int *) passthru;
	const char *name = context->name;
	const char *ident = context->ident;
	char		truncated_ident[110];	/* ": " + 100 bytes + "..." + NUL */
	int			i;

	/*
	 * It seems preferable to label dynahash contexts with just the hash table
	 * name. Those are already unique enough, so the "dynahash" part isn't
	 * very helpful, and this way is more consistent with pre-v11 practice.
	 */
	if (ident && strcmp(name, "dynahash") == 0)
	{
		name = ident;
		ident = NULL;
	}

	truncated_ident[0] = '\0';

	if (ident)
	{
		/*
		 * Some contexts may have very long identifiers (e.g., SQL queries).
		 * Arbitrarily truncate at 100 bytes, but be careful not to break
		 * multibyte characters. Also, replace ASCII control characters, such
		 * as newlines, with spaces.
		 */
		int			idlen = strlen(ident);
		bool		truncated = false;

		strcpy(truncated_ident, ": ");
		i = strlen(truncated_ident);

		if (idlen > 100)
		{
			/* pg_mbcliplen shortens to a multibyte-character boundary */
			idlen = pg_mbcliplen(ident, idlen, 100);
			truncated = true;
		}

		/* copy byte-by-byte, blanking out ASCII control characters */
		while (idlen-- > 0)
		{
			unsigned char c = *ident++;

			if (c < ' ')
				c = ' ';
			truncated_ident[i++] = c;
		}
		truncated_ident[i] = '\0';

		if (truncated)
			strcat(truncated_ident, "...");
	}

	if (print_to_stderr)
	{
		for (i = 1; i < level; i++)
			fprintf(stderr, " ");
		fprintf(stderr, "%s: %s%s\n", name, stats_string, truncated_ident);
	}
	else
		ereport(LOG_SERVER_ONLY,
				(errhidestmt(true),
				 errhidecontext(true),
				 errmsg_internal("level: %d; %s: %s%s",
								 level, name, stats_string, truncated_ident)));
}
1095 :
/*
 * MemoryContextCheck
 *		Run the type-specific consistency check on the named context and
 *		every one of its descendants.
 *
 * This is just a debugging utility, so it's not fancy.
 */
#ifdef MEMORY_CONTEXT_CHECKING
void
MemoryContextCheck(MemoryContext context)
{
	MemoryContext curr;

	Assert(MemoryContextIsValid(context));
	context->methods->check(context);

	/* walk all descendants iteratively */
	curr = context->firstchild;
	while (curr != NULL)
	{
		Assert(MemoryContextIsValid(curr));
		curr->methods->check(curr);
		curr = MemoryContextTraverseNext(curr, context);
	}
}
#endif
1118 :
1119 : /*
1120 : * MemoryContextCreate
1121 : * Context-type-independent part of context creation.
1122 : *
1123 : * This is only intended to be called by context-type-specific
1124 : * context creation routines, not by the unwashed masses.
1125 : *
1126 : * The memory context creation procedure goes like this:
1127 : * 1. Context-type-specific routine makes some initial space allocation,
1128 : * including enough space for the context header. If it fails,
1129 : * it can ereport() with no damage done.
1130 : * 2. Context-type-specific routine sets up all type-specific fields of
1131 : * the header (those beyond MemoryContextData proper), as well as any
1132 : * other management fields it needs to have a fully valid context.
1133 : * Usually, failure in this step is impossible, but if it's possible
1134 : * the initial space allocation should be freed before ereport'ing.
1135 : * 3. Context-type-specific routine calls MemoryContextCreate() to fill in
1136 : * the generic header fields and link the context into the context tree.
1137 : * 4. We return to the context-type-specific routine, which finishes
1138 : * up type-specific initialization. This routine can now do things
1139 : * that might fail (like allocate more memory), so long as it's
1140 : * sure the node is left in a state that delete will handle.
1141 : *
1142 : * node: the as-yet-uninitialized common part of the context header node.
1143 : * tag: NodeTag code identifying the memory context type.
1144 : * method_id: MemoryContextMethodID of the context-type being created.
1145 : * parent: parent context, or NULL if this will be a top-level context.
1146 : * name: name of context (must be statically allocated).
1147 : *
1148 : * Context routines generally assume that MemoryContextCreate can't fail,
1149 : * so this can contain Assert but not elog/ereport.
1150 : */
1151 : void
1152 9545471 : MemoryContextCreate(MemoryContext node,
1153 : NodeTag tag,
1154 : MemoryContextMethodID method_id,
1155 : MemoryContext parent,
1156 : const char *name)
1157 : {
1158 : /* Creating new memory contexts is not allowed in a critical section */
1159 : Assert(CritSectionCount == 0);
1160 :
1161 : /* Validate parent, to help prevent crazy context linkages */
1162 : Assert(parent == NULL || MemoryContextIsValid(parent));
1163 : Assert(node != parent);
1164 :
1165 : /* Initialize all standard fields of memory context header */
1166 9545471 : node->type = tag;
1167 9545471 : node->isReset = true;
1168 9545471 : node->methods = &mcxt_methods[method_id];
1169 9545471 : node->parent = parent;
1170 9545471 : node->firstchild = NULL;
1171 9545471 : node->mem_allocated = 0;
1172 9545471 : node->prevchild = NULL;
1173 9545471 : node->name = name;
1174 9545471 : node->ident = NULL;
1175 9545471 : node->reset_cbs = NULL;
1176 :
1177 : /* OK to link node into context tree */
1178 9545471 : if (parent)
1179 : {
1180 9543243 : node->nextchild = parent->firstchild;
1181 9543243 : if (parent->firstchild != NULL)
1182 5478593 : parent->firstchild->prevchild = node;
1183 9543243 : parent->firstchild = node;
1184 : /* inherit allowInCritSection flag from parent */
1185 9543243 : node->allowInCritSection = parent->allowInCritSection;
1186 : }
1187 : else
1188 : {
1189 2228 : node->nextchild = NULL;
1190 2228 : node->allowInCritSection = false;
1191 : }
1192 9545471 : }
1193 :
1194 : /*
1195 : * MemoryContextAllocationFailure
1196 : * For use by MemoryContextMethods implementations to handle when malloc
1197 : * returns NULL. The behavior is specific to whether MCXT_ALLOC_NO_OOM
1198 : * is in 'flags'.
1199 : */
1200 : void *
1201 0 : MemoryContextAllocationFailure(MemoryContext context, Size size, int flags)
1202 : {
1203 0 : if ((flags & MCXT_ALLOC_NO_OOM) == 0)
1204 : {
1205 0 : if (TopMemoryContext)
1206 0 : MemoryContextStats(TopMemoryContext);
1207 0 : ereport(ERROR,
1208 : (errcode(ERRCODE_OUT_OF_MEMORY),
1209 : errmsg("out of memory"),
1210 : errdetail("Failed on request of size %zu in memory context \"%s\".",
1211 : size, context->name)));
1212 : }
1213 0 : return NULL;
1214 : }
1215 :
/*
 * MemoryContextSizeFailure
 *		For use by MemoryContextMethods implementations to handle invalid
 *		memory allocation request sizes.
 *
 * Does not return: elog(ERROR) throws.
 */
void
MemoryContextSizeFailure(MemoryContext context, Size size, int flags)
{
	elog(ERROR, "invalid memory alloc request size %zu", size);
}
1226 :
/*
 * MemoryContextAlloc
 *		Allocate space within the specified context.
 *
 * This could be turned into a macro, but we'd have to import
 * nodes/memnodes.h into postgres.h which seems a bad idea.
 */
void *
MemoryContextAlloc(MemoryContext context, Size size)
{
	void	   *ret;

	Assert(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	/* the context now contains at least one allocation */
	context->isReset = false;

	/*
	 * For efficiency reasons, we purposefully offload the handling of
	 * allocation failures to the MemoryContextMethods implementation as this
	 * allows these checks to be performed only when an actual malloc needs to
	 * be done to request more memory from the OS. Additionally, not having
	 * to execute any instructions after this call allows the compiler to use
	 * the sibling call optimization. If you're considering adding code after
	 * this call, consider making it the responsibility of the 'alloc'
	 * function instead.
	 */
	ret = context->methods->alloc(context, size, 0);

	/* create a Valgrind vchunk covering the caller-visible allocation */
	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	return ret;
}
1260 :
/*
 * MemoryContextAllocZero
 *		Like MemoryContextAlloc, but clears allocated memory
 *
 *	We could just call MemoryContextAlloc then clear the memory, but this
 *	is a very common combination, so we provide the combined operation.
 */
void *
MemoryContextAllocZero(MemoryContext context, Size size)
{
	void	   *ret;

	Assert(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	/* the context now contains at least one allocation */
	context->isReset = false;

	ret = context->methods->alloc(context, size, 0);

	/* create a Valgrind vchunk covering the caller-visible allocation */
	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	MemSetAligned(ret, 0, size);

	return ret;
}
1286 :
/*
 * MemoryContextAllocExtended
 *		Allocate space within the specified context using the given flags.
 *
 * Supports MCXT_ALLOC_HUGE (allow requests beyond the normal size limit),
 * MCXT_ALLOC_NO_OOM (return NULL instead of erroring on failure), and
 * MCXT_ALLOC_ZERO (zero the result).
 */
void *
MemoryContextAllocExtended(MemoryContext context, Size size, int flags)
{
	void	   *ret;

	Assert(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	/* size-limit check depends on whether huge allocations are allowed */
	if (!((flags & MCXT_ALLOC_HUGE) != 0 ? AllocHugeSizeIsValid(size) :
		  AllocSizeIsValid(size)))
		elog(ERROR, "invalid memory alloc request size %zu", size);

	/* the context now contains at least one allocation */
	context->isReset = false;

	ret = context->methods->alloc(context, size, flags);
	if (unlikely(ret == NULL))
		return NULL;			/* only possible with MCXT_ALLOC_NO_OOM */

	/* create a Valgrind vchunk covering the caller-visible allocation */
	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	if ((flags & MCXT_ALLOC_ZERO) != 0)
		MemSetAligned(ret, 0, size);

	return ret;
}
1316 :
/*
 * HandleLogMemoryContextInterrupt
 *		Handle receipt of an interrupt indicating logging of memory
 *		contexts.
 *
 * All the actual work is deferred to ProcessLogMemoryContextInterrupt(),
 * because we cannot safely emit a log message inside the signal handler.
 */
void
HandleLogMemoryContextInterrupt(void)
{
	/* set flags for the next CHECK_FOR_INTERRUPTS() to act on */
	InterruptPending = true;
	LogMemoryContextPending = true;
	/* latch will be set by procsignal_sigusr1_handler */
}
1332 :
/*
 * ProcessLogMemoryContextInterrupt
 * 		Perform logging of memory contexts of this backend process.
 *
 * Any backend that participates in ProcSignal signaling must arrange
 * to call this function if we see LogMemoryContextPending set.
 * It is called from CHECK_FOR_INTERRUPTS(), which is enough because
 * the target process for logging of memory contexts is a backend.
 */
void
ProcessLogMemoryContextInterrupt(void)
{
	LogMemoryContextPending = false;

	/*
	 * Exit immediately if memory context logging is already in progress. This
	 * prevents recursive calls, which could occur if logging is requested
	 * repeatedly and rapidly, potentially leading to infinite recursion and a
	 * crash.
	 */
	if (LogMemoryContextInProgress)
		return;
	LogMemoryContextInProgress = true;

	/* PG_TRY ensures the in-progress flag is cleared even on error */
	PG_TRY();
	{
		/*
		 * Use LOG_SERVER_ONLY to prevent this message from being sent to the
		 * connected client.
		 */
		ereport(LOG_SERVER_ONLY,
				(errhidestmt(true),
				 errhidecontext(true),
				 errmsg("logging memory contexts of PID %d", MyProcPid)));

		/*
		 * When a backend process is consuming huge memory, logging all its
		 * memory contexts might overrun available disk space. To prevent
		 * this, we limit the depth of the hierarchy, as well as the number of
		 * child contexts to log per parent to 100.
		 *
		 * As with MemoryContextStats(), we suppose that practical cases where
		 * the dump gets long will typically be huge numbers of siblings under
		 * the same parent context; while the additional debugging value from
		 * seeing details about individual siblings beyond 100 will not be
		 * large.
		 */
		MemoryContextStatsDetail(TopMemoryContext, 100, 100, false);
	}
	PG_FINALLY();
	{
		LogMemoryContextInProgress = false;
	}
	PG_END_TRY();
}
1388 :
/*
 * palloc
 *		Allocate space in CurrentMemoryContext.
 */
void *
palloc(Size size)
{
	/* duplicates MemoryContextAlloc to avoid increased overhead */
	void	   *ret;
	MemoryContext context = CurrentMemoryContext;

	Assert(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	/* the context now contains at least one allocation */
	context->isReset = false;

	/*
	 * For efficiency reasons, we purposefully offload the handling of
	 * allocation failures to the MemoryContextMethods implementation as this
	 * allows these checks to be performed only when an actual malloc needs to
	 * be done to request more memory from the OS. Additionally, not having
	 * to execute any instructions after this call allows the compiler to use
	 * the sibling call optimization. If you're considering adding code after
	 * this call, consider making it the responsibility of the 'alloc'
	 * function instead.
	 */
	ret = context->methods->alloc(context, size, 0);
	/* We expect OOM to be handled by the alloc function */
	Assert(ret != NULL);
	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	return ret;
}
1418 :
/*
 * palloc0
 *		Allocate zero-filled space in CurrentMemoryContext.
 */
void *
palloc0(Size size)
{
	/* duplicates MemoryContextAllocZero to avoid increased overhead */
	void	   *ret;
	MemoryContext context = CurrentMemoryContext;

	Assert(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	/* the context now contains at least one allocation */
	context->isReset = false;

	ret = context->methods->alloc(context, size, 0);
	/* We expect OOM to be handled by the alloc function */
	Assert(ret != NULL);
	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	MemSetAligned(ret, 0, size);

	return ret;
}
1440 :
/*
 * palloc_extended
 *		Allocate space in CurrentMemoryContext using the given flags
 *		(see MemoryContextAllocExtended).
 */
void *
palloc_extended(Size size, int flags)
{
	/* duplicates MemoryContextAllocExtended to avoid increased overhead */
	void	   *ret;
	MemoryContext context = CurrentMemoryContext;

	Assert(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	/* the context now contains at least one allocation */
	context->isReset = false;

	ret = context->methods->alloc(context, size, flags);
	if (unlikely(ret == NULL))
	{
		/* NULL can be returned only when using MCXT_ALLOC_NO_OOM */
		Assert(flags & MCXT_ALLOC_NO_OOM);
		return NULL;
	}

	/* create a Valgrind vchunk covering the caller-visible allocation */
	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	if ((flags & MCXT_ALLOC_ZERO) != 0)
		MemSetAligned(ret, 0, size);

	return ret;
}
1468 :
/*
 * MemoryContextAllocAligned
 *		Allocate 'size' bytes of memory in 'context' aligned to 'alignto'
 *		bytes.
 *
 * Currently, we align addresses by requesting additional bytes from the
 * MemoryContext's standard allocator function and then aligning the returned
 * address by the required alignment.  This means that the given MemoryContext
 * must support providing us with a chunk of memory that's larger than 'size'.
 * For allocators such as Slab, that's not going to work, as slab only allows
 * chunks of the size that's specified when the context is created.
 *
 * 'alignto' must be a power of 2.
 * 'flags' may be 0 or set the same as MemoryContextAllocExtended().
 */
void *
MemoryContextAllocAligned(MemoryContext context,
						  Size size, Size alignto, int flags)
{
	MemoryChunk *alignedchunk;
	Size		alloc_size;
	void	   *unaligned;
	void	   *aligned;

	/*
	 * Restrict alignto to ensure that it can fit into the "value" field of
	 * the redirection MemoryChunk, and that the distance back to the start of
	 * the unaligned chunk will fit into the space available for that.  This
	 * isn't a limitation in practice, since it wouldn't make much sense to
	 * waste that much space.
	 */
	Assert(alignto < (128 * 1024 * 1024));

	/* ensure alignto is a power of 2 */
	Assert((alignto & (alignto - 1)) == 0);

	/*
	 * If the alignment requirements are less than what we already guarantee
	 * then just use the standard allocation function.
	 */
	if (unlikely(alignto <= MAXIMUM_ALIGNOF))
		return MemoryContextAllocExtended(context, size, flags);

	/*
	 * We implement aligned pointers by simply allocating enough memory for
	 * the requested size plus the alignment and an additional "redirection"
	 * MemoryChunk.  This additional MemoryChunk is required for operations
	 * such as pfree when used on the pointer returned by this function.  We
	 * use this redirection MemoryChunk in order to find the pointer to the
	 * memory that was returned by the MemoryContextAllocExtended call below.
	 * We do that by "borrowing" the block offset field and instead of using
	 * that to find the offset into the owning block, we use it to find the
	 * original allocated address.
	 *
	 * Here we must allocate enough extra memory so that we can still align
	 * the pointer returned by MemoryContextAllocExtended and also have enough
	 * space for the redirection MemoryChunk.  Since allocations will already
	 * be at least aligned by MAXIMUM_ALIGNOF, we can subtract that amount
	 * from the allocation size to save a little memory.
	 */
	alloc_size = size + PallocAlignedExtraBytes(alignto);

#ifdef MEMORY_CONTEXT_CHECKING
	/* ensure there's space for a sentinel byte */
	alloc_size += 1;
#endif

	/*
	 * Perform the actual allocation, but do not pass down MCXT_ALLOC_ZERO.
	 * This ensures that wasted bytes beyond the aligned chunk do not become
	 * DEFINED.
	 */
	unaligned = MemoryContextAllocExtended(context, alloc_size,
										   flags & ~MCXT_ALLOC_ZERO);

	/* compute the aligned pointer (leaving room for the redirection chunk) */
	aligned = (void *) TYPEALIGN(alignto, (char *) unaligned +
								 sizeof(MemoryChunk));

	alignedchunk = PointerGetMemoryChunk(aligned);

	/*
	 * We set the redirect MemoryChunk so that the block offset calculation is
	 * used to point back to the 'unaligned' allocated chunk.  This allows us
	 * to use MemoryChunkGetBlock() to find the unaligned chunk when we need
	 * to perform operations such as pfree() and repalloc().
	 *
	 * We store 'alignto' in the MemoryChunk's 'value' so that we know what
	 * the alignment was set to should we ever be asked to realloc this
	 * pointer.
	 */
	MemoryChunkSetHdrMask(alignedchunk, unaligned, alignto,
						  MCTX_ALIGNED_REDIRECT_ID);

	/* double check we produced a correctly aligned pointer */
	Assert((void *) TYPEALIGN(alignto, aligned) == aligned);

#ifdef MEMORY_CONTEXT_CHECKING
	alignedchunk->requested_size = size;
	/* set mark to catch clobber of "unused" space */
	set_sentinel(aligned, size);
#endif

	/*
	 * MemoryContextAllocExtended marked the whole unaligned chunk as a
	 * vchunk.  Undo that, instead making just the aligned chunk be a vchunk.
	 * This prevents Valgrind from complaining that the vchunk is possibly
	 * leaked, since only pointers to the aligned chunk will exist.
	 *
	 * After these calls, the aligned chunk will be marked UNDEFINED, and all
	 * the rest of the unaligned chunk (the redirection chunk header, the
	 * padding bytes before it, and any wasted trailing bytes) will be marked
	 * NOACCESS, which is what we want.
	 */
	VALGRIND_MEMPOOL_FREE(context, unaligned);
	VALGRIND_MEMPOOL_ALLOC(context, aligned, size);

	/* Now zero (and make DEFINED) just the aligned chunk, if requested */
	if ((flags & MCXT_ALLOC_ZERO) != 0)
		MemSetAligned(aligned, 0, size);

	return aligned;
}
1592 :
1593 : /*
1594 : * palloc_aligned
1595 : * Allocate 'size' bytes returning a pointer that's aligned to the
1596 : * 'alignto' boundary.
1597 : *
1598 : * Currently, we align addresses by requesting additional bytes from the
1599 : * MemoryContext's standard allocator function and then aligning the returned
1600 : * address by the required alignment. This means that the given MemoryContext
1601 : * must support providing us with a chunk of memory that's larger than 'size'.
1602 : * For allocators such as Slab, that's not going to work, as slab only allows
1603 : * chunks of the size that's specified when the context is created.
1604 : *
1605 : * 'alignto' must be a power of 2.
1606 : * 'flags' may be 0 or set the same as MemoryContextAllocExtended().
1607 : */
1608 : void *
1609 1981078 : palloc_aligned(Size size, Size alignto, int flags)
1610 : {
1611 1981078 : return MemoryContextAllocAligned(CurrentMemoryContext, size, alignto, flags);
1612 : }
1613 :
/*
 * pfree
 *		Release an allocated chunk.
 */
void
pfree(void *pointer)
{
#ifdef USE_VALGRIND
	/* look up the owning context before free_p invalidates the chunk */
	MemoryContext context = GetMemoryChunkContext(pointer);
#endif

	MCXT_METHOD(pointer, free_p) (pointer);

	/* delete the Valgrind vchunk covering this allocation */
	VALGRIND_MEMPOOL_FREE(context, pointer);
}
1629 :
/*
 * repalloc
 *		Adjust the size of a previously allocated chunk.
 */
void *
repalloc(void *pointer, Size size)
{
#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
	/* only needed for the assertion and/or the Valgrind bookkeeping below */
	MemoryContext context = GetMemoryChunkContext(pointer);
#endif
	void	   *ret;

	AssertNotInCriticalSection(context);

	/* isReset must be false already */
	Assert(!context->isReset);

	/*
	 * For efficiency reasons, we purposefully offload the handling of
	 * allocation failures to the MemoryContextMethods implementation as this
	 * allows these checks to be performed only when an actual malloc needs to
	 * be done to request more memory from the OS. Additionally, not having
	 * to execute any instructions after this call allows the compiler to use
	 * the sibling call optimization. If you're considering adding code after
	 * this call, consider making it the responsibility of the 'realloc'
	 * function instead.
	 */
	ret = MCXT_METHOD(pointer, realloc) (pointer, size, 0);

	/* move/resize the Valgrind vchunk to match the new allocation */
	VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);

	return ret;
}
1663 :
/*
 * repalloc_extended
 *		Adjust the size of a previously allocated chunk,
 *		with HUGE and NO_OOM options.
 */
void *
repalloc_extended(void *pointer, Size size, int flags)
{
#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
	/* only needed for the assertion and/or the Valgrind bookkeeping below */
	MemoryContext context = GetMemoryChunkContext(pointer);
#endif
	void	   *ret;

	AssertNotInCriticalSection(context);

	/* isReset must be false already */
	Assert(!context->isReset);

	/*
	 * For efficiency reasons, we purposefully offload the handling of
	 * allocation failures to the MemoryContextMethods implementation as this
	 * allows these checks to be performed only when an actual malloc needs to
	 * be done to request more memory from the OS. Additionally, not having
	 * to execute any instructions after this call allows the compiler to use
	 * the sibling call optimization. If you're considering adding code after
	 * this call, consider making it the responsibility of the 'realloc'
	 * function instead.
	 */
	ret = MCXT_METHOD(pointer, realloc) (pointer, size, flags);
	if (unlikely(ret == NULL))
		return NULL;			/* only possible with MCXT_ALLOC_NO_OOM */

	/* move/resize the Valgrind vchunk to match the new allocation */
	VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);

	return ret;
}
1700 :
1701 : /*
1702 : * repalloc0
1703 : * Adjust the size of a previously allocated chunk and zero out the added
1704 : * space.
1705 : */
1706 : void *
1707 41593 : repalloc0(void *pointer, Size oldsize, Size size)
1708 : {
1709 : void *ret;
1710 :
1711 : /* catch wrong argument order */
1712 41593 : if (unlikely(oldsize > size))
1713 0 : elog(ERROR, "invalid repalloc0 call: oldsize %zu, new size %zu",
1714 : oldsize, size);
1715 :
1716 41593 : ret = repalloc(pointer, size);
1717 41593 : memset((char *) ret + oldsize, 0, (size - oldsize));
1718 41593 : return ret;
1719 : }
1720 :
1721 : /*
1722 : * Support for safe calculation of memory request sizes
1723 : *
1724 : * These functions perform the requested calculation, but throw error if the
1725 : * result overflows.
1726 : *
1727 : * An important property of these functions is that if an argument was a
1728 : * negative signed int before promotion (implying overflow in calculating it)
1729 : * we will detect that as an error. That happens because we reject results
1730 : * larger than SIZE_MAX / 2 later on, in the actual allocation step.
1731 : */
1732 : Size
1733 503262 : add_size(Size s1, Size s2)
1734 : {
1735 : Size result;
1736 :
1737 503262 : if (unlikely(pg_add_size_overflow(s1, s2, &result)))
1738 0 : add_size_error(s1, s2);
1739 503262 : return result;
1740 : }
1741 :
/*
 * Out-of-line (pg_noinline) error path for add_size, keeping the fast path
 * compact.  Does not return.
 */
pg_noreturn static pg_noinline void
add_size_error(Size s1, Size s2)
{
	ereport(ERROR,
			(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
			 errmsg("invalid memory allocation request size %zu + %zu",
					s1, s2)));
}
1750 :
1751 : Size
1752 216040 : mul_size(Size s1, Size s2)
1753 : {
1754 : Size result;
1755 :
1756 216040 : if (unlikely(pg_mul_size_overflow(s1, s2, &result)))
1757 0 : mul_size_error(s1, s2);
1758 216040 : return result;
1759 : }
1760 :
/*
 * Out-of-line, non-returning error reporter for mul_size(); also shared by
 * the palloc_mul()/repalloc_mul() family of allocation wrappers below.
 *
 * Marked pg_noinline, presumably to keep the error-raising code out of the
 * callers' fast paths — confirm against the declaration site.
 */
pg_noreturn static pg_noinline void
mul_size_error(Size s1, Size s2)
{
	ereport(ERROR,
			(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
			 errmsg("invalid memory allocation request size %zu * %zu",
					s1, s2)));
}
1769 :
1770 : /*
1771 : * palloc_mul
1772 : * Equivalent to palloc(mul_size(s1, s2)).
1773 : */
1774 : void *
1775 43535762 : palloc_mul(Size s1, Size s2)
1776 : {
1777 : /* inline mul_size() for efficiency */
1778 : Size req;
1779 :
1780 43535762 : if (unlikely(pg_mul_size_overflow(s1, s2, &req)))
1781 0 : mul_size_error(s1, s2);
1782 43535762 : return palloc(req);
1783 : }
1784 :
1785 : /*
1786 : * palloc0_mul
1787 : * Equivalent to palloc0(mul_size(s1, s2)).
1788 : *
1789 : * This is comparable to standard calloc's behavior.
1790 : */
1791 : void *
1792 5771516 : palloc0_mul(Size s1, Size s2)
1793 : {
1794 : /* inline mul_size() for efficiency */
1795 : Size req;
1796 :
1797 5771516 : if (unlikely(pg_mul_size_overflow(s1, s2, &req)))
1798 0 : mul_size_error(s1, s2);
1799 5771516 : return palloc0(req);
1800 : }
1801 :
1802 : /*
1803 : * palloc_mul_extended
1804 : * Equivalent to palloc_extended(mul_size(s1, s2), flags).
1805 : */
1806 : void *
1807 13463216 : palloc_mul_extended(Size s1, Size s2, int flags)
1808 : {
1809 : /* inline mul_size() for efficiency */
1810 : Size req;
1811 :
1812 13463216 : if (unlikely(pg_mul_size_overflow(s1, s2, &req)))
1813 0 : mul_size_error(s1, s2);
1814 13463216 : return palloc_extended(req, flags);
1815 : }
1816 :
1817 : /*
1818 : * repalloc_mul
1819 : * Equivalent to repalloc(p, mul_size(s1, s2)).
1820 : */
1821 : void *
1822 33626 : repalloc_mul(void *p, Size s1, Size s2)
1823 : {
1824 : /* inline mul_size() for efficiency */
1825 : Size req;
1826 :
1827 33626 : if (unlikely(pg_mul_size_overflow(s1, s2, &req)))
1828 0 : mul_size_error(s1, s2);
1829 33626 : return repalloc(p, req);
1830 : }
1831 :
1832 : /*
1833 : * repalloc_mul_extended
1834 : * Equivalent to repalloc_extended(p, mul_size(s1, s2), flags).
1835 : */
1836 : void *
1837 454 : repalloc_mul_extended(void *p, Size s1, Size s2, int flags)
1838 : {
1839 : /* inline mul_size() for efficiency */
1840 : Size req;
1841 :
1842 454 : if (unlikely(pg_mul_size_overflow(s1, s2, &req)))
1843 0 : mul_size_error(s1, s2);
1844 454 : return repalloc_extended(p, req, flags);
1845 : }
1846 :
1847 : /*
1848 : * MemoryContextAllocHuge
1849 : * Allocate (possibly-expansive) space within the specified context.
1850 : *
1851 : * See considerations in comment at MaxAllocHugeSize.
1852 : */
1853 : void *
1854 1459 : MemoryContextAllocHuge(MemoryContext context, Size size)
1855 : {
1856 : void *ret;
1857 :
1858 : Assert(MemoryContextIsValid(context));
1859 : AssertNotInCriticalSection(context);
1860 :
1861 1459 : context->isReset = false;
1862 :
1863 : /*
1864 : * For efficiency reasons, we purposefully offload the handling of
1865 : * allocation failures to the MemoryContextMethods implementation as this
1866 : * allows these checks to be performed only when an actual malloc needs to
1867 : * be done to request more memory from the OS. Additionally, not having
1868 : * to execute any instructions after this call allows the compiler to use
1869 : * the sibling call optimization. If you're considering adding code after
1870 : * this call, consider making it the responsibility of the 'alloc'
1871 : * function instead.
1872 : */
1873 1459 : ret = context->methods->alloc(context, size, MCXT_ALLOC_HUGE);
1874 :
1875 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1876 :
1877 1459 : return ret;
1878 : }
1879 :
/*
 * repalloc_huge
 *		Adjust the size of a previously allocated chunk, permitting a large
 *		value.  The previous allocation need not have been "huge".
 */
void *
repalloc_huge(void *pointer, Size size)
{
	/* this one seems not worth its own implementation */
	return repalloc_extended(pointer, size, MCXT_ALLOC_HUGE);
}
1891 :
1892 : /*
1893 : * MemoryContextStrdup
1894 : * Like strdup(), but allocate from the specified context
1895 : */
1896 : char *
1897 62314312 : MemoryContextStrdup(MemoryContext context, const char *string)
1898 : {
1899 : char *nstr;
1900 62314312 : Size len = strlen(string) + 1;
1901 :
1902 62314312 : nstr = (char *) MemoryContextAlloc(context, len);
1903 :
1904 62314312 : memcpy(nstr, string, len);
1905 :
1906 62314312 : return nstr;
1907 : }
1908 :
/*
 * pstrdup
 *		Like strdup(), but allocate the copy from CurrentMemoryContext.
 */
char *
pstrdup(const char *in)
{
	return MemoryContextStrdup(CurrentMemoryContext, in);
}
1914 :
1915 : /*
1916 : * pnstrdup
1917 : * Like pstrdup(), but append null byte to a
1918 : * not-necessarily-null-terminated input string.
1919 : */
1920 : char *
1921 1075031 : pnstrdup(const char *in, Size len)
1922 : {
1923 : char *out;
1924 :
1925 1075031 : len = strnlen(in, len);
1926 :
1927 1075031 : out = palloc(len + 1);
1928 1075031 : memcpy(out, in, len);
1929 1075031 : out[len] = '\0';
1930 :
1931 1075031 : return out;
1932 : }
1933 :
/*
 * pchomp
 *		Make copy of string with all trailing newline characters removed.
 */
char *
pchomp(const char *in)
{
	size_t		len = strlen(in);

	/* back up over the run of trailing newlines, if any */
	while (len > 0 && in[len - 1] == '\n')
		len--;
	return pnstrdup(in, len);
}
|