Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * mcxt.c
4 : * POSTGRES memory context management code.
5 : *
6 : * This module handles context management operations that are independent
7 : * of the particular kind of context being operated on. It calls
8 : * context-type-specific operations via the function pointers in a
9 : * context's MemoryContextMethods struct.
10 : *
11 : *
12 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
13 : * Portions Copyright (c) 1994, Regents of the University of California
14 : *
15 : *
16 : * IDENTIFICATION
17 : * src/backend/utils/mmgr/mcxt.c
18 : *
19 : *-------------------------------------------------------------------------
20 : */
21 :
22 : #include "postgres.h"
23 :
24 : #include "mb/pg_wchar.h"
25 : #include "miscadmin.h"
26 : #include "nodes/pg_list.h"
27 : #include "storage/lwlock.h"
28 : #include "storage/ipc.h"
29 : #include "utils/dsa.h"
30 : #include "utils/hsearch.h"
31 : #include "utils/memdebug.h"
32 : #include "utils/memutils.h"
33 : #include "utils/memutils_internal.h"
34 : #include "utils/memutils_memorychunk.h"
35 :
36 :
37 : static void BogusFree(void *pointer);
38 : static void *BogusRealloc(void *pointer, Size size, int flags);
39 : static MemoryContext BogusGetChunkContext(void *pointer);
40 : static Size BogusGetChunkSpace(void *pointer);
41 :
42 : /*****************************************************************************
43 : * GLOBAL MEMORY *
44 : *****************************************************************************/
/*
 * Fill a method-table slot with error-reporting stubs.  Used for IDs that
 * are reserved or unused, covering exactly the methods that can be invoked
 * from inspection of a chunk header (see MCXT_METHOD calls below).
 */
#define BOGUS_MCTX(id) \
	[id].free_p = BogusFree, \
	[id].realloc = BogusRealloc, \
	[id].get_chunk_context = BogusGetChunkContext, \
	[id].get_chunk_space = BogusGetChunkSpace

/*
 * Dispatch table of context-type-specific operations, indexed by the
 * MemoryContextMethodID encoded in each chunk's header word.
 */
static const MemoryContextMethods mcxt_methods[] = {
	/* aset.c */
	[MCTX_ASET_ID].alloc = AllocSetAlloc,
	[MCTX_ASET_ID].free_p = AllocSetFree,
	[MCTX_ASET_ID].realloc = AllocSetRealloc,
	[MCTX_ASET_ID].reset = AllocSetReset,
	[MCTX_ASET_ID].delete_context = AllocSetDelete,
	[MCTX_ASET_ID].get_chunk_context = AllocSetGetChunkContext,
	[MCTX_ASET_ID].get_chunk_space = AllocSetGetChunkSpace,
	[MCTX_ASET_ID].is_empty = AllocSetIsEmpty,
	[MCTX_ASET_ID].stats = AllocSetStats,
#ifdef MEMORY_CONTEXT_CHECKING
	[MCTX_ASET_ID].check = AllocSetCheck,
#endif

	/* generation.c */
	[MCTX_GENERATION_ID].alloc = GenerationAlloc,
	[MCTX_GENERATION_ID].free_p = GenerationFree,
	[MCTX_GENERATION_ID].realloc = GenerationRealloc,
	[MCTX_GENERATION_ID].reset = GenerationReset,
	[MCTX_GENERATION_ID].delete_context = GenerationDelete,
	[MCTX_GENERATION_ID].get_chunk_context = GenerationGetChunkContext,
	[MCTX_GENERATION_ID].get_chunk_space = GenerationGetChunkSpace,
	[MCTX_GENERATION_ID].is_empty = GenerationIsEmpty,
	[MCTX_GENERATION_ID].stats = GenerationStats,
#ifdef MEMORY_CONTEXT_CHECKING
	[MCTX_GENERATION_ID].check = GenerationCheck,
#endif

	/* slab.c */
	[MCTX_SLAB_ID].alloc = SlabAlloc,
	[MCTX_SLAB_ID].free_p = SlabFree,
	[MCTX_SLAB_ID].realloc = SlabRealloc,
	[MCTX_SLAB_ID].reset = SlabReset,
	[MCTX_SLAB_ID].delete_context = SlabDelete,
	[MCTX_SLAB_ID].get_chunk_context = SlabGetChunkContext,
	[MCTX_SLAB_ID].get_chunk_space = SlabGetChunkSpace,
	[MCTX_SLAB_ID].is_empty = SlabIsEmpty,
	[MCTX_SLAB_ID].stats = SlabStats,
#ifdef MEMORY_CONTEXT_CHECKING
	[MCTX_SLAB_ID].check = SlabCheck,
#endif

	/* alignedalloc.c */
	[MCTX_ALIGNED_REDIRECT_ID].alloc = NULL,	/* not required */
	[MCTX_ALIGNED_REDIRECT_ID].free_p = AlignedAllocFree,
	[MCTX_ALIGNED_REDIRECT_ID].realloc = AlignedAllocRealloc,
	[MCTX_ALIGNED_REDIRECT_ID].reset = NULL,	/* not required */
	[MCTX_ALIGNED_REDIRECT_ID].delete_context = NULL,	/* not required */
	[MCTX_ALIGNED_REDIRECT_ID].get_chunk_context = AlignedAllocGetChunkContext,
	[MCTX_ALIGNED_REDIRECT_ID].get_chunk_space = AlignedAllocGetChunkSpace,
	[MCTX_ALIGNED_REDIRECT_ID].is_empty = NULL, /* not required */
	[MCTX_ALIGNED_REDIRECT_ID].stats = NULL,	/* not required */
#ifdef MEMORY_CONTEXT_CHECKING
	[MCTX_ALIGNED_REDIRECT_ID].check = NULL,	/* not required */
#endif

	/* bump.c */
	[MCTX_BUMP_ID].alloc = BumpAlloc,
	[MCTX_BUMP_ID].free_p = BumpFree,
	[MCTX_BUMP_ID].realloc = BumpRealloc,
	[MCTX_BUMP_ID].reset = BumpReset,
	[MCTX_BUMP_ID].delete_context = BumpDelete,
	[MCTX_BUMP_ID].get_chunk_context = BumpGetChunkContext,
	[MCTX_BUMP_ID].get_chunk_space = BumpGetChunkSpace,
	[MCTX_BUMP_ID].is_empty = BumpIsEmpty,
	[MCTX_BUMP_ID].stats = BumpStats,
#ifdef MEMORY_CONTEXT_CHECKING
	[MCTX_BUMP_ID].check = BumpCheck,
#endif


	/*
	 * Reserved and unused IDs should have dummy entries here. This allows us
	 * to fail cleanly if a bogus pointer is passed to pfree or the like. It
	 * seems sufficient to provide routines for the methods that might get
	 * invoked from inspection of a chunk (see MCXT_METHOD calls below).
	 */
	BOGUS_MCTX(MCTX_1_RESERVED_GLIBC_ID),
	BOGUS_MCTX(MCTX_2_RESERVED_GLIBC_ID),
	BOGUS_MCTX(MCTX_8_UNUSED_ID),
	BOGUS_MCTX(MCTX_9_UNUSED_ID),
	BOGUS_MCTX(MCTX_10_UNUSED_ID),
	BOGUS_MCTX(MCTX_11_UNUSED_ID),
	BOGUS_MCTX(MCTX_12_UNUSED_ID),
	BOGUS_MCTX(MCTX_13_UNUSED_ID),
	BOGUS_MCTX(MCTX_14_UNUSED_ID),
	BOGUS_MCTX(MCTX_0_RESERVED_UNUSEDMEM_ID),
	BOGUS_MCTX(MCTX_15_RESERVED_WIPEDMEM_ID)
};
141 :
142 : #undef BOGUS_MCTX
143 : /*
144 : * This is passed to MemoryContextStatsInternal to determine whether
145 : * to print context statistics or not and where to print them logs or
146 : * stderr.
147 : */
typedef enum PrintDestination
{
	PRINT_STATS_TO_STDERR = 0,	/* dump stats with fprintf(stderr) */
	PRINT_STATS_TO_LOGS,		/* dump stats via ereport() to the server log */
	PRINT_STATS_NONE			/* accumulate totals only; print nothing */
} PrintDestination;
154 :
155 : /*
156 : * CurrentMemoryContext
157 : * Default memory context for allocations.
158 : */
159 : MemoryContext CurrentMemoryContext = NULL;
160 :
161 : /*
162 : * Standard top-level contexts. For a description of the purpose of each
163 : * of these contexts, refer to src/backend/utils/mmgr/README
164 : */
165 : MemoryContext TopMemoryContext = NULL;
166 : MemoryContext ErrorContext = NULL;
167 : MemoryContext PostmasterContext = NULL;
168 : MemoryContext CacheMemoryContext = NULL;
169 : MemoryContext MessageContext = NULL;
170 : MemoryContext TopTransactionContext = NULL;
171 : MemoryContext CurTransactionContext = NULL;
172 :
173 : /* This is a transient link to the active portal's memory context: */
174 : MemoryContext PortalContext = NULL;
175 : dsa_area *MemoryStatsDsaArea = NULL;
176 :
177 : static void MemoryContextDeleteOnly(MemoryContext context);
178 : static void MemoryContextCallResetCallbacks(MemoryContext context);
179 : static void MemoryContextStatsInternal(MemoryContext context, int level,
180 : int max_level, int max_children,
181 : MemoryContextCounters *totals,
182 : PrintDestination print_location,
183 : int *num_contexts);
184 : static void MemoryContextStatsPrint(MemoryContext context, void *passthru,
185 : const char *stats_string,
186 : bool print_to_stderr);
187 : static void PublishMemoryContext(MemoryStatsEntry *memcxt_info,
188 : int curr_id, MemoryContext context,
189 : List *path,
190 : MemoryContextCounters stat,
191 : int num_contexts, dsa_area *area,
192 : int max_levels);
193 : static void compute_contexts_count_and_ids(List *contexts, HTAB *context_id_lookup,
194 : int *stats_count,
195 : bool summary);
196 : static List *compute_context_path(MemoryContext c, HTAB *context_id_lookup);
197 : static void free_memorycontextstate_dsa(dsa_area *area, int total_stats,
198 : dsa_pointer prev_dsa_pointer);
199 : static void end_memorycontext_reporting(void);
200 :
201 : /*
202 : * You should not do memory allocations within a critical section, because
203 : * an out-of-memory error will be escalated to a PANIC. To enforce that
204 : * rule, the allocation functions Assert that.
205 : */
206 : #define AssertNotInCriticalSection(context) \
207 : Assert(CritSectionCount == 0 || (context)->allowInCritSection)
208 :
209 : /*
210 : * Call the given function in the MemoryContextMethods for the memory context
211 : * type that 'pointer' belongs to.
212 : */
213 : #define MCXT_METHOD(pointer, method) \
214 : mcxt_methods[GetMemoryChunkMethodID(pointer)].method
215 :
216 : /*
217 : * GetMemoryChunkMethodID
218 : * Return the MemoryContextMethodID from the uint64 chunk header which
219 : * directly precedes 'pointer'.
220 : */
static inline MemoryContextMethodID
GetMemoryChunkMethodID(const void *pointer)
{
	uint64		header;

	/*
	 * Try to detect bogus pointers handed to us, poorly though we can.
	 * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
	 * allocated chunk.
	 */
	Assert(pointer == (const void *) MAXALIGN(pointer));

	/* Allow access to the uint64 header */
	VALGRIND_MAKE_MEM_DEFINED((char *) pointer - sizeof(uint64), sizeof(uint64));

	header = *((const uint64 *) ((const char *) pointer - sizeof(uint64)));

	/* Disallow access to the uint64 header */
	VALGRIND_MAKE_MEM_NOACCESS((char *) pointer - sizeof(uint64), sizeof(uint64));

	/* The method ID lives in the header's low-order bits */
	return (MemoryContextMethodID) (header & MEMORY_CONTEXT_METHODID_MASK);
}
243 :
244 : /*
245 : * GetMemoryChunkHeader
246 : * Return the uint64 chunk header which directly precedes 'pointer'.
247 : *
248 : * This is only used after GetMemoryChunkMethodID, so no need for error checks.
249 : */
250 : static inline uint64
251 0 : GetMemoryChunkHeader(const void *pointer)
252 : {
253 : uint64 header;
254 :
255 : /* Allow access to the uint64 header */
256 : VALGRIND_MAKE_MEM_DEFINED((char *) pointer - sizeof(uint64), sizeof(uint64));
257 :
258 0 : header = *((const uint64 *) ((const char *) pointer - sizeof(uint64)));
259 :
260 : /* Disallow access to the uint64 header */
261 : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer - sizeof(uint64), sizeof(uint64));
262 :
263 0 : return header;
264 : }
265 :
266 : /*
267 : * MemoryContextTraverseNext
268 : * Helper function to traverse all descendants of a memory context
269 : * without recursion.
270 : *
271 : * Recursion could lead to out-of-stack errors with deep context hierarchies,
272 : * which would be unpleasant in error cleanup code paths.
273 : *
274 : * To process 'context' and all its descendants, use a loop like this:
275 : *
276 : * <process 'context'>
277 : * for (MemoryContext curr = context->firstchild;
278 : * curr != NULL;
279 : * curr = MemoryContextTraverseNext(curr, context))
280 : * {
281 : * <process 'curr'>
282 : * }
283 : *
284 : * This visits all the contexts in pre-order, that is a node is visited
285 : * before its children.
286 : */
287 : static MemoryContext
288 1271498 : MemoryContextTraverseNext(MemoryContext curr, MemoryContext top)
289 : {
290 : /* After processing a node, traverse to its first child if any */
291 1271498 : if (curr->firstchild != NULL)
292 0 : return curr->firstchild;
293 :
294 : /*
295 : * After processing a childless node, traverse to its next sibling if
296 : * there is one. If there isn't, traverse back up to the parent (which
297 : * has already been visited, and now so have all its descendants). We're
298 : * done if that is "top", otherwise traverse to its next sibling if any,
299 : * otherwise repeat moving up.
300 : */
301 1271498 : while (curr->nextchild == NULL)
302 : {
303 650960 : curr = curr->parent;
304 650960 : if (curr == top)
305 650960 : return NULL;
306 : }
307 620538 : return curr->nextchild;
308 : }
309 :
310 : /*
311 : * Support routines to trap use of invalid memory context method IDs
312 : * (from calling pfree or the like on a bogus pointer). As a possible
313 : * aid in debugging, we report the header word along with the pointer
314 : * address (if we got here, there must be an accessible header word).
315 : */
/* Trap pfree() on a pointer whose header carries an invalid method ID. */
static void
BogusFree(void *pointer)
{
	elog(ERROR, "pfree called with invalid pointer %p (header 0x%016" PRIx64 ")",
		 pointer, GetMemoryChunkHeader(pointer));
}

/* Likewise for repalloc(). */
static void *
BogusRealloc(void *pointer, Size size, int flags)
{
	elog(ERROR, "repalloc called with invalid pointer %p (header 0x%016" PRIx64 ")",
		 pointer, GetMemoryChunkHeader(pointer));
	return NULL;				/* keep compiler quiet */
}

/* Likewise for GetMemoryChunkContext(). */
static MemoryContext
BogusGetChunkContext(void *pointer)
{
	elog(ERROR, "GetMemoryChunkContext called with invalid pointer %p (header 0x%016" PRIx64 ")",
		 pointer, GetMemoryChunkHeader(pointer));
	return NULL;				/* keep compiler quiet */
}

/* Likewise for GetMemoryChunkSpace(). */
static Size
BogusGetChunkSpace(void *pointer)
{
	elog(ERROR, "GetMemoryChunkSpace called with invalid pointer %p (header 0x%016" PRIx64 ")",
		 pointer, GetMemoryChunkHeader(pointer));
	return 0;					/* keep compiler quiet */
}
346 :
347 :
348 : /*****************************************************************************
349 : * EXPORTED ROUTINES *
350 : *****************************************************************************/
351 :
352 :
353 : /*
354 : * MemoryContextInit
355 : * Start up the memory-context subsystem.
356 : *
357 : * This must be called before creating contexts or allocating memory in
358 : * contexts. TopMemoryContext and ErrorContext are initialized here;
359 : * other contexts must be created afterwards.
360 : *
361 : * In normal multi-backend operation, this is called once during
362 : * postmaster startup, and not at all by individual backend startup
363 : * (since the backends inherit an already-initialized context subsystem
364 : * by virtue of being forked off the postmaster). But in an EXEC_BACKEND
365 : * build, each process must do this for itself.
366 : *
367 : * In a standalone backend this must be called during backend startup.
368 : */
void
MemoryContextInit(void)
{
	Assert(TopMemoryContext == NULL);

	/*
	 * First, initialize TopMemoryContext, which is the parent of all others.
	 */
	TopMemoryContext = AllocSetContextCreate((MemoryContext) NULL,
											 "TopMemoryContext",
											 ALLOCSET_DEFAULT_SIZES);

	/*
	 * Not having any other place to point CurrentMemoryContext, make it point
	 * to TopMemoryContext.  Caller should change this soon!
	 */
	CurrentMemoryContext = TopMemoryContext;

	/*
	 * Initialize ErrorContext as an AllocSetContext with slow growth rate ---
	 * we don't really expect much to be allocated in it. More to the point,
	 * require it to contain at least 8K at all times. This is the only case
	 * where retained memory in a context is *essential* --- we want to be
	 * sure ErrorContext still has some memory even if we've run out
	 * elsewhere! Also, allow allocations in ErrorContext within a critical
	 * section. Otherwise a PANIC will cause an assertion failure in the error
	 * reporting code, before printing out the real cause of the failure.
	 *
	 * This should be the last step in this function, as elog.c assumes memory
	 * management works once ErrorContext is non-null.
	 */
	ErrorContext = AllocSetContextCreate(TopMemoryContext,
										 "ErrorContext",
										 8 * 1024,	/* min size: kept even when reset */
										 8 * 1024,	/* initial block size */
										 8 * 1024); /* max block size: no growth */
	MemoryContextAllowInCriticalSection(ErrorContext, true);
}
407 :
408 : /*
409 : * MemoryContextReset
410 : * Release all space allocated within a context and delete all its
411 : * descendant contexts (but not the named context itself).
412 : */
413 : void
414 328054784 : MemoryContextReset(MemoryContext context)
415 : {
416 : Assert(MemoryContextIsValid(context));
417 :
418 : /* save a function call in common case where there are no children */
419 328054784 : if (context->firstchild != NULL)
420 524806 : MemoryContextDeleteChildren(context);
421 :
422 : /* save a function call if no pallocs since startup or last reset */
423 328054784 : if (!context->isReset)
424 49879844 : MemoryContextResetOnly(context);
425 328054784 : }
426 :
427 : /*
428 : * MemoryContextResetOnly
429 : * Release all space allocated within a context.
430 : * Nothing is done to the context's descendant contexts.
431 : */
432 : void
433 55695282 : MemoryContextResetOnly(MemoryContext context)
434 : {
435 : Assert(MemoryContextIsValid(context));
436 :
437 : /* Nothing to do if no pallocs since startup or last reset */
438 55695282 : if (!context->isReset)
439 : {
440 55694102 : MemoryContextCallResetCallbacks(context);
441 :
442 : /*
443 : * If context->ident points into the context's memory, it will become
444 : * a dangling pointer. We could prevent that by setting it to NULL
445 : * here, but that would break valid coding patterns that keep the
446 : * ident elsewhere, e.g. in a parent context. So for now we assume
447 : * the programmer got it right.
448 : */
449 :
450 55694102 : context->methods->reset(context);
451 55694102 : context->isReset = true;
452 : VALGRIND_DESTROY_MEMPOOL(context);
453 : VALGRIND_CREATE_MEMPOOL(context, 0, false);
454 : }
455 55695282 : }
456 :
457 : /*
458 : * MemoryContextResetChildren
459 : * Release all space allocated within a context's descendants,
460 : * but don't delete the contexts themselves. The named context
461 : * itself is not touched.
462 : */
463 : void
464 0 : MemoryContextResetChildren(MemoryContext context)
465 : {
466 : Assert(MemoryContextIsValid(context));
467 :
468 0 : for (MemoryContext curr = context->firstchild;
469 : curr != NULL;
470 0 : curr = MemoryContextTraverseNext(curr, context))
471 : {
472 0 : MemoryContextResetOnly(curr);
473 : }
474 0 : }
475 :
476 : /*
477 : * MemoryContextDelete
478 : * Delete a context and its descendants, and release all space
479 : * allocated therein.
480 : *
481 : * The type-specific delete routine removes all storage for the context,
482 : * but we have to deal with descendant nodes here.
483 : */
void
MemoryContextDelete(MemoryContext context)
{
	MemoryContext curr;

	Assert(MemoryContextIsValid(context));

	/*
	 * Delete subcontexts from the bottom up.
	 *
	 * Note: Do not use recursion here. A "stack depth limit exceeded" error
	 * would be unpleasant if we're already in the process of cleaning up from
	 * transaction abort. We also cannot use MemoryContextTraverseNext() here
	 * because we modify the tree as we go.
	 */
	curr = context;
	for (;;)
	{
		MemoryContext parent;

		/* Descend down until we find a leaf context with no children */
		while (curr->firstchild != NULL)
			curr = curr->firstchild;

		/*
		 * We're now at a leaf with no children. Free it and continue from the
		 * parent. Or if this was the original node, we're all done.
		 */
		parent = curr->parent;	/* must be saved before curr's storage is
								 * released */
		MemoryContextDeleteOnly(curr);

		if (curr == context)
			break;
		curr = parent;
	}
}
520 :
521 : /*
522 : * Subroutine of MemoryContextDelete,
523 : * to delete a context that has no children.
524 : * We must also delink the context from its parent, if it has one.
525 : */
/*
 * Subroutine of MemoryContextDelete,
 * to delete a context that has no children.
 * We must also delink the context from its parent, if it has one.
 */
static void
MemoryContextDeleteOnly(MemoryContext context)
{
	Assert(MemoryContextIsValid(context));
	/* We had better not be deleting TopMemoryContext ... */
	Assert(context != TopMemoryContext);
	/* And not CurrentMemoryContext, either */
	Assert(context != CurrentMemoryContext);
	/* All the children should've been deleted already */
	Assert(context->firstchild == NULL);

	/*
	 * It's not entirely clear whether 'tis better to do this before or after
	 * delinking the context; but an error in a callback will likely result in
	 * leaking the whole context (if it's not a root context) if we do it
	 * after, so let's do it before.
	 */
	MemoryContextCallResetCallbacks(context);

	/*
	 * We delink the context from its parent before deleting it, so that if
	 * there's an error we won't have deleted/busted contexts still attached
	 * to the context tree.  Better a leak than a crash.
	 */
	MemoryContextSetParent(context, NULL);

	/*
	 * Also reset the context's ident pointer, in case it points into the
	 * context.  This would only matter if someone tries to get stats on the
	 * (already unlinked) context, which is unlikely, but let's be safe.
	 */
	context->ident = NULL;

	/* Type-specific teardown releases all the context's storage */
	context->methods->delete_context(context);

	VALGRIND_DESTROY_MEMPOOL(context);
}
563 :
564 : /*
565 : * MemoryContextDeleteChildren
566 : * Delete all the descendants of the named context and release all
567 : * space allocated therein. The named context itself is not touched.
568 : */
569 : void
570 965162 : MemoryContextDeleteChildren(MemoryContext context)
571 : {
572 : Assert(MemoryContextIsValid(context));
573 :
574 : /*
575 : * MemoryContextDelete will delink the child from me, so just iterate as
576 : * long as there is a child.
577 : */
578 1582470 : while (context->firstchild != NULL)
579 617308 : MemoryContextDelete(context->firstchild);
580 965162 : }
581 :
582 : /*
583 : * MemoryContextRegisterResetCallback
584 : * Register a function to be called before next context reset/delete.
585 : * Such callbacks will be called in reverse order of registration.
586 : *
587 : * The caller is responsible for allocating a MemoryContextCallback struct
588 : * to hold the info about this callback request, and for filling in the
589 : * "func" and "arg" fields in the struct to show what function to call with
590 : * what argument. Typically the callback struct should be allocated within
591 : * the specified context, since that means it will automatically be freed
592 : * when no longer needed.
593 : *
594 : * There is no API for deregistering a callback once registered. If you
595 : * want it to not do anything anymore, adjust the state pointed to by its
596 : * "arg" to indicate that.
597 : */
void
MemoryContextRegisterResetCallback(MemoryContext context,
								   MemoryContextCallback *cb)
{
	Assert(MemoryContextIsValid(context));

	/* Push onto head so this will be called before older registrants. */
	cb->next = context->reset_cbs;
	context->reset_cbs = cb;
	/*
	 * Mark the context as non-reset (it probably is already).  This ensures
	 * the next reset/delete will actually run the callback list.
	 */
	context->isReset = false;
}
610 :
611 : /*
612 : * MemoryContextCallResetCallbacks
613 : * Internal function to call all registered callbacks for context.
614 : */
615 : static void
616 66620484 : MemoryContextCallResetCallbacks(MemoryContext context)
617 : {
618 : MemoryContextCallback *cb;
619 :
620 : /*
621 : * We pop each callback from the list before calling. That way, if an
622 : * error occurs inside the callback, we won't try to call it a second time
623 : * in the likely event that we reset or delete the context later.
624 : */
625 66702298 : while ((cb = context->reset_cbs) != NULL)
626 : {
627 81814 : context->reset_cbs = cb->next;
628 81814 : cb->func(cb->arg);
629 : }
630 66620484 : }
631 :
632 : /*
633 : * MemoryContextSetIdentifier
634 : * Set the identifier string for a memory context.
635 : *
636 : * An identifier can be provided to help distinguish among different contexts
637 : * of the same kind in memory context stats dumps. The identifier string
638 : * must live at least as long as the context it is for; typically it is
639 : * allocated inside that context, so that it automatically goes away on
640 : * context deletion. Pass id = NULL to forget any old identifier.
641 : */
void
MemoryContextSetIdentifier(MemoryContext context, const char *id)
{
	Assert(MemoryContextIsValid(context));
	/* Caller guarantees 'id' outlives the context; NULL clears it */
	context->ident = id;
}
648 :
649 : /*
650 : * MemoryContextSetParent
651 : * Change a context to belong to a new parent (or no parent).
652 : *
653 : * We provide this as an API function because it is sometimes useful to
654 : * change a context's lifespan after creation. For example, a context
655 : * might be created underneath a transient context, filled with data,
656 : * and then reparented underneath CacheMemoryContext to make it long-lived.
657 : * In this way no special effort is needed to get rid of the context in case
658 : * a failure occurs before its contents are completely set up.
659 : *
660 : * Callers often assume that this function cannot fail, so don't put any
661 : * elog(ERROR) calls in it.
662 : *
663 : * A possible caller error is to reparent a context under itself, creating
664 : * a loop in the context graph. We assert here that context != new_parent,
665 : * but checking for multi-level loops seems more trouble than it's worth.
666 : */
void
MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
{
	Assert(MemoryContextIsValid(context));
	Assert(context != new_parent);

	/* Fast path if it's got correct parent already */
	if (new_parent == context->parent)
		return;

	/* Delink from existing parent, if any */
	if (context->parent)
	{
		MemoryContext parent = context->parent;

		if (context->prevchild != NULL)
			context->prevchild->nextchild = context->nextchild;
		else
		{
			/* context was the head of the parent's child list */
			Assert(parent->firstchild == context);
			parent->firstchild = context->nextchild;
		}

		if (context->nextchild != NULL)
			context->nextchild->prevchild = context->prevchild;
	}

	/* And relink (push onto the head of new_parent's child list) */
	if (new_parent)
	{
		Assert(MemoryContextIsValid(new_parent));
		context->parent = new_parent;
		context->prevchild = NULL;
		context->nextchild = new_parent->firstchild;
		if (new_parent->firstchild != NULL)
			new_parent->firstchild->prevchild = context;
		new_parent->firstchild = context;
	}
	else
	{
		/* Becoming a root context: clear all tree links */
		context->parent = NULL;
		context->prevchild = NULL;
		context->nextchild = NULL;
	}
}
712 :
713 : /*
714 : * MemoryContextAllowInCriticalSection
715 : * Allow/disallow allocations in this memory context within a critical
716 : * section.
717 : *
718 : * Normally, memory allocations are not allowed within a critical section,
719 : * because a failure would lead to PANIC. There are a few exceptions to
720 : * that, like allocations related to debugging code that is not supposed to
721 : * be enabled in production. This function can be used to exempt specific
722 : * memory contexts from the assertion in palloc().
723 : */
void
MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
{
	Assert(MemoryContextIsValid(context));

	/* Checked by AssertNotInCriticalSection() in the allocation paths */
	context->allowInCritSection = allow;
}
731 :
732 : /*
733 : * GetMemoryChunkContext
734 : * Given a currently-allocated chunk, determine the MemoryContext that
735 : * the chunk belongs to.
736 : */
MemoryContext
GetMemoryChunkContext(void *pointer)
{
	/* Dispatch on the method ID encoded in the chunk header */
	return MCXT_METHOD(pointer, get_chunk_context) (pointer);
}
742 :
743 : /*
744 : * GetMemoryChunkSpace
745 : * Given a currently-allocated chunk, determine the total space
746 : * it occupies (including all memory-allocation overhead).
747 : *
748 : * This is useful for measuring the total space occupied by a set of
749 : * allocated chunks.
750 : */
Size
GetMemoryChunkSpace(void *pointer)
{
	/* Dispatch on the method ID encoded in the chunk header */
	return MCXT_METHOD(pointer, get_chunk_space) (pointer);
}
756 :
757 : /*
758 : * MemoryContextGetParent
759 : * Get the parent context (if any) of the specified context
760 : */
MemoryContext
MemoryContextGetParent(MemoryContext context)
{
	Assert(MemoryContextIsValid(context));

	/* NULL for a root context */
	return context->parent;
}
768 :
769 : /*
770 : * MemoryContextIsEmpty
771 : * Is a memory context empty of any allocated space?
772 : */
773 : bool
774 10732 : MemoryContextIsEmpty(MemoryContext context)
775 : {
776 : Assert(MemoryContextIsValid(context));
777 :
778 : /*
779 : * For now, we consider a memory context nonempty if it has any children;
780 : * perhaps this should be changed later.
781 : */
782 10732 : if (context->firstchild != NULL)
783 2 : return false;
784 : /* Otherwise use the type-specific inquiry */
785 10730 : return context->methods->is_empty(context);
786 : }
787 :
788 : /*
789 : * Find the memory allocated to blocks for this memory context. If recurse is
790 : * true, also include children.
791 : */
792 : Size
793 1797716 : MemoryContextMemAllocated(MemoryContext context, bool recurse)
794 : {
795 1797716 : Size total = context->mem_allocated;
796 :
797 : Assert(MemoryContextIsValid(context));
798 :
799 1797716 : if (recurse)
800 : {
801 3069214 : for (MemoryContext curr = context->firstchild;
802 : curr != NULL;
803 1271498 : curr = MemoryContextTraverseNext(curr, context))
804 : {
805 1271498 : total += curr->mem_allocated;
806 : }
807 : }
808 :
809 1797716 : return total;
810 : }
811 :
812 : /*
813 : * Return the memory consumption statistics about the given context and its
814 : * children.
815 : */
816 : void
817 30 : MemoryContextMemConsumed(MemoryContext context,
818 : MemoryContextCounters *consumed)
819 : {
820 : Assert(MemoryContextIsValid(context));
821 :
822 30 : memset(consumed, 0, sizeof(*consumed));
823 :
824 : /* Examine the context itself */
825 30 : context->methods->stats(context, NULL, NULL, consumed, false);
826 :
827 : /* Examine children, using iteration not recursion */
828 30 : for (MemoryContext curr = context->firstchild;
829 : curr != NULL;
830 0 : curr = MemoryContextTraverseNext(curr, context))
831 : {
832 0 : curr->methods->stats(curr, NULL, NULL, consumed, false);
833 : }
834 30 : }
835 :
836 : /*
837 : * MemoryContextStats
838 : * Print statistics about the named context and all its descendants.
839 : *
840 : * This is just a debugging utility, so it's not very fancy. However, we do
841 : * make some effort to summarize when the output would otherwise be very long.
842 : * The statistics are sent to stderr.
843 : */
void
MemoryContextStats(MemoryContext context)
{
	/*
	 * Hard-wired limits are usually good enough: at most 100 levels deep,
	 * at most 100 children per level, output to stderr.
	 */
	MemoryContextStatsDetail(context, 100, 100, true);
}
850 :
851 : /*
852 : * MemoryContextStatsDetail
853 : *
854 : * Entry point for use if you want to vary the number of child contexts shown.
855 : *
856 : * If print_to_stderr is true, print statistics about the memory contexts
857 : * with fprintf(stderr), otherwise use ereport().
858 : */
void
MemoryContextStatsDetail(MemoryContext context,
						 int max_level, int max_children,
						 bool print_to_stderr)
{
	MemoryContextCounters grand_totals;
	int			num_contexts;
	PrintDestination print_location;

	memset(&grand_totals, 0, sizeof(grand_totals));

	/* Map the caller's boolean onto the internal destination enum */
	if (print_to_stderr)
		print_location = PRINT_STATS_TO_STDERR;
	else
		print_location = PRINT_STATS_TO_LOGS;

	/* num_contexts report number of contexts aggregated in the output */
	MemoryContextStatsInternal(context, 1, max_level, max_children,
							   &grand_totals, print_location, &num_contexts);

	if (print_to_stderr)
		fprintf(stderr,
				"Grand total: %zu bytes in %zu blocks; %zu free (%zu chunks); %zu used\n",
				grand_totals.totalspace, grand_totals.nblocks,
				grand_totals.freespace, grand_totals.freechunks,
				grand_totals.totalspace - grand_totals.freespace);
	else
	{
		/*
		 * Use LOG_SERVER_ONLY to prevent the memory contexts from being sent
		 * to the connected client.
		 *
		 * We don't buffer the information about all memory contexts in a
		 * backend into StringInfo and log it as one message. That would
		 * require the buffer to be enlarged, risking an OOM as there could be
		 * a large number of memory contexts in a backend. Instead, we log
		 * one message per memory context.
		 */
		ereport(LOG_SERVER_ONLY,
				(errhidestmt(true),
				 errhidecontext(true),
				 errmsg_internal("Grand total: %zu bytes in %zu blocks; %zu free (%zu chunks); %zu used",
								 grand_totals.totalspace, grand_totals.nblocks,
								 grand_totals.freespace, grand_totals.freechunks,
								 grand_totals.totalspace - grand_totals.freespace)));
	}
}
906 :
/*
 * MemoryContextStatsInternal
 *		One recursion level for MemoryContextStats
 *
 * Print stats for this context if possible, but in any case accumulate counts
 * into *totals (if not NULL). The callers should make sure that print_location
 * is set to PRINT_STATS_TO_STDERR or PRINT_STATS_TO_LOGS or PRINT_STATS_NONE.
 *
 * *num_contexts is incremented once per context examined, whether it was
 * printed individually or only summarized.
 */
static void
MemoryContextStatsInternal(MemoryContext context, int level,
						   int max_level, int max_children,
						   MemoryContextCounters *totals,
						   PrintDestination print_location, int *num_contexts)
{
	MemoryContext child;
	int			ichild;

	Assert(MemoryContextIsValid(context));

	/* Examine the context itself */
	switch (print_location)
	{
		case PRINT_STATS_TO_STDERR:
			context->methods->stats(context,
									MemoryContextStatsPrint,
									&level,
									totals, true);
			break;

		case PRINT_STATS_TO_LOGS:
			context->methods->stats(context,
									MemoryContextStatsPrint,
									&level,
									totals, false);
			break;

		case PRINT_STATS_NONE:

			/*
			 * Do not print the statistics if print_location is
			 * PRINT_STATS_NONE, only compute totals. This is used in
			 * reporting of memory context statistics via a sql function. Last
			 * parameter is not relevant.
			 */
			context->methods->stats(context,
									NULL,
									NULL,
									totals, false);
			break;
	}

	/* Increment the context count for each of the recursive call */
	*num_contexts = *num_contexts + 1;

	/*
	 * Examine children.
	 *
	 * If we are past the recursion depth limit or already running low on
	 * stack, do not print them explicitly but just summarize them. Similarly,
	 * if there are more than max_children of them, we do not print the rest
	 * explicitly, but just summarize them.
	 */
	child = context->firstchild;
	ichild = 0;
	if (level <= max_level && !stack_is_too_deep())
	{
		/* Recurse into up to max_children children; "child" advances too */
		for (; child != NULL && ichild < max_children;
			 child = child->nextchild, ichild++)
		{
			MemoryContextStatsInternal(child, level + 1,
									   max_level, max_children,
									   totals,
									   print_location, num_contexts);
		}
	}

	/*
	 * If "child" is still non-NULL here, either the depth/stack guard
	 * suppressed recursion entirely or we hit max_children; everything from
	 * "child" onward is reported only as an aggregate below.
	 */
	if (child != NULL)
	{
		/* Summarize the rest of the children, avoiding recursion. */
		MemoryContextCounters local_totals;

		memset(&local_totals, 0, sizeof(local_totals));

		/* Reset ichild: it now counts only the summarized contexts */
		ichild = 0;
		while (child != NULL)
		{
			/* Accumulate stats for the entire remaining subtree */
			child->methods->stats(child, NULL, NULL, &local_totals, false);
			ichild++;
			child = MemoryContextTraverseNext(child, context);
		}

		/*
		 * Add the count of children contexts which are traversed in the
		 * non-recursive manner.
		 */
		*num_contexts = *num_contexts + ichild;

		if (print_location == PRINT_STATS_TO_STDERR)
		{
			for (int i = 0; i < level; i++)
				fprintf(stderr, " ");
			fprintf(stderr,
					"%d more child contexts containing %zu total in %zu blocks; %zu free (%zu chunks); %zu used\n",
					ichild,
					local_totals.totalspace,
					local_totals.nblocks,
					local_totals.freespace,
					local_totals.freechunks,
					local_totals.totalspace - local_totals.freespace);
		}
		else if (print_location == PRINT_STATS_TO_LOGS)
			ereport(LOG_SERVER_ONLY,
					(errhidestmt(true),
					 errhidecontext(true),
					 errmsg_internal("level: %d; %d more child contexts containing %zu total in %zu blocks; %zu free (%zu chunks); %zu used",
									 level,
									 ichild,
									 local_totals.totalspace,
									 local_totals.nblocks,
									 local_totals.freespace,
									 local_totals.freechunks,
									 local_totals.totalspace - local_totals.freespace)));

		/* Fold the summarized children into the caller's grand totals */
		if (totals)
		{
			totals->nblocks += local_totals.nblocks;
			totals->freechunks += local_totals.freechunks;
			totals->totalspace += local_totals.totalspace;
			totals->freespace += local_totals.freespace;
		}
	}
}
1039 :
1040 : /*
1041 : * MemoryContextStatsPrint
1042 : * Print callback used by MemoryContextStatsInternal
1043 : *
1044 : * For now, the passthru pointer just points to "int level"; later we might
1045 : * make that more complicated.
1046 : */
1047 : static void
1048 1614 : MemoryContextStatsPrint(MemoryContext context, void *passthru,
1049 : const char *stats_string,
1050 : bool print_to_stderr)
1051 : {
1052 1614 : int level = *(int *) passthru;
1053 1614 : const char *name = context->name;
1054 1614 : const char *ident = context->ident;
1055 : char truncated_ident[110];
1056 : int i;
1057 :
1058 : /*
1059 : * It seems preferable to label dynahash contexts with just the hash table
1060 : * name. Those are already unique enough, so the "dynahash" part isn't
1061 : * very helpful, and this way is more consistent with pre-v11 practice.
1062 : */
1063 1614 : if (ident && strcmp(name, "dynahash") == 0)
1064 : {
1065 210 : name = ident;
1066 210 : ident = NULL;
1067 : }
1068 :
1069 1614 : truncated_ident[0] = '\0';
1070 :
1071 1614 : if (ident)
1072 : {
1073 : /*
1074 : * Some contexts may have very long identifiers (e.g., SQL queries).
1075 : * Arbitrarily truncate at 100 bytes, but be careful not to break
1076 : * multibyte characters. Also, replace ASCII control characters, such
1077 : * as newlines, with spaces.
1078 : */
1079 1134 : int idlen = strlen(ident);
1080 1134 : bool truncated = false;
1081 :
1082 1134 : strcpy(truncated_ident, ": ");
1083 1134 : i = strlen(truncated_ident);
1084 :
1085 1134 : if (idlen > 100)
1086 : {
1087 0 : idlen = pg_mbcliplen(ident, idlen, 100);
1088 0 : truncated = true;
1089 : }
1090 :
1091 31688 : while (idlen-- > 0)
1092 : {
1093 30554 : unsigned char c = *ident++;
1094 :
1095 30554 : if (c < ' ')
1096 0 : c = ' ';
1097 30554 : truncated_ident[i++] = c;
1098 : }
1099 1134 : truncated_ident[i] = '\0';
1100 :
1101 1134 : if (truncated)
1102 0 : strcat(truncated_ident, "...");
1103 : }
1104 :
1105 1614 : if (print_to_stderr)
1106 : {
1107 0 : for (i = 1; i < level; i++)
1108 0 : fprintf(stderr, " ");
1109 0 : fprintf(stderr, "%s: %s%s\n", name, stats_string, truncated_ident);
1110 : }
1111 : else
1112 1614 : ereport(LOG_SERVER_ONLY,
1113 : (errhidestmt(true),
1114 : errhidecontext(true),
1115 : errmsg_internal("level: %d; %s: %s%s",
1116 : level, name, stats_string, truncated_ident)));
1117 1614 : }
1118 :
/*
 * MemoryContextCheck
 *		Check all chunks in the named context and its children.
 *
 * This is just a debugging utility, so it's not fancy.
 */
#ifdef MEMORY_CONTEXT_CHECKING
void
MemoryContextCheck(MemoryContext context)
{
	MemoryContext child;

	/* Check the context itself first */
	Assert(MemoryContextIsValid(context));
	context->methods->check(context);

	/* Then walk the whole subtree iteratively, avoiding deep recursion */
	for (child = context->firstchild;
		 child != NULL;
		 child = MemoryContextTraverseNext(child, context))
	{
		Assert(MemoryContextIsValid(child));
		child->methods->check(child);
	}
}
#endif
1141 :
1142 : /*
1143 : * MemoryContextCreate
1144 : * Context-type-independent part of context creation.
1145 : *
1146 : * This is only intended to be called by context-type-specific
1147 : * context creation routines, not by the unwashed masses.
1148 : *
1149 : * The memory context creation procedure goes like this:
1150 : * 1. Context-type-specific routine makes some initial space allocation,
1151 : * including enough space for the context header. If it fails,
1152 : * it can ereport() with no damage done.
1153 : * 2. Context-type-specific routine sets up all type-specific fields of
1154 : * the header (those beyond MemoryContextData proper), as well as any
1155 : * other management fields it needs to have a fully valid context.
1156 : * Usually, failure in this step is impossible, but if it's possible
1157 : * the initial space allocation should be freed before ereport'ing.
1158 : * 3. Context-type-specific routine calls MemoryContextCreate() to fill in
1159 : * the generic header fields and link the context into the context tree.
1160 : * 4. We return to the context-type-specific routine, which finishes
1161 : * up type-specific initialization. This routine can now do things
1162 : * that might fail (like allocate more memory), so long as it's
1163 : * sure the node is left in a state that delete will handle.
1164 : *
1165 : * node: the as-yet-uninitialized common part of the context header node.
1166 : * tag: NodeTag code identifying the memory context type.
1167 : * method_id: MemoryContextMethodID of the context-type being created.
1168 : * parent: parent context, or NULL if this will be a top-level context.
1169 : * name: name of context (must be statically allocated).
1170 : *
1171 : * Context routines generally assume that MemoryContextCreate can't fail,
1172 : * so this can contain Assert but not elog/ereport.
1173 : */
1174 : void
1175 14599508 : MemoryContextCreate(MemoryContext node,
1176 : NodeTag tag,
1177 : MemoryContextMethodID method_id,
1178 : MemoryContext parent,
1179 : const char *name)
1180 : {
1181 : /* Creating new memory contexts is not allowed in a critical section */
1182 : Assert(CritSectionCount == 0);
1183 :
1184 : /* Validate parent, to help prevent crazy context linkages */
1185 : Assert(parent == NULL || MemoryContextIsValid(parent));
1186 : Assert(node != parent);
1187 :
1188 : /* Initialize all standard fields of memory context header */
1189 14599508 : node->type = tag;
1190 14599508 : node->isReset = true;
1191 14599508 : node->methods = &mcxt_methods[method_id];
1192 14599508 : node->parent = parent;
1193 14599508 : node->firstchild = NULL;
1194 14599508 : node->mem_allocated = 0;
1195 14599508 : node->prevchild = NULL;
1196 14599508 : node->name = name;
1197 14599508 : node->ident = NULL;
1198 14599508 : node->reset_cbs = NULL;
1199 :
1200 : /* OK to link node into context tree */
1201 14599508 : if (parent)
1202 : {
1203 14595746 : node->nextchild = parent->firstchild;
1204 14595746 : if (parent->firstchild != NULL)
1205 8171908 : parent->firstchild->prevchild = node;
1206 14595746 : parent->firstchild = node;
1207 : /* inherit allowInCritSection flag from parent */
1208 14595746 : node->allowInCritSection = parent->allowInCritSection;
1209 : }
1210 : else
1211 : {
1212 3762 : node->nextchild = NULL;
1213 3762 : node->allowInCritSection = false;
1214 : }
1215 :
1216 : VALGRIND_CREATE_MEMPOOL(node, 0, false);
1217 14599508 : }
1218 :
1219 : /*
1220 : * MemoryContextAllocationFailure
1221 : * For use by MemoryContextMethods implementations to handle when malloc
1222 : * returns NULL. The behavior is specific to whether MCXT_ALLOC_NO_OOM
1223 : * is in 'flags'.
1224 : */
1225 : void *
1226 0 : MemoryContextAllocationFailure(MemoryContext context, Size size, int flags)
1227 : {
1228 0 : if ((flags & MCXT_ALLOC_NO_OOM) == 0)
1229 : {
1230 0 : if (TopMemoryContext)
1231 0 : MemoryContextStats(TopMemoryContext);
1232 0 : ereport(ERROR,
1233 : (errcode(ERRCODE_OUT_OF_MEMORY),
1234 : errmsg("out of memory"),
1235 : errdetail("Failed on request of size %zu in memory context \"%s\".",
1236 : size, context->name)));
1237 : }
1238 0 : return NULL;
1239 : }
1240 :
1241 : /*
1242 : * MemoryContextSizeFailure
1243 : * For use by MemoryContextMethods implementations to handle invalid
1244 : * memory allocation request sizes.
1245 : */
1246 : void
1247 0 : MemoryContextSizeFailure(MemoryContext context, Size size, int flags)
1248 : {
1249 0 : elog(ERROR, "invalid memory alloc request size %zu", size);
1250 : }
1251 :
1252 : /*
1253 : * MemoryContextAlloc
1254 : * Allocate space within the specified context.
1255 : *
1256 : * This could be turned into a macro, but we'd have to import
1257 : * nodes/memnodes.h into postgres.h which seems a bad idea.
1258 : */
1259 : void *
1260 183971736 : MemoryContextAlloc(MemoryContext context, Size size)
1261 : {
1262 : void *ret;
1263 :
1264 : Assert(MemoryContextIsValid(context));
1265 : AssertNotInCriticalSection(context);
1266 :
1267 183971736 : context->isReset = false;
1268 :
1269 : /*
1270 : * For efficiency reasons, we purposefully offload the handling of
1271 : * allocation failures to the MemoryContextMethods implementation as this
1272 : * allows these checks to be performed only when an actual malloc needs to
1273 : * be done to request more memory from the OS. Additionally, not having
1274 : * to execute any instructions after this call allows the compiler to use
1275 : * the sibling call optimization. If you're considering adding code after
1276 : * this call, consider making it the responsibility of the 'alloc'
1277 : * function instead.
1278 : */
1279 183971736 : ret = context->methods->alloc(context, size, 0);
1280 :
1281 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1282 :
1283 183971736 : return ret;
1284 : }
1285 :
1286 : /*
1287 : * MemoryContextAllocZero
1288 : * Like MemoryContextAlloc, but clears allocated memory
1289 : *
1290 : * We could just call MemoryContextAlloc then clear the memory, but this
1291 : * is a very common combination, so we provide the combined operation.
1292 : */
1293 : void *
1294 44375738 : MemoryContextAllocZero(MemoryContext context, Size size)
1295 : {
1296 : void *ret;
1297 :
1298 : Assert(MemoryContextIsValid(context));
1299 : AssertNotInCriticalSection(context);
1300 :
1301 44375738 : context->isReset = false;
1302 :
1303 44375738 : ret = context->methods->alloc(context, size, 0);
1304 :
1305 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1306 :
1307 542777084 : MemSetAligned(ret, 0, size);
1308 :
1309 44375738 : return ret;
1310 : }
1311 :
1312 : /*
1313 : * MemoryContextAllocExtended
1314 : * Allocate space within the specified context using the given flags.
1315 : */
1316 : void *
1317 7515842 : MemoryContextAllocExtended(MemoryContext context, Size size, int flags)
1318 : {
1319 : void *ret;
1320 :
1321 : Assert(MemoryContextIsValid(context));
1322 : AssertNotInCriticalSection(context);
1323 :
1324 7515842 : if (!((flags & MCXT_ALLOC_HUGE) != 0 ? AllocHugeSizeIsValid(size) :
1325 : AllocSizeIsValid(size)))
1326 0 : elog(ERROR, "invalid memory alloc request size %zu", size);
1327 :
1328 7515842 : context->isReset = false;
1329 :
1330 7515842 : ret = context->methods->alloc(context, size, flags);
1331 7515842 : if (unlikely(ret == NULL))
1332 0 : return NULL;
1333 :
1334 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1335 :
1336 7515842 : if ((flags & MCXT_ALLOC_ZERO) != 0)
1337 219550538 : MemSetAligned(ret, 0, size);
1338 :
1339 7515842 : return ret;
1340 : }
1341 :
/*
 * HandleLogMemoryContextInterrupt
 *		Handle receipt of an interrupt indicating logging of memory
 *		contexts.
 *
 * All the actual work is deferred to ProcessLogMemoryContextInterrupt(),
 * because we cannot safely emit a log message inside the signal handler.
 *
 * Runs in signal-handler context, so it only sets flags for the main loop
 * to notice at the next CHECK_FOR_INTERRUPTS().
 */
void
HandleLogMemoryContextInterrupt(void)
{
	InterruptPending = true;
	LogMemoryContextPending = true;
	/* latch will be set by procsignal_sigusr1_handler */
}
1357 :
/*
 * HandleGetMemoryContextInterrupt
 *		Handle receipt of an interrupt indicating a request to publish memory
 *		contexts statistics.
 *
 * All the actual work is deferred to ProcessGetMemoryContextInterrupt() as
 * this cannot be performed in a signal handler.
 *
 * Runs in signal-handler context, so it only sets flags for the main loop
 * to notice at the next CHECK_FOR_INTERRUPTS().
 */
void
HandleGetMemoryContextInterrupt(void)
{
	InterruptPending = true;
	PublishMemoryContextPending = true;
	/* latch will be set by procsignal_sigusr1_handler */
}
1373 :
/*
 * ProcessLogMemoryContextInterrupt
 *		Perform logging of memory contexts of this backend process.
 *
 * Any backend that participates in ProcSignal signaling must arrange
 * to call this function if we see LogMemoryContextPending set.
 * It is called from CHECK_FOR_INTERRUPTS(), which is enough because
 * the target process for logging of memory contexts is a backend.
 */
void
ProcessLogMemoryContextInterrupt(void)
{
	/* Clear the pending flag first so a nested check doesn't re-enter */
	LogMemoryContextPending = false;

	/*
	 * Use LOG_SERVER_ONLY to prevent this message from being sent to the
	 * connected client.
	 */
	ereport(LOG_SERVER_ONLY,
			(errhidestmt(true),
			 errhidecontext(true),
			 errmsg("logging memory contexts of PID %d", MyProcPid)));

	/*
	 * When a backend process is consuming huge memory, logging all its memory
	 * contexts might overrun available disk space. To prevent this, we limit
	 * the depth of the hierarchy, as well as the number of child contexts to
	 * log per parent to 100.
	 *
	 * As with MemoryContextStats(), we suppose that practical cases where the
	 * dump gets long will typically be huge numbers of siblings under the
	 * same parent context; while the additional debugging value from seeing
	 * details about individual siblings beyond 100 will not be large.
	 */
	MemoryContextStatsDetail(TopMemoryContext, 100, 100, false);
}
1410 :
1411 : /*
1412 : * ProcessGetMemoryContextInterrupt
1413 : * Generate information about memory contexts used by the process.
1414 : *
1415 : * Performs a breadth first search on the memory context tree, thus parents
1416 : * statistics are reported before their children in the monitoring function
1417 : * output.
1418 : *
1419 : * Statistics for all the processes are shared via the same dynamic shared
1420 : * area. Statistics written by each process are tracked independently in
1421 : * per-process DSA pointers. These pointers are stored in static shared memory.
1422 : *
1423 : * We calculate maximum number of context's statistics that can be displayed
1424 : * using a pre-determined limit for memory available per process for this
1425 : * utility maximum size of statistics for each context. The remaining context
1426 : * statistics if any are captured as a cumulative total at the end of
1427 : * individual context's statistics.
1428 : *
1429 : * If summary is true, we capture the level 1 and level 2 contexts
1430 : * statistics. For that we traverse the memory context tree recursively in
1431 : * depth first search manner to cover all the children of a parent context, to
1432 : * be able to display a cumulative total of memory consumption by a parent at
1433 : * level 2 and all its children.
1434 : */
1435 : void
1436 12 : ProcessGetMemoryContextInterrupt(void)
1437 : {
1438 : List *contexts;
1439 : HASHCTL ctl;
1440 : HTAB *context_id_lookup;
1441 12 : int context_id = 0;
1442 : MemoryStatsEntry *meminfo;
1443 12 : bool summary = false;
1444 : int max_stats;
1445 12 : int idx = MyProcNumber;
1446 12 : int stats_count = 0;
1447 12 : int stats_num = 0;
1448 : MemoryContextCounters stat;
1449 12 : int num_individual_stats = 0;
1450 :
1451 12 : PublishMemoryContextPending = false;
1452 :
1453 : /*
1454 : * The hash table is used for constructing "path" column of the view,
1455 : * similar to its local backend counterpart.
1456 : */
1457 12 : ctl.keysize = sizeof(MemoryContext);
1458 12 : ctl.entrysize = sizeof(MemoryStatsContextId);
1459 12 : ctl.hcxt = CurrentMemoryContext;
1460 :
1461 12 : context_id_lookup = hash_create("pg_get_remote_backend_memory_contexts",
1462 : 256,
1463 : &ctl,
1464 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1465 :
1466 : /* List of contexts to process in the next round - start at the top. */
1467 12 : contexts = list_make1(TopMemoryContext);
1468 :
1469 : /* Compute the number of stats that can fit in the defined limit */
1470 12 : max_stats =
1471 : MEMORY_CONTEXT_REPORT_MAX_PER_BACKEND / MAX_MEMORY_CONTEXT_STATS_SIZE;
1472 12 : LWLockAcquire(&memCxtState[idx].lw_lock, LW_EXCLUSIVE);
1473 12 : summary = memCxtState[idx].summary;
1474 12 : LWLockRelease(&memCxtState[idx].lw_lock);
1475 :
1476 : /*
1477 : * Traverse the memory context tree to find total number of contexts. If
1478 : * summary is requested report the total number of contexts at level 1 and
1479 : * 2 from the top. Also, populate the hash table of context ids.
1480 : */
1481 12 : compute_contexts_count_and_ids(contexts, context_id_lookup, &stats_count,
1482 : summary);
1483 :
1484 : /*
1485 : * Allocate memory in this process's DSA for storing statistics of the
1486 : * memory contexts upto max_stats, for contexts that don't fit within a
1487 : * limit, a cumulative total is written as the last record in the DSA
1488 : * segment.
1489 : */
1490 12 : stats_num = Min(stats_count, max_stats);
1491 :
1492 12 : LWLockAcquire(&memCxtArea->lw_lock, LW_EXCLUSIVE);
1493 :
1494 : /*
1495 : * Create a DSA and send handle to the client process after storing the
1496 : * context statistics. If number of contexts exceed a predefined
1497 : * limit(8MB), a cumulative total is stored for such contexts.
1498 : */
1499 12 : if (memCxtArea->memstats_dsa_handle == DSA_HANDLE_INVALID)
1500 : {
1501 6 : MemoryContext oldcontext = CurrentMemoryContext;
1502 : dsa_handle handle;
1503 :
1504 6 : MemoryContextSwitchTo(TopMemoryContext);
1505 :
1506 6 : MemoryStatsDsaArea = dsa_create(memCxtArea->lw_lock.tranche);
1507 :
1508 6 : handle = dsa_get_handle(MemoryStatsDsaArea);
1509 6 : MemoryContextSwitchTo(oldcontext);
1510 :
1511 6 : dsa_pin_mapping(MemoryStatsDsaArea);
1512 :
1513 : /*
1514 : * Pin the DSA area, this is to make sure the area remains attachable
1515 : * even if current backend exits. This is done so that the statistics
1516 : * are published even if the process exits while a client is waiting.
1517 : */
1518 6 : dsa_pin(MemoryStatsDsaArea);
1519 :
1520 : /* Set the handle in shared memory */
1521 6 : memCxtArea->memstats_dsa_handle = handle;
1522 : }
1523 :
1524 : /*
1525 : * If DSA exists, created by another process publishing statistics, attach
1526 : * to it.
1527 : */
1528 6 : else if (MemoryStatsDsaArea == NULL)
1529 : {
1530 0 : MemoryContext oldcontext = CurrentMemoryContext;
1531 :
1532 0 : MemoryContextSwitchTo(TopMemoryContext);
1533 0 : MemoryStatsDsaArea = dsa_attach(memCxtArea->memstats_dsa_handle);
1534 0 : MemoryContextSwitchTo(oldcontext);
1535 0 : dsa_pin_mapping(MemoryStatsDsaArea);
1536 : }
1537 12 : LWLockRelease(&memCxtArea->lw_lock);
1538 :
1539 : /*
1540 : * Hold the process lock to protect writes to process specific memory. Two
1541 : * processes publishing statistics do not block each other.
1542 : */
1543 12 : LWLockAcquire(&memCxtState[idx].lw_lock, LW_EXCLUSIVE);
1544 12 : memCxtState[idx].proc_id = MyProcPid;
1545 :
1546 12 : if (DsaPointerIsValid(memCxtState[idx].memstats_dsa_pointer))
1547 : {
1548 : /*
1549 : * Free any previous allocations, free the name, ident and path
1550 : * pointers before freeing the pointer that contains them.
1551 : */
1552 0 : free_memorycontextstate_dsa(MemoryStatsDsaArea, memCxtState[idx].total_stats,
1553 0 : memCxtState[idx].memstats_dsa_pointer);
1554 : }
1555 :
1556 : /*
1557 : * Assigning total stats before allocating memory so that memory cleanup
1558 : * can run if any subsequent dsa_allocate call to allocate name/ident/path
1559 : * fails.
1560 : */
1561 12 : memCxtState[idx].total_stats = stats_num;
1562 24 : memCxtState[idx].memstats_dsa_pointer =
1563 12 : dsa_allocate0(MemoryStatsDsaArea, stats_num * sizeof(MemoryStatsEntry));
1564 :
1565 : meminfo = (MemoryStatsEntry *)
1566 12 : dsa_get_address(MemoryStatsDsaArea, memCxtState[idx].memstats_dsa_pointer);
1567 :
1568 12 : if (summary)
1569 : {
1570 0 : int cxt_id = 0;
1571 0 : List *path = NIL;
1572 :
1573 : /* Copy TopMemoryContext statistics to DSA */
1574 0 : memset(&stat, 0, sizeof(stat));
1575 0 : (*TopMemoryContext->methods->stats) (TopMemoryContext, NULL, NULL,
1576 : &stat, true);
1577 0 : path = lcons_int(1, path);
1578 0 : PublishMemoryContext(meminfo, cxt_id, TopMemoryContext, path, stat,
1579 : 1, MemoryStatsDsaArea, 100);
1580 0 : cxt_id = cxt_id + 1;
1581 :
1582 : /*
1583 : * Copy statistics for each of TopMemoryContexts children. This
1584 : * includes statistics of at most 100 children per node, with each
1585 : * child node limited to a depth of 100 in its subtree.
1586 : */
1587 0 : for (MemoryContext c = TopMemoryContext->firstchild; c != NULL;
1588 0 : c = c->nextchild)
1589 : {
1590 : MemoryContextCounters grand_totals;
1591 0 : int num_contexts = 0;
1592 :
1593 0 : path = NIL;
1594 0 : memset(&grand_totals, 0, sizeof(grand_totals));
1595 :
1596 0 : MemoryContextStatsInternal(c, 1, 100, 100, &grand_totals,
1597 : PRINT_STATS_NONE, &num_contexts);
1598 :
1599 0 : path = compute_context_path(c, context_id_lookup);
1600 :
1601 : /*
1602 : * Register the stats entry first, that way the cleanup handler
1603 : * can reach it in case of allocation failures of one or more
1604 : * members.
1605 : */
1606 0 : memCxtState[idx].total_stats = cxt_id++;
1607 0 : PublishMemoryContext(meminfo, cxt_id, c, path,
1608 : grand_totals, num_contexts, MemoryStatsDsaArea, 100);
1609 : }
1610 0 : memCxtState[idx].total_stats = cxt_id;
1611 :
1612 0 : end_memorycontext_reporting();
1613 :
1614 : /* Notify waiting backends and return */
1615 0 : hash_destroy(context_id_lookup);
1616 :
1617 0 : return;
1618 : }
1619 :
1620 1140 : foreach_ptr(MemoryContextData, cur, contexts)
1621 : {
1622 1116 : List *path = NIL;
1623 :
1624 : /*
1625 : * Figure out the transient context_id of this context and each of its
1626 : * ancestors, to compute a path for this context.
1627 : */
1628 1116 : path = compute_context_path(cur, context_id_lookup);
1629 :
1630 : /* Examine the context stats */
1631 1116 : memset(&stat, 0, sizeof(stat));
1632 1116 : (*cur->methods->stats) (cur, NULL, NULL, &stat, true);
1633 :
1634 : /* Account for saving one statistics slot for cumulative reporting */
1635 1116 : if (context_id < (max_stats - 1) || stats_count <= max_stats)
1636 : {
1637 : /* Copy statistics to DSA memory */
1638 1116 : PublishMemoryContext(meminfo, context_id, cur, path, stat, 1, MemoryStatsDsaArea, 100);
1639 : }
1640 : else
1641 : {
1642 0 : meminfo[max_stats - 1].totalspace += stat.totalspace;
1643 0 : meminfo[max_stats - 1].nblocks += stat.nblocks;
1644 0 : meminfo[max_stats - 1].freespace += stat.freespace;
1645 0 : meminfo[max_stats - 1].freechunks += stat.freechunks;
1646 : }
1647 :
1648 : /*
1649 : * DSA max limit per process is reached, write aggregate of the
1650 : * remaining statistics.
1651 : *
1652 : * We can store contexts from 0 to max_stats - 1. When stats_count is
1653 : * greater than max_stats, we stop reporting individual statistics
1654 : * when context_id equals max_stats - 2. As we use max_stats - 1 array
1655 : * slot for reporting cumulative statistics or "Remaining Totals".
1656 : */
1657 1116 : if (stats_count > max_stats && context_id == (max_stats - 2))
1658 : {
1659 : char *nameptr;
1660 0 : int namelen = strlen("Remaining Totals");
1661 :
1662 0 : num_individual_stats = context_id + 1;
1663 0 : meminfo[max_stats - 1].name = dsa_allocate(MemoryStatsDsaArea, namelen + 1);
1664 0 : nameptr = dsa_get_address(MemoryStatsDsaArea, meminfo[max_stats - 1].name);
1665 0 : strncpy(nameptr, "Remaining Totals", namelen);
1666 0 : meminfo[max_stats - 1].ident = InvalidDsaPointer;
1667 0 : meminfo[max_stats - 1].path = InvalidDsaPointer;
1668 0 : meminfo[max_stats - 1].type = 0;
1669 : }
1670 1116 : context_id++;
1671 : }
1672 :
1673 : /*
1674 : * Statistics are not aggregated, i.e individual statistics reported when
1675 : * stats_count <= max_stats.
1676 : */
1677 12 : if (stats_count <= max_stats)
1678 : {
1679 12 : memCxtState[idx].total_stats = context_id;
1680 : }
1681 : /* Report number of aggregated memory contexts */
1682 : else
1683 : {
1684 0 : meminfo[max_stats - 1].num_agg_stats = context_id -
1685 : num_individual_stats;
1686 :
1687 : /*
1688 : * Total stats equals num_individual_stats + 1 record for cumulative
1689 : * statistics.
1690 : */
1691 0 : memCxtState[idx].total_stats = num_individual_stats + 1;
1692 : }
1693 :
1694 : /* Notify waiting backends and return */
1695 12 : end_memorycontext_reporting();
1696 :
1697 12 : hash_destroy(context_id_lookup);
1698 : }
1699 :
/*
 * Update timestamp and signal all the waiting client backends after copying
 * all the statistics.
 */
static void
end_memorycontext_reporting(void)
{
	/* Stamp the results while still holding the per-process lock */
	memCxtState[MyProcNumber].stats_timestamp = GetCurrentTimestamp();
	LWLockRelease(&memCxtState[MyProcNumber].lw_lock);
	/* Wake any client backends waiting on this process's statistics */
	ConditionVariableBroadcast(&memCxtState[MyProcNumber].memcxt_cv);
}
1711 :
1712 : /*
1713 : * compute_context_path
1714 : *
1715 : * Append the transient context_id of this context and each of its ancestors
1716 : * to a list, in order to compute a path.
1717 : */
1718 : static List *
1719 1116 : compute_context_path(MemoryContext c, HTAB *context_id_lookup)
1720 : {
1721 : bool found;
1722 1116 : List *path = NIL;
1723 : MemoryContext cur_context;
1724 :
1725 4480 : for (cur_context = c; cur_context != NULL; cur_context = cur_context->parent)
1726 : {
1727 : MemoryStatsContextId *cur_entry;
1728 :
1729 3364 : cur_entry = hash_search(context_id_lookup, &cur_context, HASH_FIND, &found);
1730 :
1731 3364 : if (!found)
1732 0 : elog(ERROR, "hash table corrupted, can't construct path value");
1733 :
1734 3364 : path = lcons_int(cur_entry->context_id, path);
1735 : }
1736 :
1737 1116 : return path;
1738 : }
1739 :
/*
 * Return the number of contexts allocated currently by the backend
 * Assign context ids to each of the contexts.
 *
 * Traversal is breadth-first: each context's children are lappend'ed to
 * "contexts" while that same list is being iterated, so the appended cells
 * are visited in later iterations.  NOTE(review): this relies on foreach_ptr
 * tolerating lappend() during iteration — verify against pg_list.h's rules.
 */
static void
compute_contexts_count_and_ids(List *contexts, HTAB *context_id_lookup,
							   int *stats_count, bool summary)
{
	foreach_ptr(MemoryContextData, cur, contexts)
	{
		MemoryStatsContextId *entry;
		bool		found;

		entry = (MemoryStatsContextId *) hash_search(context_id_lookup, &cur,
													 HASH_ENTER, &found);
		Assert(!found);

		/*
		 * context id starts with 1 so increment the stats_count before
		 * assigning.
		 */
		entry->context_id = ++(*stats_count);

		/* Append the children of the current context to the main list. */
		for (MemoryContext c = cur->firstchild; c != NULL; c = c->nextchild)
		{
			if (summary)
			{
				/* In summary mode, ids for level-2 contexts are needed too */
				entry = (MemoryStatsContextId *) hash_search(context_id_lookup, &c,
															 HASH_ENTER, &found);
				Assert(!found);

				entry->context_id = ++(*stats_count);
			}

			contexts = lappend(contexts, c);
		}

		/*
		 * In summary mode only the first two level (from top) contexts are
		 * displayed.
		 */
		if (summary)
			break;
	}
}
1786 :
1787 : /*
1788 : * PublishMemoryContext
1789 : *
1790 : * Copy the memory context statistics of a single context to a DSA memory
1791 : */
1792 : static void
1793 1116 : PublishMemoryContext(MemoryStatsEntry *memcxt_info, int curr_id,
1794 : MemoryContext context, List *path,
1795 : MemoryContextCounters stat, int num_contexts,
1796 : dsa_area *area, int max_levels)
1797 : {
1798 1116 : const char *ident = context->ident;
1799 1116 : const char *name = context->name;
1800 : int *path_list;
1801 :
1802 : /*
1803 : * To be consistent with logging output, we label dynahash contexts with
1804 : * just the hash table name as with MemoryContextStatsPrint().
1805 : */
1806 1116 : if (context->ident && strncmp(context->name, "dynahash", 8) == 0)
1807 : {
1808 134 : name = context->ident;
1809 134 : ident = NULL;
1810 : }
1811 :
1812 1116 : if (name != NULL)
1813 : {
1814 1116 : int namelen = strlen(name);
1815 : char *nameptr;
1816 :
1817 1116 : if (strlen(name) >= MEMORY_CONTEXT_IDENT_SHMEM_SIZE)
1818 0 : namelen = pg_mbcliplen(name, namelen,
1819 : MEMORY_CONTEXT_IDENT_SHMEM_SIZE - 1);
1820 :
1821 1116 : memcxt_info[curr_id].name = dsa_allocate(area, namelen + 1);
1822 1116 : nameptr = (char *) dsa_get_address(area, memcxt_info[curr_id].name);
1823 1116 : strlcpy(nameptr, name, namelen + 1);
1824 : }
1825 : else
1826 0 : memcxt_info[curr_id].name = InvalidDsaPointer;
1827 :
1828 : /* Trim and copy the identifier if it is not set to NULL */
1829 1116 : if (ident != NULL)
1830 : {
1831 724 : int idlen = strlen(context->ident);
1832 : char *identptr;
1833 :
1834 : /*
1835 : * Some identifiers such as SQL query string can be very long,
1836 : * truncate oversize identifiers.
1837 : */
1838 724 : if (idlen >= MEMORY_CONTEXT_IDENT_SHMEM_SIZE)
1839 42 : idlen = pg_mbcliplen(ident, idlen,
1840 : MEMORY_CONTEXT_IDENT_SHMEM_SIZE - 1);
1841 :
1842 724 : memcxt_info[curr_id].ident = dsa_allocate(area, idlen + 1);
1843 724 : identptr = (char *) dsa_get_address(area, memcxt_info[curr_id].ident);
1844 724 : strlcpy(identptr, ident, idlen + 1);
1845 : }
1846 : else
1847 392 : memcxt_info[curr_id].ident = InvalidDsaPointer;
1848 :
1849 : /* Allocate DSA memory for storing path information */
1850 1116 : if (path == NIL)
1851 0 : memcxt_info[curr_id].path = InvalidDsaPointer;
1852 : else
1853 : {
1854 1116 : int levels = Min(list_length(path), max_levels);
1855 :
1856 1116 : memcxt_info[curr_id].path_length = levels;
1857 1116 : memcxt_info[curr_id].path = dsa_allocate0(area, levels * sizeof(int));
1858 1116 : memcxt_info[curr_id].levels = list_length(path);
1859 1116 : path_list = (int *) dsa_get_address(area, memcxt_info[curr_id].path);
1860 :
1861 4480 : foreach_int(i, path)
1862 : {
1863 3364 : path_list[foreach_current_index(i)] = i;
1864 3364 : if (--levels == 0)
1865 1116 : break;
1866 : }
1867 : }
1868 1116 : memcxt_info[curr_id].type = context->type;
1869 1116 : memcxt_info[curr_id].totalspace = stat.totalspace;
1870 1116 : memcxt_info[curr_id].nblocks = stat.nblocks;
1871 1116 : memcxt_info[curr_id].freespace = stat.freespace;
1872 1116 : memcxt_info[curr_id].freechunks = stat.freechunks;
1873 1116 : memcxt_info[curr_id].num_agg_stats = num_contexts;
1874 1116 : }
1875 :
1876 : /*
1877 : * free_memorycontextstate_dsa
1878 : *
1879 : * Worker for freeing resources from a MemoryStatsEntry. Callers are
1880 : * responsible for ensuring that the DSA pointer is valid.
1881 : */
1882 : static void
1883 12 : free_memorycontextstate_dsa(dsa_area *area, int total_stats,
1884 : dsa_pointer prev_dsa_pointer)
1885 : {
1886 : MemoryStatsEntry *meminfo;
1887 :
1888 12 : meminfo = (MemoryStatsEntry *) dsa_get_address(area, prev_dsa_pointer);
1889 : Assert(meminfo != NULL);
1890 1128 : for (int i = 0; i < total_stats; i++)
1891 : {
1892 1116 : if (DsaPointerIsValid(meminfo[i].name))
1893 1116 : dsa_free(area, meminfo[i].name);
1894 :
1895 1116 : if (DsaPointerIsValid(meminfo[i].ident))
1896 724 : dsa_free(area, meminfo[i].ident);
1897 :
1898 1116 : if (DsaPointerIsValid(meminfo[i].path))
1899 1116 : dsa_free(area, meminfo[i].path);
1900 : }
1901 :
1902 12 : dsa_free(area, memCxtState[MyProcNumber].memstats_dsa_pointer);
1903 12 : memCxtState[MyProcNumber].memstats_dsa_pointer = InvalidDsaPointer;
1904 12 : }
1905 :
/*
 * AtProcExit_memstats_cleanup
 *
 * Free the memory context statistics stored by this process
 * in DSA area.
 *
 * NOTE(review): the (int code, Datum arg) signature matches the shmem-exit
 * callback convention and both arguments are unused here -- presumably this
 * is registered as a proc-exit callback; confirm at the registration site.
 */
void
AtProcExit_memstats_cleanup(int code, Datum arg)
{
	int			idx = MyProcNumber;

	/* Nothing to do if the statistics DSA area was never set up */
	if (memCxtArea->memstats_dsa_handle == DSA_HANDLE_INVALID)
		return;

	/* Lock our own entry while we inspect and free its DSA pointers */
	LWLockAcquire(&memCxtState[idx].lw_lock, LW_EXCLUSIVE);

	if (!DsaPointerIsValid(memCxtState[idx].memstats_dsa_pointer))
	{
		/* No statistics published by this process; release and bail out */
		LWLockRelease(&memCxtState[idx].lw_lock);
		return;
	}

	/* If the dsa mapping could not be found, attach to the area */
	if (MemoryStatsDsaArea == NULL)
		MemoryStatsDsaArea = dsa_attach(memCxtArea->memstats_dsa_handle);

	/*
	 * Free the memory context statistics, free the name, ident and path
	 * pointers before freeing the pointer that contains these pointers and
	 * integer statistics.
	 */
	free_memorycontextstate_dsa(MemoryStatsDsaArea, memCxtState[idx].total_stats,
								memCxtState[idx].memstats_dsa_pointer);

	dsa_detach(MemoryStatsDsaArea);
	LWLockRelease(&memCxtState[idx].lw_lock);
}
1941 :
/*
 * palloc
 *		Allocate 'size' bytes in CurrentMemoryContext.
 *
 * Allocation-failure handling is delegated to the context's alloc method,
 * which is expected never to return NULL here (no MCXT_ALLOC_NO_OOM flag
 * is passed).
 */
void *
palloc(Size size)
{
	/* duplicates MemoryContextAlloc to avoid increased overhead */
	void	   *ret;
	MemoryContext context = CurrentMemoryContext;

	Assert(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	/* the context now contains at least one allocation */
	context->isReset = false;

	/*
	 * For efficiency reasons, we purposefully offload the handling of
	 * allocation failures to the MemoryContextMethods implementation as this
	 * allows these checks to be performed only when an actual malloc needs to
	 * be done to request more memory from the OS.  Additionally, not having
	 * to execute any instructions after this call allows the compiler to use
	 * the sibling call optimization.  If you're considering adding code after
	 * this call, consider making it the responsibility of the 'alloc'
	 * function instead.
	 */
	ret = context->methods->alloc(context, size, 0);
	/* We expect OOM to be handled by the alloc function */
	Assert(ret != NULL);
	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	return ret;
}
1971 :
/*
 * palloc0
 *		Like palloc(), but the returned memory is zero-filled.
 */
void *
palloc0(Size size)
{
	/* duplicates MemoryContextAllocZero to avoid increased overhead */
	void	   *ret;
	MemoryContext context = CurrentMemoryContext;

	Assert(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	context->isReset = false;

	ret = context->methods->alloc(context, size, 0);
	/* We expect OOM to be handled by the alloc function */
	Assert(ret != NULL);
	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	/* zero the chunk; MemSetAligned relies on the allocator's alignment */
	MemSetAligned(ret, 0, size);

	return ret;
}
1993 :
/*
 * palloc_extended
 *		Like palloc(), but with the behavior-modifying 'flags' of
 *		MemoryContextAllocExtended (e.g. MCXT_ALLOC_NO_OOM,
 *		MCXT_ALLOC_ZERO, MCXT_ALLOC_HUGE).
 *
 * Returns NULL on allocation failure only when MCXT_ALLOC_NO_OOM is given.
 */
void *
palloc_extended(Size size, int flags)
{
	/* duplicates MemoryContextAllocExtended to avoid increased overhead */
	void	   *ret;
	MemoryContext context = CurrentMemoryContext;

	Assert(MemoryContextIsValid(context));
	AssertNotInCriticalSection(context);

	context->isReset = false;

	ret = context->methods->alloc(context, size, flags);
	if (unlikely(ret == NULL))
	{
		/* NULL can be returned only when using MCXT_ALLOC_NO_OOM */
		Assert(flags & MCXT_ALLOC_NO_OOM);
		return NULL;
	}

	VALGRIND_MEMPOOL_ALLOC(context, ret, size);

	/* zero the chunk only when the caller asked for it */
	if ((flags & MCXT_ALLOC_ZERO) != 0)
		MemSetAligned(ret, 0, size);

	return ret;
}
2021 :
/*
 * MemoryContextAllocAligned
 *		Allocate 'size' bytes of memory in 'context' aligned to 'alignto'
 *		bytes.
 *
 * Currently, we align addresses by requesting additional bytes from the
 * MemoryContext's standard allocator function and then aligning the returned
 * address by the required alignment.  This means that the given MemoryContext
 * must support providing us with a chunk of memory that's larger than 'size'.
 * For allocators such as Slab, that's not going to work, as slab only allows
 * chunks of the size that's specified when the context is created.
 *
 * 'alignto' must be a power of 2.
 * 'flags' may be 0 or set the same as MemoryContextAllocExtended().
 *
 * The returned pointer may be passed to pfree()/repalloc(); the redirection
 * MemoryChunk written below lets those find the underlying allocation.
 */
void *
MemoryContextAllocAligned(MemoryContext context,
						  Size size, Size alignto, int flags)
{
	MemoryChunk *alignedchunk;
	Size		alloc_size;
	void	   *unaligned;
	void	   *aligned;

	/* wouldn't make much sense to waste that much space */
	Assert(alignto < (128 * 1024 * 1024));

	/* ensure alignto is a power of 2 */
	Assert((alignto & (alignto - 1)) == 0);

	/*
	 * If the alignment requirements are less than what we already guarantee
	 * then just use the standard allocation function.
	 */
	if (unlikely(alignto <= MAXIMUM_ALIGNOF))
		return MemoryContextAllocExtended(context, size, flags);

	/*
	 * We implement aligned pointers by simply allocating enough memory for
	 * the requested size plus the alignment and an additional "redirection"
	 * MemoryChunk.  This additional MemoryChunk is required for operations
	 * such as pfree when used on the pointer returned by this function.  We
	 * use this redirection MemoryChunk in order to find the pointer to the
	 * memory that was returned by the MemoryContextAllocExtended call below.
	 * We do that by "borrowing" the block offset field and instead of using
	 * that to find the offset into the owning block, we use it to find the
	 * original allocated address.
	 *
	 * Here we must allocate enough extra memory so that we can still align
	 * the pointer returned by MemoryContextAllocExtended and also have enough
	 * space for the redirection MemoryChunk.  Since allocations will already
	 * be at least aligned by MAXIMUM_ALIGNOF, we can subtract that amount
	 * from the allocation size to save a little memory.
	 */
	alloc_size = size + PallocAlignedExtraBytes(alignto);

#ifdef MEMORY_CONTEXT_CHECKING
	/* ensure there's space for a sentinel byte */
	alloc_size += 1;
#endif

	/* perform the actual allocation */
	unaligned = MemoryContextAllocExtended(context, alloc_size, flags);

	/* set the aligned pointer */
	aligned = (void *) TYPEALIGN(alignto, (char *) unaligned +
								 sizeof(MemoryChunk));

	alignedchunk = PointerGetMemoryChunk(aligned);

	/*
	 * We set the redirect MemoryChunk so that the block offset calculation is
	 * used to point back to the 'unaligned' allocated chunk.  This allows us
	 * to use MemoryChunkGetBlock() to find the unaligned chunk when we need
	 * to perform operations such as pfree() and repalloc().
	 *
	 * We store 'alignto' in the MemoryChunk's 'value' so that we know what
	 * the alignment was set to should we ever be asked to realloc this
	 * pointer.
	 */
	MemoryChunkSetHdrMask(alignedchunk, unaligned, alignto,
						  MCTX_ALIGNED_REDIRECT_ID);

	/* double check we produced a correctly aligned pointer */
	Assert((void *) TYPEALIGN(alignto, aligned) == aligned);

#ifdef MEMORY_CONTEXT_CHECKING
	alignedchunk->requested_size = size;
	/* set mark to catch clobber of "unused" space */
	set_sentinel(aligned, size);
#endif

	/* Mark the bytes before the redirection header as noaccess */
	VALGRIND_MAKE_MEM_NOACCESS(unaligned,
							   (char *) alignedchunk - (char *) unaligned);

	/* Disallow access to the redirection chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(alignedchunk, sizeof(MemoryChunk));

	return aligned;
}
2123 :
/*
 * palloc_aligned
 *		Allocate 'size' bytes returning a pointer that's aligned to the
 *		'alignto' boundary.
 *
 * Simply a convenience wrapper around MemoryContextAllocAligned() that
 * targets CurrentMemoryContext; see that function for the constraints on
 * 'alignto' (power of 2) and 'flags'.
 */
void *
palloc_aligned(Size size, Size alignto, int flags)
{
	return MemoryContextAllocAligned(CurrentMemoryContext, size, alignto, flags);
}
2144 :
/*
 * pfree
 *		Release an allocated chunk.
 */
void
pfree(void *pointer)
{
#ifdef USE_VALGRIND
	/* capture these before the chunk header is invalidated by the free */
	MemoryContextMethodID method = GetMemoryChunkMethodID(pointer);
	MemoryContext context = GetMemoryChunkContext(pointer);
#endif

	/* dispatch to the owning context type's free function */
	MCXT_METHOD(pointer, free_p) (pointer);

#ifdef USE_VALGRIND

	/*
	 * NOTE(review): aligned-redirect chunks appear to be excluded because
	 * their mempool accounting happens on the underlying allocation --
	 * confirm against the aligned-chunk free path.
	 */
	if (method != MCTX_ALIGNED_REDIRECT_ID)
		VALGRIND_MEMPOOL_FREE(context, pointer);
#endif
}
2164 :
/*
 * repalloc
 *		Adjust the size of a previously allocated chunk.
 *
 * The chunk keeps its owning memory context; 'pointer' must not be used
 * afterwards as the chunk may have moved.
 */
void *
repalloc(void *pointer, Size size)
{
#ifdef USE_VALGRIND
	MemoryContextMethodID method = GetMemoryChunkMethodID(pointer);
#endif
#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
	MemoryContext context = GetMemoryChunkContext(pointer);
#endif
	void	   *ret;

	AssertNotInCriticalSection(context);

	/* isReset must be false already */
	Assert(!context->isReset);

	/*
	 * For efficiency reasons, we purposefully offload the handling of
	 * allocation failures to the MemoryContextMethods implementation as this
	 * allows these checks to be performed only when an actual malloc needs to
	 * be done to request more memory from the OS.  Additionally, not having
	 * to execute any instructions after this call allows the compiler to use
	 * the sibling call optimization.  If you're considering adding code after
	 * this call, consider making it the responsibility of the 'realloc'
	 * function instead.
	 */
	ret = MCXT_METHOD(pointer, realloc) (pointer, size, 0);

#ifdef USE_VALGRIND

	/*
	 * NOTE(review): aligned-redirect chunks are skipped here, matching the
	 * exclusion in pfree() -- confirm against the aligned-chunk realloc
	 * path.
	 */
	if (method != MCTX_ALIGNED_REDIRECT_ID)
		VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
#endif

	return ret;
}
2204 :
/*
 * repalloc_extended
 *		Adjust the size of a previously allocated chunk,
 *		with HUGE and NO_OOM options.
 *
 * Returns NULL on allocation failure only when MCXT_ALLOC_NO_OOM is set in
 * 'flags'; otherwise failure is handled inside the realloc method.
 */
void *
repalloc_extended(void *pointer, Size size, int flags)
{
#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
	MemoryContext context = GetMemoryChunkContext(pointer);
#endif
	void	   *ret;

	AssertNotInCriticalSection(context);

	/* isReset must be false already */
	Assert(!context->isReset);

	/*
	 * For efficiency reasons, we purposefully offload the handling of
	 * allocation failures to the MemoryContextMethods implementation as this
	 * allows these checks to be performed only when an actual malloc needs to
	 * be done to request more memory from the OS.  Additionally, not having
	 * to execute any instructions after this call allows the compiler to use
	 * the sibling call optimization.  If you're considering adding code after
	 * this call, consider making it the responsibility of the 'realloc'
	 * function instead.
	 */
	ret = MCXT_METHOD(pointer, realloc) (pointer, size, flags);
	if (unlikely(ret == NULL))
		return NULL;

	VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);

	return ret;
}
2241 :
2242 : /*
2243 : * repalloc0
2244 : * Adjust the size of a previously allocated chunk and zero out the added
2245 : * space.
2246 : */
2247 : void *
2248 48946 : repalloc0(void *pointer, Size oldsize, Size size)
2249 : {
2250 : void *ret;
2251 :
2252 : /* catch wrong argument order */
2253 48946 : if (unlikely(oldsize > size))
2254 0 : elog(ERROR, "invalid repalloc0 call: oldsize %zu, new size %zu",
2255 : oldsize, size);
2256 :
2257 48946 : ret = repalloc(pointer, size);
2258 48946 : memset((char *) ret + oldsize, 0, (size - oldsize));
2259 48946 : return ret;
2260 : }
2261 :
2262 : /*
2263 : * MemoryContextAllocHuge
2264 : * Allocate (possibly-expansive) space within the specified context.
2265 : *
2266 : * See considerations in comment at MaxAllocHugeSize.
2267 : */
2268 : void *
2269 2872 : MemoryContextAllocHuge(MemoryContext context, Size size)
2270 : {
2271 : void *ret;
2272 :
2273 : Assert(MemoryContextIsValid(context));
2274 : AssertNotInCriticalSection(context);
2275 :
2276 2872 : context->isReset = false;
2277 :
2278 : /*
2279 : * For efficiency reasons, we purposefully offload the handling of
2280 : * allocation failures to the MemoryContextMethods implementation as this
2281 : * allows these checks to be performed only when an actual malloc needs to
2282 : * be done to request more memory from the OS. Additionally, not having
2283 : * to execute any instructions after this call allows the compiler to use
2284 : * the sibling call optimization. If you're considering adding code after
2285 : * this call, consider making it the responsibility of the 'alloc'
2286 : * function instead.
2287 : */
2288 2872 : ret = context->methods->alloc(context, size, MCXT_ALLOC_HUGE);
2289 :
2290 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
2291 :
2292 2872 : return ret;
2293 : }
2294 :
/*
 * repalloc_huge
 *		Adjust the size of a previously allocated chunk, permitting a large
 *		value.  The previous allocation need not have been "huge".
 *
 * Errors out (rather than returning NULL) on failure, since only
 * MCXT_ALLOC_HUGE is passed, not MCXT_ALLOC_NO_OOM.
 */
void *
repalloc_huge(void *pointer, Size size)
{
	/* this one seems not worth its own implementation */
	return repalloc_extended(pointer, size, MCXT_ALLOC_HUGE);
}
2306 :
2307 : /*
2308 : * MemoryContextStrdup
2309 : * Like strdup(), but allocate from the specified context
2310 : */
2311 : char *
2312 90202462 : MemoryContextStrdup(MemoryContext context, const char *string)
2313 : {
2314 : char *nstr;
2315 90202462 : Size len = strlen(string) + 1;
2316 :
2317 90202462 : nstr = (char *) MemoryContextAlloc(context, len);
2318 :
2319 90202462 : memcpy(nstr, string, len);
2320 :
2321 90202462 : return nstr;
2322 : }
2323 :
/*
 * pstrdup
 *		Like strdup(), but allocate from CurrentMemoryContext.
 */
char *
pstrdup(const char *in)
{
	return MemoryContextStrdup(CurrentMemoryContext, in);
}
2329 :
2330 : /*
2331 : * pnstrdup
2332 : * Like pstrdup(), but append null byte to a
2333 : * not-necessarily-null-terminated input string.
2334 : */
2335 : char *
2336 1225876 : pnstrdup(const char *in, Size len)
2337 : {
2338 : char *out;
2339 :
2340 1225876 : len = strnlen(in, len);
2341 :
2342 1225876 : out = palloc(len + 1);
2343 1225876 : memcpy(out, in, len);
2344 1225876 : out[len] = '\0';
2345 :
2346 1225876 : return out;
2347 : }
2348 :
2349 : /*
2350 : * Make copy of string with all trailing newline characters removed.
2351 : */
2352 : char *
2353 428 : pchomp(const char *in)
2354 : {
2355 : size_t n;
2356 :
2357 428 : n = strlen(in);
2358 856 : while (n > 0 && in[n - 1] == '\n')
2359 428 : n--;
2360 428 : return pnstrdup(in, n);
2361 : }
|