Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * mcxt.c
4 : * POSTGRES memory context management code.
5 : *
6 : * This module handles context management operations that are independent
7 : * of the particular kind of context being operated on. It calls
8 : * context-type-specific operations via the function pointers in a
9 : * context's MemoryContextMethods struct.
10 : *
11 : *
12 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
13 : * Portions Copyright (c) 1994, Regents of the University of California
14 : *
15 : *
16 : * IDENTIFICATION
17 : * src/backend/utils/mmgr/mcxt.c
18 : *
19 : *-------------------------------------------------------------------------
20 : */
21 :
22 : #include "postgres.h"
23 :
24 : #include "mb/pg_wchar.h"
25 : #include "miscadmin.h"
26 : #include "nodes/pg_list.h"
27 : #include "storage/lwlock.h"
28 : #include "storage/ipc.h"
29 : #include "utils/dsa.h"
30 : #include "utils/hsearch.h"
31 : #include "utils/memdebug.h"
32 : #include "utils/memutils.h"
33 : #include "utils/memutils_internal.h"
34 : #include "utils/memutils_memorychunk.h"
35 :
36 :
37 : static void BogusFree(void *pointer);
38 : static void *BogusRealloc(void *pointer, Size size, int flags);
39 : static MemoryContext BogusGetChunkContext(void *pointer);
40 : static Size BogusGetChunkSpace(void *pointer);
41 :
42 : /*****************************************************************************
43 : * GLOBAL MEMORY *
44 : *****************************************************************************/
45 : #define BOGUS_MCTX(id) \
46 : [id].free_p = BogusFree, \
47 : [id].realloc = BogusRealloc, \
48 : [id].get_chunk_context = BogusGetChunkContext, \
49 : [id].get_chunk_space = BogusGetChunkSpace
50 :
51 : static const MemoryContextMethods mcxt_methods[] = {
52 : /* aset.c */
53 : [MCTX_ASET_ID].alloc = AllocSetAlloc,
54 : [MCTX_ASET_ID].free_p = AllocSetFree,
55 : [MCTX_ASET_ID].realloc = AllocSetRealloc,
56 : [MCTX_ASET_ID].reset = AllocSetReset,
57 : [MCTX_ASET_ID].delete_context = AllocSetDelete,
58 : [MCTX_ASET_ID].get_chunk_context = AllocSetGetChunkContext,
59 : [MCTX_ASET_ID].get_chunk_space = AllocSetGetChunkSpace,
60 : [MCTX_ASET_ID].is_empty = AllocSetIsEmpty,
61 : [MCTX_ASET_ID].stats = AllocSetStats,
62 : #ifdef MEMORY_CONTEXT_CHECKING
63 : [MCTX_ASET_ID].check = AllocSetCheck,
64 : #endif
65 :
66 : /* generation.c */
67 : [MCTX_GENERATION_ID].alloc = GenerationAlloc,
68 : [MCTX_GENERATION_ID].free_p = GenerationFree,
69 : [MCTX_GENERATION_ID].realloc = GenerationRealloc,
70 : [MCTX_GENERATION_ID].reset = GenerationReset,
71 : [MCTX_GENERATION_ID].delete_context = GenerationDelete,
72 : [MCTX_GENERATION_ID].get_chunk_context = GenerationGetChunkContext,
73 : [MCTX_GENERATION_ID].get_chunk_space = GenerationGetChunkSpace,
74 : [MCTX_GENERATION_ID].is_empty = GenerationIsEmpty,
75 : [MCTX_GENERATION_ID].stats = GenerationStats,
76 : #ifdef MEMORY_CONTEXT_CHECKING
77 : [MCTX_GENERATION_ID].check = GenerationCheck,
78 : #endif
79 :
80 : /* slab.c */
81 : [MCTX_SLAB_ID].alloc = SlabAlloc,
82 : [MCTX_SLAB_ID].free_p = SlabFree,
83 : [MCTX_SLAB_ID].realloc = SlabRealloc,
84 : [MCTX_SLAB_ID].reset = SlabReset,
85 : [MCTX_SLAB_ID].delete_context = SlabDelete,
86 : [MCTX_SLAB_ID].get_chunk_context = SlabGetChunkContext,
87 : [MCTX_SLAB_ID].get_chunk_space = SlabGetChunkSpace,
88 : [MCTX_SLAB_ID].is_empty = SlabIsEmpty,
89 : [MCTX_SLAB_ID].stats = SlabStats,
90 : #ifdef MEMORY_CONTEXT_CHECKING
91 : [MCTX_SLAB_ID].check = SlabCheck,
92 : #endif
93 :
94 : /* alignedalloc.c */
95 : [MCTX_ALIGNED_REDIRECT_ID].alloc = NULL, /* not required */
96 : [MCTX_ALIGNED_REDIRECT_ID].free_p = AlignedAllocFree,
97 : [MCTX_ALIGNED_REDIRECT_ID].realloc = AlignedAllocRealloc,
98 : [MCTX_ALIGNED_REDIRECT_ID].reset = NULL, /* not required */
99 : [MCTX_ALIGNED_REDIRECT_ID].delete_context = NULL, /* not required */
100 : [MCTX_ALIGNED_REDIRECT_ID].get_chunk_context = AlignedAllocGetChunkContext,
101 : [MCTX_ALIGNED_REDIRECT_ID].get_chunk_space = AlignedAllocGetChunkSpace,
102 : [MCTX_ALIGNED_REDIRECT_ID].is_empty = NULL, /* not required */
103 : [MCTX_ALIGNED_REDIRECT_ID].stats = NULL, /* not required */
104 : #ifdef MEMORY_CONTEXT_CHECKING
105 : [MCTX_ALIGNED_REDIRECT_ID].check = NULL, /* not required */
106 : #endif
107 :
108 : /* bump.c */
109 : [MCTX_BUMP_ID].alloc = BumpAlloc,
110 : [MCTX_BUMP_ID].free_p = BumpFree,
111 : [MCTX_BUMP_ID].realloc = BumpRealloc,
112 : [MCTX_BUMP_ID].reset = BumpReset,
113 : [MCTX_BUMP_ID].delete_context = BumpDelete,
114 : [MCTX_BUMP_ID].get_chunk_context = BumpGetChunkContext,
115 : [MCTX_BUMP_ID].get_chunk_space = BumpGetChunkSpace,
116 : [MCTX_BUMP_ID].is_empty = BumpIsEmpty,
117 : [MCTX_BUMP_ID].stats = BumpStats,
118 : #ifdef MEMORY_CONTEXT_CHECKING
119 : [MCTX_BUMP_ID].check = BumpCheck,
120 : #endif
121 :
122 :
123 : /*
124 : * Reserved and unused IDs should have dummy entries here. This allows us
125 : * to fail cleanly if a bogus pointer is passed to pfree or the like. It
126 : * seems sufficient to provide routines for the methods that might get
127 : * invoked from inspection of a chunk (see MCXT_METHOD calls below).
128 : */
129 : BOGUS_MCTX(MCTX_1_RESERVED_GLIBC_ID),
130 : BOGUS_MCTX(MCTX_2_RESERVED_GLIBC_ID),
131 : BOGUS_MCTX(MCTX_8_UNUSED_ID),
132 : BOGUS_MCTX(MCTX_9_UNUSED_ID),
133 : BOGUS_MCTX(MCTX_10_UNUSED_ID),
134 : BOGUS_MCTX(MCTX_11_UNUSED_ID),
135 : BOGUS_MCTX(MCTX_12_UNUSED_ID),
136 : BOGUS_MCTX(MCTX_13_UNUSED_ID),
137 : BOGUS_MCTX(MCTX_14_UNUSED_ID),
138 : BOGUS_MCTX(MCTX_0_RESERVED_UNUSEDMEM_ID),
139 : BOGUS_MCTX(MCTX_15_RESERVED_WIPEDMEM_ID)
140 : };
141 :
142 : #undef BOGUS_MCTX
/*
 * Passed to MemoryContextStatsInternal to control whether context
 * statistics are printed at all and, if so, whether they go to the
 * server log or to stderr.
 */
typedef enum PrintDestination
{
	PRINT_STATS_TO_STDERR = 0,
	PRINT_STATS_TO_LOGS,
	PRINT_STATS_NONE
} PrintDestination;
154 :
155 : /*
156 : * CurrentMemoryContext
157 : * Default memory context for allocations.
158 : */
159 : MemoryContext CurrentMemoryContext = NULL;
160 :
161 : /*
162 : * Standard top-level contexts. For a description of the purpose of each
163 : * of these contexts, refer to src/backend/utils/mmgr/README
164 : */
165 : MemoryContext TopMemoryContext = NULL;
166 : MemoryContext ErrorContext = NULL;
167 : MemoryContext PostmasterContext = NULL;
168 : MemoryContext CacheMemoryContext = NULL;
169 : MemoryContext MessageContext = NULL;
170 : MemoryContext TopTransactionContext = NULL;
171 : MemoryContext CurTransactionContext = NULL;
172 :
173 : /* This is a transient link to the active portal's memory context: */
174 : MemoryContext PortalContext = NULL;
175 : dsa_area *MemoryStatsDsaArea = NULL;
176 :
177 : static void MemoryContextDeleteOnly(MemoryContext context);
178 : static void MemoryContextCallResetCallbacks(MemoryContext context);
179 : static void MemoryContextStatsInternal(MemoryContext context, int level,
180 : int max_level, int max_children,
181 : MemoryContextCounters *totals,
182 : PrintDestination print_location,
183 : int *num_contexts);
184 : static void MemoryContextStatsPrint(MemoryContext context, void *passthru,
185 : const char *stats_string,
186 : bool print_to_stderr);
187 : static void PublishMemoryContext(MemoryStatsEntry *memcxt_info,
188 : int curr_id, MemoryContext context,
189 : List *path,
190 : MemoryContextCounters stat,
191 : int num_contexts, dsa_area *area,
192 : int max_levels);
193 : static void compute_contexts_count_and_ids(List *contexts, HTAB *context_id_lookup,
194 : int *stats_count,
195 : bool summary);
196 : static List *compute_context_path(MemoryContext c, HTAB *context_id_lookup);
197 : static void free_memorycontextstate_dsa(dsa_area *area, int total_stats,
198 : dsa_pointer prev_dsa_pointer);
199 : static void end_memorycontext_reporting(void);
200 :
201 : /*
202 : * You should not do memory allocations within a critical section, because
203 : * an out-of-memory error will be escalated to a PANIC. To enforce that
204 : * rule, the allocation functions Assert that.
205 : */
206 : #define AssertNotInCriticalSection(context) \
207 : Assert(CritSectionCount == 0 || (context)->allowInCritSection)
208 :
209 : /*
210 : * Call the given function in the MemoryContextMethods for the memory context
211 : * type that 'pointer' belongs to.
212 : */
213 : #define MCXT_METHOD(pointer, method) \
214 : mcxt_methods[GetMemoryChunkMethodID(pointer)].method
215 :
216 : /*
217 : * GetMemoryChunkMethodID
218 : * Return the MemoryContextMethodID from the uint64 chunk header which
219 : * directly precedes 'pointer'.
220 : */
221 : static inline MemoryContextMethodID
222 545967542 : GetMemoryChunkMethodID(const void *pointer)
223 : {
224 : uint64 header;
225 :
226 : /*
227 : * Try to detect bogus pointers handed to us, poorly though we can.
228 : * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
229 : * allocated chunk.
230 : */
231 : Assert(pointer == (const void *) MAXALIGN(pointer));
232 :
233 : /* Allow access to the uint64 header */
234 : VALGRIND_MAKE_MEM_DEFINED((char *) pointer - sizeof(uint64), sizeof(uint64));
235 :
236 545967542 : header = *((const uint64 *) ((const char *) pointer - sizeof(uint64)));
237 :
238 : /* Disallow access to the uint64 header */
239 : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer - sizeof(uint64), sizeof(uint64));
240 :
241 545967542 : return (MemoryContextMethodID) (header & MEMORY_CONTEXT_METHODID_MASK);
242 : }
243 :
244 : /*
245 : * GetMemoryChunkHeader
246 : * Return the uint64 chunk header which directly precedes 'pointer'.
247 : *
248 : * This is only used after GetMemoryChunkMethodID, so no need for error checks.
249 : */
250 : static inline uint64
251 0 : GetMemoryChunkHeader(const void *pointer)
252 : {
253 : uint64 header;
254 :
255 : /* Allow access to the uint64 header */
256 : VALGRIND_MAKE_MEM_DEFINED((char *) pointer - sizeof(uint64), sizeof(uint64));
257 :
258 0 : header = *((const uint64 *) ((const char *) pointer - sizeof(uint64)));
259 :
260 : /* Disallow access to the uint64 header */
261 : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer - sizeof(uint64), sizeof(uint64));
262 :
263 0 : return header;
264 : }
265 :
266 : /*
267 : * MemoryContextTraverseNext
268 : * Helper function to traverse all descendants of a memory context
269 : * without recursion.
270 : *
271 : * Recursion could lead to out-of-stack errors with deep context hierarchies,
272 : * which would be unpleasant in error cleanup code paths.
273 : *
274 : * To process 'context' and all its descendants, use a loop like this:
275 : *
276 : * <process 'context'>
277 : * for (MemoryContext curr = context->firstchild;
278 : * curr != NULL;
279 : * curr = MemoryContextTraverseNext(curr, context))
280 : * {
281 : * <process 'curr'>
282 : * }
283 : *
284 : * This visits all the contexts in pre-order, that is a node is visited
285 : * before its children.
286 : */
287 : static MemoryContext
288 1263084 : MemoryContextTraverseNext(MemoryContext curr, MemoryContext top)
289 : {
290 : /* After processing a node, traverse to its first child if any */
291 1263084 : if (curr->firstchild != NULL)
292 0 : return curr->firstchild;
293 :
294 : /*
295 : * After processing a childless node, traverse to its next sibling if
296 : * there is one. If there isn't, traverse back up to the parent (which
297 : * has already been visited, and now so have all its descendants). We're
298 : * done if that is "top", otherwise traverse to its next sibling if any,
299 : * otherwise repeat moving up.
300 : */
301 1263084 : while (curr->nextchild == NULL)
302 : {
303 648714 : curr = curr->parent;
304 648714 : if (curr == top)
305 648714 : return NULL;
306 : }
307 614370 : return curr->nextchild;
308 : }
309 :
310 : /*
311 : * Support routines to trap use of invalid memory context method IDs
312 : * (from calling pfree or the like on a bogus pointer). As a possible
313 : * aid in debugging, we report the header word along with the pointer
314 : * address (if we got here, there must be an accessible header word).
315 : */
316 : static void
317 0 : BogusFree(void *pointer)
318 : {
319 0 : elog(ERROR, "pfree called with invalid pointer %p (header 0x%016" PRIx64 ")",
320 : pointer, GetMemoryChunkHeader(pointer));
321 : }
322 :
323 : static void *
324 0 : BogusRealloc(void *pointer, Size size, int flags)
325 : {
326 0 : elog(ERROR, "repalloc called with invalid pointer %p (header 0x%016" PRIx64 ")",
327 : pointer, GetMemoryChunkHeader(pointer));
328 : return NULL; /* keep compiler quiet */
329 : }
330 :
331 : static MemoryContext
332 0 : BogusGetChunkContext(void *pointer)
333 : {
334 0 : elog(ERROR, "GetMemoryChunkContext called with invalid pointer %p (header 0x%016" PRIx64 ")",
335 : pointer, GetMemoryChunkHeader(pointer));
336 : return NULL; /* keep compiler quiet */
337 : }
338 :
339 : static Size
340 0 : BogusGetChunkSpace(void *pointer)
341 : {
342 0 : elog(ERROR, "GetMemoryChunkSpace called with invalid pointer %p (header 0x%016" PRIx64 ")",
343 : pointer, GetMemoryChunkHeader(pointer));
344 : return 0; /* keep compiler quiet */
345 : }
346 :
347 :
348 : /*****************************************************************************
349 : * EXPORTED ROUTINES *
350 : *****************************************************************************/
351 :
352 :
353 : /*
354 : * MemoryContextInit
355 : * Start up the memory-context subsystem.
356 : *
357 : * This must be called before creating contexts or allocating memory in
358 : * contexts. TopMemoryContext and ErrorContext are initialized here;
359 : * other contexts must be created afterwards.
360 : *
361 : * In normal multi-backend operation, this is called once during
362 : * postmaster startup, and not at all by individual backend startup
363 : * (since the backends inherit an already-initialized context subsystem
364 : * by virtue of being forked off the postmaster). But in an EXEC_BACKEND
365 : * build, each process must do this for itself.
366 : *
367 : * In a standalone backend this must be called during backend startup.
368 : */
369 : void
370 3664 : MemoryContextInit(void)
371 : {
372 : Assert(TopMemoryContext == NULL);
373 :
374 : /*
375 : * First, initialize TopMemoryContext, which is the parent of all others.
376 : */
377 3664 : TopMemoryContext = AllocSetContextCreate((MemoryContext) NULL,
378 : "TopMemoryContext",
379 : ALLOCSET_DEFAULT_SIZES);
380 :
381 : /*
382 : * Not having any other place to point CurrentMemoryContext, make it point
383 : * to TopMemoryContext. Caller should change this soon!
384 : */
385 3664 : CurrentMemoryContext = TopMemoryContext;
386 :
387 : /*
388 : * Initialize ErrorContext as an AllocSetContext with slow growth rate ---
389 : * we don't really expect much to be allocated in it. More to the point,
390 : * require it to contain at least 8K at all times. This is the only case
391 : * where retained memory in a context is *essential* --- we want to be
392 : * sure ErrorContext still has some memory even if we've run out
393 : * elsewhere! Also, allow allocations in ErrorContext within a critical
394 : * section. Otherwise a PANIC will cause an assertion failure in the error
395 : * reporting code, before printing out the real cause of the failure.
396 : *
397 : * This should be the last step in this function, as elog.c assumes memory
398 : * management works once ErrorContext is non-null.
399 : */
400 3664 : ErrorContext = AllocSetContextCreate(TopMemoryContext,
401 : "ErrorContext",
402 : 8 * 1024,
403 : 8 * 1024,
404 : 8 * 1024);
405 3664 : MemoryContextAllowInCriticalSection(ErrorContext, true);
406 3664 : }
407 :
408 : /*
409 : * MemoryContextReset
410 : * Release all space allocated within a context and delete all its
411 : * descendant contexts (but not the named context itself).
412 : */
413 : void
414 328763980 : MemoryContextReset(MemoryContext context)
415 : {
416 : Assert(MemoryContextIsValid(context));
417 :
418 : /* save a function call in common case where there are no children */
419 328763980 : if (context->firstchild != NULL)
420 525434 : MemoryContextDeleteChildren(context);
421 :
422 : /* save a function call if no pallocs since startup or last reset */
423 328763980 : if (!context->isReset)
424 49806684 : MemoryContextResetOnly(context);
425 328763980 : }
426 :
427 : /*
428 : * MemoryContextResetOnly
429 : * Release all space allocated within a context.
430 : * Nothing is done to the context's descendant contexts.
431 : */
432 : void
433 55614382 : MemoryContextResetOnly(MemoryContext context)
434 : {
435 : Assert(MemoryContextIsValid(context));
436 :
437 : /* Nothing to do if no pallocs since startup or last reset */
438 55614382 : if (!context->isReset)
439 : {
440 55613202 : MemoryContextCallResetCallbacks(context);
441 :
442 : /*
443 : * If context->ident points into the context's memory, it will become
444 : * a dangling pointer. We could prevent that by setting it to NULL
445 : * here, but that would break valid coding patterns that keep the
446 : * ident elsewhere, e.g. in a parent context. So for now we assume
447 : * the programmer got it right.
448 : */
449 :
450 55613202 : context->methods->reset(context);
451 55613202 : context->isReset = true;
452 : VALGRIND_DESTROY_MEMPOOL(context);
453 : VALGRIND_CREATE_MEMPOOL(context, 0, false);
454 : }
455 55614382 : }
456 :
457 : /*
458 : * MemoryContextResetChildren
459 : * Release all space allocated within a context's descendants,
460 : * but don't delete the contexts themselves. The named context
461 : * itself is not touched.
462 : */
463 : void
464 0 : MemoryContextResetChildren(MemoryContext context)
465 : {
466 : Assert(MemoryContextIsValid(context));
467 :
468 0 : for (MemoryContext curr = context->firstchild;
469 : curr != NULL;
470 0 : curr = MemoryContextTraverseNext(curr, context))
471 : {
472 0 : MemoryContextResetOnly(curr);
473 : }
474 0 : }
475 :
476 : /*
477 : * MemoryContextDelete
478 : * Delete a context and its descendants, and release all space
479 : * allocated therein.
480 : *
481 : * The type-specific delete routine removes all storage for the context,
482 : * but we have to deal with descendant nodes here.
483 : */
484 : void
485 9366880 : MemoryContextDelete(MemoryContext context)
486 : {
487 : MemoryContext curr;
488 :
489 : Assert(MemoryContextIsValid(context));
490 :
491 : /*
492 : * Delete subcontexts from the bottom up.
493 : *
494 : * Note: Do not use recursion here. A "stack depth limit exceeded" error
495 : * would be unpleasant if we're already in the process of cleaning up from
496 : * transaction abort. We also cannot use MemoryContextTraverseNext() here
497 : * because we modify the tree as we go.
498 : */
499 9366880 : curr = context;
500 : for (;;)
501 1510018 : {
502 : MemoryContext parent;
503 :
504 : /* Descend down until we find a leaf context with no children */
505 12386916 : while (curr->firstchild != NULL)
506 1510018 : curr = curr->firstchild;
507 :
508 : /*
509 : * We're now at a leaf with no children. Free it and continue from the
510 : * parent. Or if this was the original node, we're all done.
511 : */
512 10876898 : parent = curr->parent;
513 10876898 : MemoryContextDeleteOnly(curr);
514 :
515 10876898 : if (curr == context)
516 9366880 : break;
517 1510018 : curr = parent;
518 : }
519 9366880 : }
520 :
521 : /*
522 : * Subroutine of MemoryContextDelete,
523 : * to delete a context that has no children.
524 : * We must also delink the context from its parent, if it has one.
525 : */
526 : static void
527 10876898 : MemoryContextDeleteOnly(MemoryContext context)
528 : {
529 : Assert(MemoryContextIsValid(context));
530 : /* We had better not be deleting TopMemoryContext ... */
531 : Assert(context != TopMemoryContext);
532 : /* And not CurrentMemoryContext, either */
533 : Assert(context != CurrentMemoryContext);
534 : /* All the children should've been deleted already */
535 : Assert(context->firstchild == NULL);
536 :
537 : /*
538 : * It's not entirely clear whether 'tis better to do this before or after
539 : * delinking the context; but an error in a callback will likely result in
540 : * leaking the whole context (if it's not a root context) if we do it
541 : * after, so let's do it before.
542 : */
543 10876898 : MemoryContextCallResetCallbacks(context);
544 :
545 : /*
546 : * We delink the context from its parent before deleting it, so that if
547 : * there's an error we won't have deleted/busted contexts still attached
548 : * to the context tree. Better a leak than a crash.
549 : */
550 10876898 : MemoryContextSetParent(context, NULL);
551 :
552 : /*
553 : * Also reset the context's ident pointer, in case it points into the
554 : * context. This would only matter if someone tries to get stats on the
555 : * (already unlinked) context, which is unlikely, but let's be safe.
556 : */
557 10876898 : context->ident = NULL;
558 :
559 10876898 : context->methods->delete_context(context);
560 :
561 : VALGRIND_DESTROY_MEMPOOL(context);
562 10876898 : }
563 :
564 : /*
565 : * MemoryContextDeleteChildren
566 : * Delete all the descendants of the named context and release all
567 : * space allocated therein. The named context itself is not touched.
568 : */
569 : void
570 966696 : MemoryContextDeleteChildren(MemoryContext context)
571 : {
572 : Assert(MemoryContextIsValid(context));
573 :
574 : /*
575 : * MemoryContextDelete will delink the child from me, so just iterate as
576 : * long as there is a child.
577 : */
578 1561040 : while (context->firstchild != NULL)
579 594344 : MemoryContextDelete(context->firstchild);
580 966696 : }
581 :
582 : /*
583 : * MemoryContextRegisterResetCallback
584 : * Register a function to be called before next context reset/delete.
585 : * Such callbacks will be called in reverse order of registration.
586 : *
587 : * The caller is responsible for allocating a MemoryContextCallback struct
588 : * to hold the info about this callback request, and for filling in the
589 : * "func" and "arg" fields in the struct to show what function to call with
590 : * what argument. Typically the callback struct should be allocated within
591 : * the specified context, since that means it will automatically be freed
592 : * when no longer needed.
593 : *
594 : * There is no API for deregistering a callback once registered. If you
595 : * want it to not do anything anymore, adjust the state pointed to by its
596 : * "arg" to indicate that.
597 : */
598 : void
599 78294 : MemoryContextRegisterResetCallback(MemoryContext context,
600 : MemoryContextCallback *cb)
601 : {
602 : Assert(MemoryContextIsValid(context));
603 :
604 : /* Push onto head so this will be called before older registrants. */
605 78294 : cb->next = context->reset_cbs;
606 78294 : context->reset_cbs = cb;
607 : /* Mark the context as non-reset (it probably is already). */
608 78294 : context->isReset = false;
609 78294 : }
610 :
611 : /*
612 : * MemoryContextCallResetCallbacks
613 : * Internal function to call all registered callbacks for context.
614 : */
615 : static void
616 66490100 : MemoryContextCallResetCallbacks(MemoryContext context)
617 : {
618 : MemoryContextCallback *cb;
619 :
620 : /*
621 : * We pop each callback from the list before calling. That way, if an
622 : * error occurs inside the callback, we won't try to call it a second time
623 : * in the likely event that we reset or delete the context later.
624 : */
625 66568366 : while ((cb = context->reset_cbs) != NULL)
626 : {
627 78266 : context->reset_cbs = cb->next;
628 78266 : cb->func(cb->arg);
629 : }
630 66490100 : }
631 :
632 : /*
633 : * MemoryContextSetIdentifier
634 : * Set the identifier string for a memory context.
635 : *
636 : * An identifier can be provided to help distinguish among different contexts
637 : * of the same kind in memory context stats dumps. The identifier string
638 : * must live at least as long as the context it is for; typically it is
639 : * allocated inside that context, so that it automatically goes away on
640 : * context deletion. Pass id = NULL to forget any old identifier.
641 : */
642 : void
643 4861120 : MemoryContextSetIdentifier(MemoryContext context, const char *id)
644 : {
645 : Assert(MemoryContextIsValid(context));
646 4861120 : context->ident = id;
647 4861120 : }
648 :
649 : /*
650 : * MemoryContextSetParent
651 : * Change a context to belong to a new parent (or no parent).
652 : *
653 : * We provide this as an API function because it is sometimes useful to
654 : * change a context's lifespan after creation. For example, a context
655 : * might be created underneath a transient context, filled with data,
656 : * and then reparented underneath CacheMemoryContext to make it long-lived.
657 : * In this way no special effort is needed to get rid of the context in case
658 : * a failure occurs before its contents are completely set up.
659 : *
660 : * Callers often assume that this function cannot fail, so don't put any
661 : * elog(ERROR) calls in it.
662 : *
663 : * A possible caller error is to reparent a context under itself, creating
664 : * a loop in the context graph. We assert here that context != new_parent,
665 : * but checking for multi-level loops seems more trouble than it's worth.
666 : */
667 : void
668 11240570 : MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
669 : {
670 : Assert(MemoryContextIsValid(context));
671 : Assert(context != new_parent);
672 :
673 : /* Fast path if it's got correct parent already */
674 11240570 : if (new_parent == context->parent)
675 9402 : return;
676 :
677 : /* Delink from existing parent, if any */
678 11231168 : if (context->parent)
679 : {
680 11231168 : MemoryContext parent = context->parent;
681 :
682 11231168 : if (context->prevchild != NULL)
683 1153018 : context->prevchild->nextchild = context->nextchild;
684 : else
685 : {
686 : Assert(parent->firstchild == context);
687 10078150 : parent->firstchild = context->nextchild;
688 : }
689 :
690 11231168 : if (context->nextchild != NULL)
691 4628080 : context->nextchild->prevchild = context->prevchild;
692 : }
693 :
694 : /* And relink */
695 11231168 : if (new_parent)
696 : {
697 : Assert(MemoryContextIsValid(new_parent));
698 354270 : context->parent = new_parent;
699 354270 : context->prevchild = NULL;
700 354270 : context->nextchild = new_parent->firstchild;
701 354270 : if (new_parent->firstchild != NULL)
702 249136 : new_parent->firstchild->prevchild = context;
703 354270 : new_parent->firstchild = context;
704 : }
705 : else
706 : {
707 10876898 : context->parent = NULL;
708 10876898 : context->prevchild = NULL;
709 10876898 : context->nextchild = NULL;
710 : }
711 : }
712 :
713 : /*
714 : * MemoryContextAllowInCriticalSection
715 : * Allow/disallow allocations in this memory context within a critical
716 : * section.
717 : *
718 : * Normally, memory allocations are not allowed within a critical section,
719 : * because a failure would lead to PANIC. There are a few exceptions to
720 : * that, like allocations related to debugging code that is not supposed to
721 : * be enabled in production. This function can be used to exempt specific
722 : * memory contexts from the assertion in palloc().
723 : */
724 : void
725 4884 : MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
726 : {
727 : Assert(MemoryContextIsValid(context));
728 :
729 4884 : context->allowInCritSection = allow;
730 4884 : }
731 :
732 : /*
733 : * GetMemoryChunkContext
734 : * Given a currently-allocated chunk, determine the MemoryContext that
735 : * the chunk belongs to.
736 : */
737 : MemoryContext
738 3673654 : GetMemoryChunkContext(void *pointer)
739 : {
740 3673654 : return MCXT_METHOD(pointer, get_chunk_context) (pointer);
741 : }
742 :
743 : /*
744 : * GetMemoryChunkSpace
745 : * Given a currently-allocated chunk, determine the total space
746 : * it occupies (including all memory-allocation overhead).
747 : *
748 : * This is useful for measuring the total space occupied by a set of
749 : * allocated chunks.
750 : */
751 : Size
752 38493880 : GetMemoryChunkSpace(void *pointer)
753 : {
754 38493880 : return MCXT_METHOD(pointer, get_chunk_space) (pointer);
755 : }
756 :
757 : /*
758 : * MemoryContextGetParent
759 : * Get the parent context (if any) of the specified context
760 : */
761 : MemoryContext
762 17498 : MemoryContextGetParent(MemoryContext context)
763 : {
764 : Assert(MemoryContextIsValid(context));
765 :
766 17498 : return context->parent;
767 : }
768 :
769 : /*
770 : * MemoryContextIsEmpty
771 : * Is a memory context empty of any allocated space?
772 : */
773 : bool
774 10706 : MemoryContextIsEmpty(MemoryContext context)
775 : {
776 : Assert(MemoryContextIsValid(context));
777 :
778 : /*
779 : * For now, we consider a memory context nonempty if it has any children;
780 : * perhaps this should be changed later.
781 : */
782 10706 : if (context->firstchild != NULL)
783 2 : return false;
784 : /* Otherwise use the type-specific inquiry */
785 10704 : return context->methods->is_empty(context);
786 : }
787 :
788 : /*
789 : * Find the memory allocated to blocks for this memory context. If recurse is
790 : * true, also include children.
791 : */
792 : Size
793 1794062 : MemoryContextMemAllocated(MemoryContext context, bool recurse)
794 : {
795 1794062 : Size total = context->mem_allocated;
796 :
797 : Assert(MemoryContextIsValid(context));
798 :
799 1794062 : if (recurse)
800 : {
801 3057146 : for (MemoryContext curr = context->firstchild;
802 : curr != NULL;
803 1263084 : curr = MemoryContextTraverseNext(curr, context))
804 : {
805 1263084 : total += curr->mem_allocated;
806 : }
807 : }
808 :
809 1794062 : return total;
810 : }
811 :
812 : /*
813 : * Return the memory consumption statistics about the given context and its
814 : * children.
815 : */
816 : void
817 30 : MemoryContextMemConsumed(MemoryContext context,
818 : MemoryContextCounters *consumed)
819 : {
820 : Assert(MemoryContextIsValid(context));
821 :
822 30 : memset(consumed, 0, sizeof(*consumed));
823 :
824 : /* Examine the context itself */
825 30 : context->methods->stats(context, NULL, NULL, consumed, false);
826 :
827 : /* Examine children, using iteration not recursion */
828 30 : for (MemoryContext curr = context->firstchild;
829 : curr != NULL;
830 0 : curr = MemoryContextTraverseNext(curr, context))
831 : {
832 0 : curr->methods->stats(curr, NULL, NULL, consumed, false);
833 : }
834 30 : }
835 :
836 : /*
837 : * MemoryContextStats
838 : * Print statistics about the named context and all its descendants.
839 : *
840 : * This is just a debugging utility, so it's not very fancy. However, we do
841 : * make some effort to summarize when the output would otherwise be very long.
842 : * The statistics are sent to stderr.
843 : */
844 : void
845 0 : MemoryContextStats(MemoryContext context)
846 : {
847 : /* Hard-wired limits are usually good enough */
848 0 : MemoryContextStatsDetail(context, 100, 100, true);
849 0 : }
850 :
851 : /*
852 : * MemoryContextStatsDetail
853 : *
854 : * Entry point for use if you want to vary the number of child contexts shown.
855 : *
856 : * If print_to_stderr is true, print statistics about the memory contexts
857 : * with fprintf(stderr), otherwise use ereport().
858 : */
859 : void
860 18 : MemoryContextStatsDetail(MemoryContext context,
861 : int max_level, int max_children,
862 : bool print_to_stderr)
863 : {
864 : MemoryContextCounters grand_totals;
865 : int num_contexts;
866 : PrintDestination print_location;
867 :
868 18 : memset(&grand_totals, 0, sizeof(grand_totals));
869 :
870 18 : if (print_to_stderr)
871 0 : print_location = PRINT_STATS_TO_STDERR;
872 : else
873 18 : print_location = PRINT_STATS_TO_LOGS;
874 :
875 : /* num_contexts report number of contexts aggregated in the output */
876 18 : MemoryContextStatsInternal(context, 1, max_level, max_children,
877 : &grand_totals, print_location, &num_contexts);
878 :
879 18 : if (print_to_stderr)
880 0 : fprintf(stderr,
881 : "Grand total: %zu bytes in %zu blocks; %zu free (%zu chunks); %zu used\n",
882 : grand_totals.totalspace, grand_totals.nblocks,
883 : grand_totals.freespace, grand_totals.freechunks,
884 0 : grand_totals.totalspace - grand_totals.freespace);
885 : else
886 : {
887 : /*
888 : * Use LOG_SERVER_ONLY to prevent the memory contexts from being sent
889 : * to the connected client.
890 : *
891 : * We don't buffer the information about all memory contexts in a
892 : * backend into StringInfo and log it as one message. That would
893 : * require the buffer to be enlarged, risking an OOM as there could be
894 : * a large number of memory contexts in a backend. Instead, we log
895 : * one message per memory context.
896 : */
897 18 : ereport(LOG_SERVER_ONLY,
898 : (errhidestmt(true),
899 : errhidecontext(true),
900 : errmsg_internal("Grand total: %zu bytes in %zu blocks; %zu free (%zu chunks); %zu used",
901 : grand_totals.totalspace, grand_totals.nblocks,
902 : grand_totals.freespace, grand_totals.freechunks,
903 : grand_totals.totalspace - grand_totals.freespace)));
904 : }
905 18 : }
906 :
/*
 * MemoryContextStatsInternal
 *		One recursion level for MemoryContextStats
 *
 * Print stats for this context if possible, but in any case accumulate counts
 * into *totals (if not NULL). The callers should make sure that print_location
 * is set to PRINT_STATS_TO_STDERR or PRINT_STATS_TO_LOGS or PRINT_STATS_NONE.
 *
 * 'level' is this context's 1-based depth in the tree (drives indentation).
 * '*num_contexts' is incremented by the number of contexts examined,
 * including any that were only summarized rather than printed.
 */
static void
MemoryContextStatsInternal(MemoryContext context, int level,
						   int max_level, int max_children,
						   MemoryContextCounters *totals,
						   PrintDestination print_location, int *num_contexts)
{
	MemoryContext child;
	int			ichild;

	Assert(MemoryContextIsValid(context));

	/* Examine the context itself */
	switch (print_location)
	{
		case PRINT_STATS_TO_STDERR:
			context->methods->stats(context,
									MemoryContextStatsPrint,
									&level,
									totals, true);
			break;

		case PRINT_STATS_TO_LOGS:
			context->methods->stats(context,
									MemoryContextStatsPrint,
									&level,
									totals, false);
			break;

		case PRINT_STATS_NONE:

			/*
			 * Do not print the statistics if print_location is
			 * PRINT_STATS_NONE, only compute totals. This is used in
			 * reporting of memory context statistics via a sql function. Last
			 * parameter is not relevant.
			 */
			context->methods->stats(context,
									NULL,
									NULL,
									totals, false);
			break;
	}

	/* Increment the context count for each of the recursive call */
	*num_contexts = *num_contexts + 1;

	/*
	 * Examine children.
	 *
	 * If we are past the recursion depth limit or already running low on
	 * stack, do not print them explicitly but just summarize them. Similarly,
	 * if there are more than max_children of them, we do not print the rest
	 * explicitly, but just summarize them.
	 */
	child = context->firstchild;
	ichild = 0;
	if (level <= max_level && !stack_is_too_deep())
	{
		for (; child != NULL && ichild < max_children;
			 child = child->nextchild, ichild++)
		{
			MemoryContextStatsInternal(child, level + 1,
									   max_level, max_children,
									   totals,
									   print_location, num_contexts);
		}
	}

	/* A non-NULL child here means the loop above stopped before the end */
	if (child != NULL)
	{
		/* Summarize the rest of the children, avoiding recursion. */
		MemoryContextCounters local_totals;

		memset(&local_totals, 0, sizeof(local_totals));

		ichild = 0;
		while (child != NULL)
		{
			child->methods->stats(child, NULL, NULL, &local_totals, false);
			ichild++;
			child = MemoryContextTraverseNext(child, context);
		}

		/*
		 * Add the count of children contexts which are traversed in the
		 * non-recursive manner.
		 */
		*num_contexts = *num_contexts + ichild;

		if (print_location == PRINT_STATS_TO_STDERR)
		{
			for (int i = 0; i < level; i++)
				fprintf(stderr, " ");
			fprintf(stderr,
					"%d more child contexts containing %zu total in %zu blocks; %zu free (%zu chunks); %zu used\n",
					ichild,
					local_totals.totalspace,
					local_totals.nblocks,
					local_totals.freespace,
					local_totals.freechunks,
					local_totals.totalspace - local_totals.freespace);
		}
		else if (print_location == PRINT_STATS_TO_LOGS)
			ereport(LOG_SERVER_ONLY,
					(errhidestmt(true),
					 errhidecontext(true),
					 errmsg_internal("level: %d; %d more child contexts containing %zu total in %zu blocks; %zu free (%zu chunks); %zu used",
									 level,
									 ichild,
									 local_totals.totalspace,
									 local_totals.nblocks,
									 local_totals.freespace,
									 local_totals.freechunks,
									 local_totals.totalspace - local_totals.freespace)));

		/* Fold the summarized children into the caller's running totals too */
		if (totals)
		{
			totals->nblocks += local_totals.nblocks;
			totals->freechunks += local_totals.freechunks;
			totals->totalspace += local_totals.totalspace;
			totals->freespace += local_totals.freespace;
		}
	}
}
1039 :
/*
 * MemoryContextStatsPrint
 *		Print callback used by MemoryContextStatsInternal
 *
 * For now, the passthru pointer just points to "int level"; later we might
 * make that more complicated.
 */
static void
MemoryContextStatsPrint(MemoryContext context, void *passthru,
						const char *stats_string,
						bool print_to_stderr)
{
	int			level = *(int *) passthru;
	const char *name = context->name;
	const char *ident = context->ident;
	/* 110 bytes: ": " + at most 100 ident bytes + "..." + trailing NUL */
	char		truncated_ident[110];
	int			i;

	/*
	 * It seems preferable to label dynahash contexts with just the hash table
	 * name. Those are already unique enough, so the "dynahash" part isn't
	 * very helpful, and this way is more consistent with pre-v11 practice.
	 */
	if (ident && strcmp(name, "dynahash") == 0)
	{
		name = ident;
		ident = NULL;
	}

	truncated_ident[0] = '\0';

	if (ident)
	{
		/*
		 * Some contexts may have very long identifiers (e.g., SQL queries).
		 * Arbitrarily truncate at 100 bytes, but be careful not to break
		 * multibyte characters. Also, replace ASCII control characters, such
		 * as newlines, with spaces.
		 */
		int			idlen = strlen(ident);
		bool		truncated = false;

		strcpy(truncated_ident, ": ");
		i = strlen(truncated_ident);

		if (idlen > 100)
		{
			/* pg_mbcliplen backs the cut off to a multibyte-safe boundary */
			idlen = pg_mbcliplen(ident, idlen, 100);
			truncated = true;
		}

		while (idlen-- > 0)
		{
			unsigned char c = *ident++;

			/* replace control characters (e.g. newlines) with spaces */
			if (c < ' ')
				c = ' ';
			truncated_ident[i++] = c;
		}
		truncated_ident[i] = '\0';

		if (truncated)
			strcat(truncated_ident, "...");
	}

	if (print_to_stderr)
	{
		/* indent output proportionally to the context's tree depth */
		for (i = 1; i < level; i++)
			fprintf(stderr, " ");
		fprintf(stderr, "%s: %s%s\n", name, stats_string, truncated_ident);
	}
	else
		ereport(LOG_SERVER_ONLY,
				(errhidestmt(true),
				 errhidecontext(true),
				 errmsg_internal("level: %d; %s: %s%s",
								 level, name, stats_string, truncated_ident)));
}
1118 :
/*
 * MemoryContextCheck
 *		Run consistency checks on every chunk in the given context and in
 *		all of its descendants.
 *
 * Debugging aid only, so nothing fancy; compiled only when
 * MEMORY_CONTEXT_CHECKING is defined.
 */
#ifdef MEMORY_CONTEXT_CHECKING
void
MemoryContextCheck(MemoryContext context)
{
	MemoryContext child;

	Assert(MemoryContextIsValid(context));
	context->methods->check(context);

	/* Visit the whole subtree iteratively rather than by recursion */
	child = context->firstchild;
	while (child != NULL)
	{
		Assert(MemoryContextIsValid(child));
		child->methods->check(child);
		child = MemoryContextTraverseNext(child, context);
	}
}
#endif
1141 :
1142 : /*
1143 : * MemoryContextCreate
1144 : * Context-type-independent part of context creation.
1145 : *
1146 : * This is only intended to be called by context-type-specific
1147 : * context creation routines, not by the unwashed masses.
1148 : *
1149 : * The memory context creation procedure goes like this:
1150 : * 1. Context-type-specific routine makes some initial space allocation,
1151 : * including enough space for the context header. If it fails,
1152 : * it can ereport() with no damage done.
1153 : * 2. Context-type-specific routine sets up all type-specific fields of
1154 : * the header (those beyond MemoryContextData proper), as well as any
1155 : * other management fields it needs to have a fully valid context.
1156 : * Usually, failure in this step is impossible, but if it's possible
1157 : * the initial space allocation should be freed before ereport'ing.
1158 : * 3. Context-type-specific routine calls MemoryContextCreate() to fill in
1159 : * the generic header fields and link the context into the context tree.
1160 : * 4. We return to the context-type-specific routine, which finishes
1161 : * up type-specific initialization. This routine can now do things
1162 : * that might fail (like allocate more memory), so long as it's
1163 : * sure the node is left in a state that delete will handle.
1164 : *
1165 : * node: the as-yet-uninitialized common part of the context header node.
1166 : * tag: NodeTag code identifying the memory context type.
1167 : * method_id: MemoryContextMethodID of the context-type being created.
1168 : * parent: parent context, or NULL if this will be a top-level context.
1169 : * name: name of context (must be statically allocated).
1170 : *
1171 : * Context routines generally assume that MemoryContextCreate can't fail,
1172 : * so this can contain Assert but not elog/ereport.
1173 : */
1174 : void
1175 14569348 : MemoryContextCreate(MemoryContext node,
1176 : NodeTag tag,
1177 : MemoryContextMethodID method_id,
1178 : MemoryContext parent,
1179 : const char *name)
1180 : {
1181 : /* Creating new memory contexts is not allowed in a critical section */
1182 : Assert(CritSectionCount == 0);
1183 :
1184 : /* Validate parent, to help prevent crazy context linkages */
1185 : Assert(parent == NULL || MemoryContextIsValid(parent));
1186 : Assert(node != parent);
1187 :
1188 : /* Initialize all standard fields of memory context header */
1189 14569348 : node->type = tag;
1190 14569348 : node->isReset = true;
1191 14569348 : node->methods = &mcxt_methods[method_id];
1192 14569348 : node->parent = parent;
1193 14569348 : node->firstchild = NULL;
1194 14569348 : node->mem_allocated = 0;
1195 14569348 : node->prevchild = NULL;
1196 14569348 : node->name = name;
1197 14569348 : node->ident = NULL;
1198 14569348 : node->reset_cbs = NULL;
1199 :
1200 : /* OK to link node into context tree */
1201 14569348 : if (parent)
1202 : {
1203 14565586 : node->nextchild = parent->firstchild;
1204 14565586 : if (parent->firstchild != NULL)
1205 8148066 : parent->firstchild->prevchild = node;
1206 14565586 : parent->firstchild = node;
1207 : /* inherit allowInCritSection flag from parent */
1208 14565586 : node->allowInCritSection = parent->allowInCritSection;
1209 : }
1210 : else
1211 : {
1212 3762 : node->nextchild = NULL;
1213 3762 : node->allowInCritSection = false;
1214 : }
1215 :
1216 : VALGRIND_CREATE_MEMPOOL(node, 0, false);
1217 14569348 : }
1218 :
1219 : /*
1220 : * MemoryContextAllocationFailure
1221 : * For use by MemoryContextMethods implementations to handle when malloc
1222 : * returns NULL. The behavior is specific to whether MCXT_ALLOC_NO_OOM
1223 : * is in 'flags'.
1224 : */
1225 : void *
1226 0 : MemoryContextAllocationFailure(MemoryContext context, Size size, int flags)
1227 : {
1228 0 : if ((flags & MCXT_ALLOC_NO_OOM) == 0)
1229 : {
1230 0 : if (TopMemoryContext)
1231 0 : MemoryContextStats(TopMemoryContext);
1232 0 : ereport(ERROR,
1233 : (errcode(ERRCODE_OUT_OF_MEMORY),
1234 : errmsg("out of memory"),
1235 : errdetail("Failed on request of size %zu in memory context \"%s\".",
1236 : size, context->name)));
1237 : }
1238 0 : return NULL;
1239 : }
1240 :
/*
 * MemoryContextSizeFailure
 *		For use by MemoryContextMethods implementations to handle invalid
 *		memory allocation request sizes.
 */
void
MemoryContextSizeFailure(MemoryContext context, Size size, int flags)
{
	/* 'context' and 'flags' are unused; kept for a uniform failure API */
	elog(ERROR, "invalid memory alloc request size %zu", size);
}
1251 :
1252 : /*
1253 : * MemoryContextAlloc
1254 : * Allocate space within the specified context.
1255 : *
1256 : * This could be turned into a macro, but we'd have to import
1257 : * nodes/memnodes.h into postgres.h which seems a bad idea.
1258 : */
1259 : void *
1260 185003552 : MemoryContextAlloc(MemoryContext context, Size size)
1261 : {
1262 : void *ret;
1263 :
1264 : Assert(MemoryContextIsValid(context));
1265 : AssertNotInCriticalSection(context);
1266 :
1267 185003552 : context->isReset = false;
1268 :
1269 : /*
1270 : * For efficiency reasons, we purposefully offload the handling of
1271 : * allocation failures to the MemoryContextMethods implementation as this
1272 : * allows these checks to be performed only when an actual malloc needs to
1273 : * be done to request more memory from the OS. Additionally, not having
1274 : * to execute any instructions after this call allows the compiler to use
1275 : * the sibling call optimization. If you're considering adding code after
1276 : * this call, consider making it the responsibility of the 'alloc'
1277 : * function instead.
1278 : */
1279 185003552 : ret = context->methods->alloc(context, size, 0);
1280 :
1281 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1282 :
1283 185003552 : return ret;
1284 : }
1285 :
1286 : /*
1287 : * MemoryContextAllocZero
1288 : * Like MemoryContextAlloc, but clears allocated memory
1289 : *
1290 : * We could just call MemoryContextAlloc then clear the memory, but this
1291 : * is a very common combination, so we provide the combined operation.
1292 : */
1293 : void *
1294 44232724 : MemoryContextAllocZero(MemoryContext context, Size size)
1295 : {
1296 : void *ret;
1297 :
1298 : Assert(MemoryContextIsValid(context));
1299 : AssertNotInCriticalSection(context);
1300 :
1301 44232724 : context->isReset = false;
1302 :
1303 44232724 : ret = context->methods->alloc(context, size, 0);
1304 :
1305 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1306 :
1307 543774222 : MemSetAligned(ret, 0, size);
1308 :
1309 44232724 : return ret;
1310 : }
1311 :
1312 : /*
1313 : * MemoryContextAllocExtended
1314 : * Allocate space within the specified context using the given flags.
1315 : */
1316 : void *
1317 7554368 : MemoryContextAllocExtended(MemoryContext context, Size size, int flags)
1318 : {
1319 : void *ret;
1320 :
1321 : Assert(MemoryContextIsValid(context));
1322 : AssertNotInCriticalSection(context);
1323 :
1324 7554368 : if (!((flags & MCXT_ALLOC_HUGE) != 0 ? AllocHugeSizeIsValid(size) :
1325 : AllocSizeIsValid(size)))
1326 0 : elog(ERROR, "invalid memory alloc request size %zu", size);
1327 :
1328 7554368 : context->isReset = false;
1329 :
1330 7554368 : ret = context->methods->alloc(context, size, flags);
1331 7554368 : if (unlikely(ret == NULL))
1332 0 : return NULL;
1333 :
1334 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1335 :
1336 7554368 : if ((flags & MCXT_ALLOC_ZERO) != 0)
1337 220712828 : MemSetAligned(ret, 0, size);
1338 :
1339 7554368 : return ret;
1340 : }
1341 :
/*
 * HandleLogMemoryContextInterrupt
 *		Handle receipt of an interrupt indicating logging of memory
 *		contexts.
 *
 * All the actual work is deferred to ProcessLogMemoryContextInterrupt(),
 * because we cannot safely emit a log message inside the signal handler.
 */
void
HandleLogMemoryContextInterrupt(void)
{
	/* Runs in signal-handler context: only set flags, do no real work */
	InterruptPending = true;
	LogMemoryContextPending = true;
	/* latch will be set by procsignal_sigusr1_handler */
}
1357 :
/*
 * HandleGetMemoryContextInterrupt
 *		Handle receipt of an interrupt indicating a request to publish memory
 *		contexts statistics.
 *
 * All the actual work is deferred to ProcessGetMemoryContextInterrupt() as
 * this cannot be performed in a signal handler.
 */
void
HandleGetMemoryContextInterrupt(void)
{
	/* Runs in signal-handler context: only set flags, do no real work */
	InterruptPending = true;
	PublishMemoryContextPending = true;
	/* latch will be set by procsignal_sigusr1_handler */
}
1373 :
1374 : /*
1375 : * ProcessLogMemoryContextInterrupt
1376 : * Perform logging of memory contexts of this backend process.
1377 : *
1378 : * Any backend that participates in ProcSignal signaling must arrange
1379 : * to call this function if we see LogMemoryContextPending set.
1380 : * It is called from CHECK_FOR_INTERRUPTS(), which is enough because
1381 : * the target process for logging of memory contexts is a backend.
1382 : */
1383 : void
1384 18 : ProcessLogMemoryContextInterrupt(void)
1385 : {
1386 18 : LogMemoryContextPending = false;
1387 :
1388 : /*
1389 : * Use LOG_SERVER_ONLY to prevent this message from being sent to the
1390 : * connected client.
1391 : */
1392 18 : ereport(LOG_SERVER_ONLY,
1393 : (errhidestmt(true),
1394 : errhidecontext(true),
1395 : errmsg("logging memory contexts of PID %d", MyProcPid)));
1396 :
1397 : /*
1398 : * When a backend process is consuming huge memory, logging all its memory
1399 : * contexts might overrun available disk space. To prevent this, we limit
1400 : * the depth of the hierarchy, as well as the number of child contexts to
1401 : * log per parent to 100.
1402 : *
1403 : * As with MemoryContextStats(), we suppose that practical cases where the
1404 : * dump gets long will typically be huge numbers of siblings under the
1405 : * same parent context; while the additional debugging value from seeing
1406 : * details about individual siblings beyond 100 will not be large.
1407 : */
1408 18 : MemoryContextStatsDetail(TopMemoryContext, 100, 100, false);
1409 18 : }
1410 :
1411 : /*
1412 : * ProcessGetMemoryContextInterrupt
1413 : * Generate information about memory contexts used by the process.
1414 : *
1415 : * Performs a breadth first search on the memory context tree, thus parents
1416 : * statistics are reported before their children in the monitoring function
1417 : * output.
1418 : *
1419 : * Statistics for all the processes are shared via the same dynamic shared
1420 : * area. Statistics written by each process are tracked independently in
1421 : * per-process DSA pointers. These pointers are stored in static shared memory.
1422 : *
1423 : * We calculate maximum number of context's statistics that can be displayed
1424 : * using a pre-determined limit for memory available per process for this
1425 : * utility maximum size of statistics for each context. The remaining context
1426 : * statistics if any are captured as a cumulative total at the end of
1427 : * individual context's statistics.
1428 : *
1429 : * If summary is true, we capture the level 1 and level 2 contexts
1430 : * statistics. For that we traverse the memory context tree recursively in
1431 : * depth first search manner to cover all the children of a parent context, to
1432 : * be able to display a cumulative total of memory consumption by a parent at
1433 : * level 2 and all its children.
1434 : */
1435 : void
1436 12 : ProcessGetMemoryContextInterrupt(void)
1437 : {
1438 : List *contexts;
1439 : HASHCTL ctl;
1440 : HTAB *context_id_lookup;
1441 12 : int context_id = 0;
1442 : MemoryStatsEntry *meminfo;
1443 12 : bool summary = false;
1444 : int max_stats;
1445 12 : int idx = MyProcNumber;
1446 12 : int stats_count = 0;
1447 12 : int stats_num = 0;
1448 : MemoryContextCounters stat;
1449 12 : int num_individual_stats = 0;
1450 :
1451 12 : PublishMemoryContextPending = false;
1452 :
1453 : /*
1454 : * The hash table is used for constructing "path" column of the view,
1455 : * similar to its local backend counterpart.
1456 : */
1457 12 : ctl.keysize = sizeof(MemoryContext);
1458 12 : ctl.entrysize = sizeof(MemoryStatsContextId);
1459 12 : ctl.hcxt = CurrentMemoryContext;
1460 :
1461 12 : context_id_lookup = hash_create("pg_get_remote_backend_memory_contexts",
1462 : 256,
1463 : &ctl,
1464 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1465 :
1466 : /* List of contexts to process in the next round - start at the top. */
1467 12 : contexts = list_make1(TopMemoryContext);
1468 :
1469 : /* Compute the number of stats that can fit in the defined limit */
1470 12 : max_stats =
1471 : MEMORY_CONTEXT_REPORT_MAX_PER_BACKEND / MAX_MEMORY_CONTEXT_STATS_SIZE;
1472 12 : LWLockAcquire(&memCxtState[idx].lw_lock, LW_EXCLUSIVE);
1473 12 : summary = memCxtState[idx].summary;
1474 12 : LWLockRelease(&memCxtState[idx].lw_lock);
1475 :
1476 : /*
1477 : * Traverse the memory context tree to find total number of contexts. If
1478 : * summary is requested report the total number of contexts at level 1 and
1479 : * 2 from the top. Also, populate the hash table of context ids.
1480 : */
1481 12 : compute_contexts_count_and_ids(contexts, context_id_lookup, &stats_count,
1482 : summary);
1483 :
1484 : /*
1485 : * Allocate memory in this process's DSA for storing statistics of the
1486 : * memory contexts upto max_stats, for contexts that don't fit within a
1487 : * limit, a cumulative total is written as the last record in the DSA
1488 : * segment.
1489 : */
1490 12 : stats_num = Min(stats_count, max_stats);
1491 :
1492 12 : LWLockAcquire(&memCxtArea->lw_lock, LW_EXCLUSIVE);
1493 :
1494 : /*
1495 : * Create a DSA and send handle to the client process after storing the
1496 : * context statistics. If number of contexts exceed a predefined limit
1497 : * (1MB), a cumulative total is stored for such contexts.
1498 : */
1499 12 : if (memCxtArea->memstats_dsa_handle == DSA_HANDLE_INVALID)
1500 : {
1501 6 : MemoryContext oldcontext = CurrentMemoryContext;
1502 : dsa_handle handle;
1503 :
1504 6 : MemoryContextSwitchTo(TopMemoryContext);
1505 :
1506 6 : MemoryStatsDsaArea = dsa_create(memCxtArea->lw_lock.tranche);
1507 :
1508 6 : handle = dsa_get_handle(MemoryStatsDsaArea);
1509 6 : MemoryContextSwitchTo(oldcontext);
1510 :
1511 6 : dsa_pin_mapping(MemoryStatsDsaArea);
1512 :
1513 : /*
1514 : * Pin the DSA area, this is to make sure the area remains attachable
1515 : * even if the backend that created it exits. This is done so that the
1516 : * statistics are published even if the process exits while a client
1517 : * is waiting. Also, other processes that publish statistics will use
1518 : * the same area.
1519 : */
1520 6 : dsa_pin(MemoryStatsDsaArea);
1521 :
1522 : /* Set the handle in shared memory */
1523 6 : memCxtArea->memstats_dsa_handle = handle;
1524 : }
1525 :
1526 : /*
1527 : * If DSA exists, created by another process publishing statistics, attach
1528 : * to it.
1529 : */
1530 6 : else if (MemoryStatsDsaArea == NULL)
1531 : {
1532 0 : MemoryContext oldcontext = CurrentMemoryContext;
1533 :
1534 0 : MemoryContextSwitchTo(TopMemoryContext);
1535 0 : MemoryStatsDsaArea = dsa_attach(memCxtArea->memstats_dsa_handle);
1536 0 : MemoryContextSwitchTo(oldcontext);
1537 0 : dsa_pin_mapping(MemoryStatsDsaArea);
1538 : }
1539 12 : LWLockRelease(&memCxtArea->lw_lock);
1540 :
1541 : /*
1542 : * Hold the process lock to protect writes to process specific memory. Two
1543 : * processes publishing statistics do not block each other.
1544 : */
1545 12 : LWLockAcquire(&memCxtState[idx].lw_lock, LW_EXCLUSIVE);
1546 12 : memCxtState[idx].proc_id = MyProcPid;
1547 :
1548 12 : if (DsaPointerIsValid(memCxtState[idx].memstats_dsa_pointer))
1549 : {
1550 : /*
1551 : * Free any previous allocations, free the name, ident and path
1552 : * pointers before freeing the pointer that contains them.
1553 : */
1554 0 : free_memorycontextstate_dsa(MemoryStatsDsaArea, memCxtState[idx].total_stats,
1555 0 : memCxtState[idx].memstats_dsa_pointer);
1556 : }
1557 :
1558 : /*
1559 : * Assigning total stats before allocating memory so that memory cleanup
1560 : * can run if any subsequent dsa_allocate call to allocate name/ident/path
1561 : * fails.
1562 : */
1563 12 : memCxtState[idx].total_stats = stats_num;
1564 24 : memCxtState[idx].memstats_dsa_pointer =
1565 12 : dsa_allocate0(MemoryStatsDsaArea, stats_num * sizeof(MemoryStatsEntry));
1566 :
1567 : meminfo = (MemoryStatsEntry *)
1568 12 : dsa_get_address(MemoryStatsDsaArea, memCxtState[idx].memstats_dsa_pointer);
1569 :
1570 12 : if (summary)
1571 : {
1572 0 : int cxt_id = 0;
1573 0 : List *path = NIL;
1574 :
1575 : /* Copy TopMemoryContext statistics to DSA */
1576 0 : memset(&stat, 0, sizeof(stat));
1577 0 : (*TopMemoryContext->methods->stats) (TopMemoryContext, NULL, NULL,
1578 : &stat, true);
1579 0 : path = lcons_int(1, path);
1580 0 : PublishMemoryContext(meminfo, cxt_id, TopMemoryContext, path, stat,
1581 : 1, MemoryStatsDsaArea, 100);
1582 0 : cxt_id = cxt_id + 1;
1583 :
1584 : /*
1585 : * Copy statistics for each of TopMemoryContexts children. This
1586 : * includes statistics of at most 100 children per node, with each
1587 : * child node limited to a depth of 100 in its subtree.
1588 : */
1589 0 : for (MemoryContext c = TopMemoryContext->firstchild; c != NULL;
1590 0 : c = c->nextchild)
1591 : {
1592 : MemoryContextCounters grand_totals;
1593 0 : int num_contexts = 0;
1594 :
1595 0 : path = NIL;
1596 0 : memset(&grand_totals, 0, sizeof(grand_totals));
1597 :
1598 0 : MemoryContextStatsInternal(c, 1, 100, 100, &grand_totals,
1599 : PRINT_STATS_NONE, &num_contexts);
1600 :
1601 0 : path = compute_context_path(c, context_id_lookup);
1602 :
1603 : /*
1604 : * Register the stats entry first, that way the cleanup handler
1605 : * can reach it in case of allocation failures of one or more
1606 : * members.
1607 : */
1608 0 : memCxtState[idx].total_stats = cxt_id++;
1609 0 : PublishMemoryContext(meminfo, cxt_id, c, path,
1610 : grand_totals, num_contexts, MemoryStatsDsaArea, 100);
1611 : }
1612 0 : memCxtState[idx].total_stats = cxt_id;
1613 :
1614 : /* Notify waiting backends and return */
1615 0 : end_memorycontext_reporting();
1616 :
1617 0 : hash_destroy(context_id_lookup);
1618 :
1619 0 : return;
1620 : }
1621 :
1622 1142 : foreach_ptr(MemoryContextData, cur, contexts)
1623 : {
1624 1118 : List *path = NIL;
1625 :
1626 : /*
1627 : * Figure out the transient context_id of this context and each of its
1628 : * ancestors, to compute a path for this context.
1629 : */
1630 1118 : path = compute_context_path(cur, context_id_lookup);
1631 :
1632 : /* Examine the context stats */
1633 1118 : memset(&stat, 0, sizeof(stat));
1634 1118 : (*cur->methods->stats) (cur, NULL, NULL, &stat, true);
1635 :
1636 : /* Account for saving one statistics slot for cumulative reporting */
1637 1118 : if (context_id < (max_stats - 1) || stats_count <= max_stats)
1638 : {
1639 : /* Copy statistics to DSA memory */
1640 1118 : PublishMemoryContext(meminfo, context_id, cur, path, stat, 1, MemoryStatsDsaArea, 100);
1641 : }
1642 : else
1643 : {
1644 0 : meminfo[max_stats - 1].totalspace += stat.totalspace;
1645 0 : meminfo[max_stats - 1].nblocks += stat.nblocks;
1646 0 : meminfo[max_stats - 1].freespace += stat.freespace;
1647 0 : meminfo[max_stats - 1].freechunks += stat.freechunks;
1648 : }
1649 :
1650 : /*
1651 : * DSA max limit per process is reached, write aggregate of the
1652 : * remaining statistics.
1653 : *
1654 : * We can store contexts from 0 to max_stats - 1. When stats_count is
1655 : * greater than max_stats, we stop reporting individual statistics
1656 : * when context_id equals max_stats - 2. As we use max_stats - 1 array
1657 : * slot for reporting cumulative statistics or "Remaining Totals".
1658 : */
1659 1118 : if (stats_count > max_stats && context_id == (max_stats - 2))
1660 : {
1661 : char *nameptr;
1662 0 : int namelen = strlen("Remaining Totals");
1663 :
1664 0 : num_individual_stats = context_id + 1;
1665 0 : meminfo[max_stats - 1].name = dsa_allocate(MemoryStatsDsaArea, namelen + 1);
1666 0 : nameptr = dsa_get_address(MemoryStatsDsaArea, meminfo[max_stats - 1].name);
1667 0 : strlcpy(nameptr, "Remaining Totals", namelen + 1);
1668 0 : meminfo[max_stats - 1].ident = InvalidDsaPointer;
1669 0 : meminfo[max_stats - 1].path = InvalidDsaPointer;
1670 0 : meminfo[max_stats - 1].type = 0;
1671 : }
1672 1118 : context_id++;
1673 : }
1674 :
1675 : /*
1676 : * Statistics are not aggregated, i.e individual statistics reported when
1677 : * stats_count <= max_stats.
1678 : */
1679 12 : if (stats_count <= max_stats)
1680 : {
1681 12 : memCxtState[idx].total_stats = context_id;
1682 : }
1683 : /* Report number of aggregated memory contexts */
1684 : else
1685 : {
1686 0 : meminfo[max_stats - 1].num_agg_stats = context_id -
1687 : num_individual_stats;
1688 :
1689 : /*
1690 : * Total stats equals num_individual_stats + 1 record for cumulative
1691 : * statistics.
1692 : */
1693 0 : memCxtState[idx].total_stats = num_individual_stats + 1;
1694 : }
1695 :
1696 : /* Notify waiting backends and return */
1697 12 : end_memorycontext_reporting();
1698 :
1699 12 : hash_destroy(context_id_lookup);
1700 : }
1701 :
/*
 * Update timestamp and signal all the waiting client backends after copying
 * all the statistics.
 */
static void
end_memorycontext_reporting(void)
{
	/* Stamp publication time while still holding the per-process lock */
	memCxtState[MyProcNumber].stats_timestamp = GetCurrentTimestamp();
	/* Lock was taken by our caller, ProcessGetMemoryContextInterrupt() */
	LWLockRelease(&memCxtState[MyProcNumber].lw_lock);
	/* Wake every client backend waiting on our statistics */
	ConditionVariableBroadcast(&memCxtState[MyProcNumber].memcxt_cv);
}
1713 :
1714 : /*
1715 : * compute_context_path
1716 : *
1717 : * Append the transient context_id of this context and each of its ancestors
1718 : * to a list, in order to compute a path.
1719 : */
1720 : static List *
1721 1118 : compute_context_path(MemoryContext c, HTAB *context_id_lookup)
1722 : {
1723 : bool found;
1724 1118 : List *path = NIL;
1725 : MemoryContext cur_context;
1726 :
1727 4488 : for (cur_context = c; cur_context != NULL; cur_context = cur_context->parent)
1728 : {
1729 : MemoryStatsContextId *cur_entry;
1730 :
1731 3370 : cur_entry = hash_search(context_id_lookup, &cur_context, HASH_FIND, &found);
1732 :
1733 3370 : if (!found)
1734 0 : elog(ERROR, "hash table corrupted, can't construct path value");
1735 :
1736 3370 : path = lcons_int(cur_entry->context_id, path);
1737 : }
1738 :
1739 1118 : return path;
1740 : }
1741 :
/*
 * Return the number of contexts allocated currently by the backend
 * Assign context ids to each of the contexts.
 *
 * 'contexts' starts out holding just the traversal root; children are
 * appended as we go, making the foreach below a breadth-first walk.
 * '*stats_count' is incremented once per id handed out.
 */
static void
compute_contexts_count_and_ids(List *contexts, HTAB *context_id_lookup,
							   int *stats_count, bool summary)
{
	/* NB: the list grows while we iterate it; foreach_ptr tolerates that */
	foreach_ptr(MemoryContextData, cur, contexts)
	{
		MemoryStatsContextId *entry;
		bool		found;

		entry = (MemoryStatsContextId *) hash_search(context_id_lookup, &cur,
													 HASH_ENTER, &found);
		Assert(!found);

		/*
		 * context id starts with 1 so increment the stats_count before
		 * assigning.
		 */
		entry->context_id = ++(*stats_count);

		/* Append the children of the current context to the main list. */
		for (MemoryContext c = cur->firstchild; c != NULL; c = c->nextchild)
		{
			if (summary)
			{
				/* level-2 contexts get ids here since they won't be revisited */
				entry = (MemoryStatsContextId *) hash_search(context_id_lookup, &c,
															 HASH_ENTER, &found);
				Assert(!found);

				entry->context_id = ++(*stats_count);
			}

			contexts = lappend(contexts, c);
		}

		/*
		 * In summary mode only the first two level (from top) contexts are
		 * displayed.
		 */
		if (summary)
			break;
	}
}
1788 :
1789 : /*
1790 : * PublishMemoryContext
1791 : *
1792 : * Copy the memory context statistics of a single context to a DSA memory
1793 : */
1794 : static void
1795 1118 : PublishMemoryContext(MemoryStatsEntry *memcxt_info, int curr_id,
1796 : MemoryContext context, List *path,
1797 : MemoryContextCounters stat, int num_contexts,
1798 : dsa_area *area, int max_levels)
1799 : {
1800 1118 : const char *ident = context->ident;
1801 1118 : const char *name = context->name;
1802 : int *path_list;
1803 :
1804 : /*
1805 : * To be consistent with logging output, we label dynahash contexts with
1806 : * just the hash table name as with MemoryContextStatsPrint().
1807 : */
1808 1118 : if (context->ident && strncmp(context->name, "dynahash", 8) == 0)
1809 : {
1810 134 : name = context->ident;
1811 134 : ident = NULL;
1812 : }
1813 :
1814 1118 : if (name != NULL)
1815 : {
1816 1118 : int namelen = strlen(name);
1817 : char *nameptr;
1818 :
1819 1118 : if (strlen(name) >= MEMORY_CONTEXT_IDENT_SHMEM_SIZE)
1820 0 : namelen = pg_mbcliplen(name, namelen,
1821 : MEMORY_CONTEXT_IDENT_SHMEM_SIZE - 1);
1822 :
1823 1118 : memcxt_info[curr_id].name = dsa_allocate(area, namelen + 1);
1824 1118 : nameptr = (char *) dsa_get_address(area, memcxt_info[curr_id].name);
1825 1118 : strlcpy(nameptr, name, namelen + 1);
1826 : }
1827 : else
1828 0 : memcxt_info[curr_id].name = InvalidDsaPointer;
1829 :
1830 : /* Trim and copy the identifier if it is not set to NULL */
1831 1118 : if (ident != NULL)
1832 : {
1833 726 : int idlen = strlen(context->ident);
1834 : char *identptr;
1835 :
1836 : /*
1837 : * Some identifiers such as SQL query string can be very long,
1838 : * truncate oversize identifiers.
1839 : */
1840 726 : if (idlen >= MEMORY_CONTEXT_IDENT_SHMEM_SIZE)
1841 42 : idlen = pg_mbcliplen(ident, idlen,
1842 : MEMORY_CONTEXT_IDENT_SHMEM_SIZE - 1);
1843 :
1844 726 : memcxt_info[curr_id].ident = dsa_allocate(area, idlen + 1);
1845 726 : identptr = (char *) dsa_get_address(area, memcxt_info[curr_id].ident);
1846 726 : strlcpy(identptr, ident, idlen + 1);
1847 : }
1848 : else
1849 392 : memcxt_info[curr_id].ident = InvalidDsaPointer;
1850 :
1851 : /* Allocate DSA memory for storing path information */
1852 1118 : if (path == NIL)
1853 0 : memcxt_info[curr_id].path = InvalidDsaPointer;
1854 : else
1855 : {
1856 1118 : int levels = Min(list_length(path), max_levels);
1857 :
1858 1118 : memcxt_info[curr_id].path_length = levels;
1859 1118 : memcxt_info[curr_id].path = dsa_allocate0(area, levels * sizeof(int));
1860 1118 : memcxt_info[curr_id].levels = list_length(path);
1861 1118 : path_list = (int *) dsa_get_address(area, memcxt_info[curr_id].path);
1862 :
1863 4488 : foreach_int(i, path)
1864 : {
1865 3370 : path_list[foreach_current_index(i)] = i;
1866 3370 : if (--levels == 0)
1867 1118 : break;
1868 : }
1869 : }
1870 1118 : memcxt_info[curr_id].type = context->type;
1871 1118 : memcxt_info[curr_id].totalspace = stat.totalspace;
1872 1118 : memcxt_info[curr_id].nblocks = stat.nblocks;
1873 1118 : memcxt_info[curr_id].freespace = stat.freespace;
1874 1118 : memcxt_info[curr_id].freechunks = stat.freechunks;
1875 1118 : memcxt_info[curr_id].num_agg_stats = num_contexts;
1876 1118 : }
1877 :
1878 : /*
1879 : * free_memorycontextstate_dsa
1880 : *
1881 : * Worker for freeing resources from a MemoryStatsEntry. Callers are
1882 : * responsible for ensuring that the DSA pointer is valid.
1883 : */
1884 : static void
1885 12 : free_memorycontextstate_dsa(dsa_area *area, int total_stats,
1886 : dsa_pointer prev_dsa_pointer)
1887 : {
1888 : MemoryStatsEntry *meminfo;
1889 :
1890 12 : meminfo = (MemoryStatsEntry *) dsa_get_address(area, prev_dsa_pointer);
1891 : Assert(meminfo != NULL);
1892 1130 : for (int i = 0; i < total_stats; i++)
1893 : {
1894 1118 : if (DsaPointerIsValid(meminfo[i].name))
1895 1118 : dsa_free(area, meminfo[i].name);
1896 :
1897 1118 : if (DsaPointerIsValid(meminfo[i].ident))
1898 726 : dsa_free(area, meminfo[i].ident);
1899 :
1900 1118 : if (DsaPointerIsValid(meminfo[i].path))
1901 1118 : dsa_free(area, meminfo[i].path);
1902 : }
1903 :
1904 12 : dsa_free(area, memCxtState[MyProcNumber].memstats_dsa_pointer);
1905 12 : memCxtState[MyProcNumber].memstats_dsa_pointer = InvalidDsaPointer;
1906 12 : }
1907 :
/*
 * AtProcExit_memstats_cleanup
 *
 * Free the memory context statistics stored by this process in the DSA
 * area, if any.  The (code, arg) signature matches the process-exit
 * callback convention; both arguments are unused here.
 */
void
AtProcExit_memstats_cleanup(int code, Datum arg)
{
	int			idx = MyProcNumber;

	/* If the DSA area was never created, nothing can have been published. */
	if (memCxtArea->memstats_dsa_handle == DSA_HANDLE_INVALID)
		return;

	/* Serialize against readers of this backend's statistics slot. */
	LWLockAcquire(&memCxtState[idx].lw_lock, LW_EXCLUSIVE);

	/* Nothing to do if this backend has no published statistics. */
	if (!DsaPointerIsValid(memCxtState[idx].memstats_dsa_pointer))
	{
		LWLockRelease(&memCxtState[idx].lw_lock);
		return;
	}

	/* If the dsa mapping could not be found, attach to the area */
	if (MemoryStatsDsaArea == NULL)
		MemoryStatsDsaArea = dsa_attach(memCxtArea->memstats_dsa_handle);

	/*
	 * Free the memory context statistics, free the name, ident and path
	 * pointers before freeing the pointer that contains these pointers and
	 * integer statistics.
	 */
	free_memorycontextstate_dsa(MemoryStatsDsaArea, memCxtState[idx].total_stats,
								memCxtState[idx].memstats_dsa_pointer);

	/* Done with the area for good; detach before releasing the lock. */
	dsa_detach(MemoryStatsDsaArea);
	LWLockRelease(&memCxtState[idx].lw_lock);
}
1943 :
1944 : void *
1945 667480630 : palloc(Size size)
1946 : {
1947 : /* duplicates MemoryContextAlloc to avoid increased overhead */
1948 : void *ret;
1949 667480630 : MemoryContext context = CurrentMemoryContext;
1950 :
1951 : Assert(MemoryContextIsValid(context));
1952 : AssertNotInCriticalSection(context);
1953 :
1954 667480630 : context->isReset = false;
1955 :
1956 : /*
1957 : * For efficiency reasons, we purposefully offload the handling of
1958 : * allocation failures to the MemoryContextMethods implementation as this
1959 : * allows these checks to be performed only when an actual malloc needs to
1960 : * be done to request more memory from the OS. Additionally, not having
1961 : * to execute any instructions after this call allows the compiler to use
1962 : * the sibling call optimization. If you're considering adding code after
1963 : * this call, consider making it the responsibility of the 'alloc'
1964 : * function instead.
1965 : */
1966 667480630 : ret = context->methods->alloc(context, size, 0);
1967 : /* We expect OOM to be handled by the alloc function */
1968 : Assert(ret != NULL);
1969 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1970 :
1971 667480630 : return ret;
1972 : }
1973 :
1974 : void *
1975 478213172 : palloc0(Size size)
1976 : {
1977 : /* duplicates MemoryContextAllocZero to avoid increased overhead */
1978 : void *ret;
1979 478213172 : MemoryContext context = CurrentMemoryContext;
1980 :
1981 : Assert(MemoryContextIsValid(context));
1982 : AssertNotInCriticalSection(context);
1983 :
1984 478213172 : context->isReset = false;
1985 :
1986 478213172 : ret = context->methods->alloc(context, size, 0);
1987 : /* We expect OOM to be handled by the alloc function */
1988 : Assert(ret != NULL);
1989 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
1990 :
1991 4389193302 : MemSetAligned(ret, 0, size);
1992 :
1993 478213172 : return ret;
1994 : }
1995 :
1996 : void *
1997 23001146 : palloc_extended(Size size, int flags)
1998 : {
1999 : /* duplicates MemoryContextAllocExtended to avoid increased overhead */
2000 : void *ret;
2001 23001146 : MemoryContext context = CurrentMemoryContext;
2002 :
2003 : Assert(MemoryContextIsValid(context));
2004 : AssertNotInCriticalSection(context);
2005 :
2006 23001146 : context->isReset = false;
2007 :
2008 23001146 : ret = context->methods->alloc(context, size, flags);
2009 23001146 : if (unlikely(ret == NULL))
2010 : {
2011 : /* NULL can be returned only when using MCXT_ALLOC_NO_OOM */
2012 : Assert(flags & MCXT_ALLOC_NO_OOM);
2013 0 : return NULL;
2014 : }
2015 :
2016 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
2017 :
2018 23001146 : if ((flags & MCXT_ALLOC_ZERO) != 0)
2019 5222 : MemSetAligned(ret, 0, size);
2020 :
2021 23001146 : return ret;
2022 : }
2023 :
/*
 * MemoryContextAllocAligned
 *		Allocate 'size' bytes of memory in 'context' aligned to 'alignto'
 *		bytes.
 *
 * Currently, we align addresses by requesting additional bytes from the
 * MemoryContext's standard allocator function and then aligning the returned
 * address by the required alignment.  This means that the given MemoryContext
 * must support providing us with a chunk of memory that's larger than 'size'.
 * For allocators such as Slab, that's not going to work, as slab only allows
 * chunks of the size that's specified when the context is created.
 *
 * 'alignto' must be a power of 2.
 * 'flags' may be 0 or set the same as MemoryContextAllocExtended().
 */
void *
MemoryContextAllocAligned(MemoryContext context,
						  Size size, Size alignto, int flags)
{
	MemoryChunk *alignedchunk;
	Size		alloc_size;
	void	   *unaligned;
	void	   *aligned;

	/* wouldn't make much sense to waste that much space */
	Assert(alignto < (128 * 1024 * 1024));

	/* ensure alignto is a power of 2 */
	Assert((alignto & (alignto - 1)) == 0);

	/*
	 * If the alignment requirements are less than what we already guarantee
	 * then just use the standard allocation function.
	 */
	if (unlikely(alignto <= MAXIMUM_ALIGNOF))
		return MemoryContextAllocExtended(context, size, flags);

	/*
	 * We implement aligned pointers by simply allocating enough memory for
	 * the requested size plus the alignment and an additional "redirection"
	 * MemoryChunk.  This additional MemoryChunk is required for operations
	 * such as pfree when used on the pointer returned by this function.  We
	 * use this redirection MemoryChunk in order to find the pointer to the
	 * memory that was returned by the MemoryContextAllocExtended call below.
	 * We do that by "borrowing" the block offset field and instead of using
	 * that to find the offset into the owning block, we use it to find the
	 * original allocated address.
	 *
	 * Here we must allocate enough extra memory so that we can still align
	 * the pointer returned by MemoryContextAllocExtended and also have enough
	 * space for the redirection MemoryChunk.  Since allocations will already
	 * be at least aligned by MAXIMUM_ALIGNOF, we can subtract that amount
	 * from the allocation size to save a little memory.
	 */
	alloc_size = size + PallocAlignedExtraBytes(alignto);

#ifdef MEMORY_CONTEXT_CHECKING
	/* ensure there's space for a sentinel byte */
	alloc_size += 1;
#endif

	/* perform the actual allocation */
	unaligned = MemoryContextAllocExtended(context, alloc_size, flags);

	/*
	 * set the aligned pointer: round up past the space reserved for the
	 * redirection MemoryChunk header to the next 'alignto' boundary
	 */
	aligned = (void *) TYPEALIGN(alignto, (char *) unaligned +
								 sizeof(MemoryChunk));

	alignedchunk = PointerGetMemoryChunk(aligned);

	/*
	 * We set the redirect MemoryChunk so that the block offset calculation is
	 * used to point back to the 'unaligned' allocated chunk.  This allows us
	 * to use MemoryChunkGetBlock() to find the unaligned chunk when we need
	 * to perform operations such as pfree() and repalloc().
	 *
	 * We store 'alignto' in the MemoryChunk's 'value' so that we know what
	 * the alignment was set to should we ever be asked to realloc this
	 * pointer.
	 */
	MemoryChunkSetHdrMask(alignedchunk, unaligned, alignto,
						  MCTX_ALIGNED_REDIRECT_ID);

	/* double check we produced a correctly aligned pointer */
	Assert((void *) TYPEALIGN(alignto, aligned) == aligned);

#ifdef MEMORY_CONTEXT_CHECKING
	alignedchunk->requested_size = size;
	/* set mark to catch clobber of "unused" space */
	set_sentinel(aligned, size);
#endif

	/* Mark the bytes before the redirection header as noaccess */
	VALGRIND_MAKE_MEM_NOACCESS(unaligned,
							   (char *) alignedchunk - (char *) unaligned);

	/* Disallow access to the redirection chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(alignedchunk, sizeof(MemoryChunk));

	return aligned;
}
2125 :
2126 : /*
2127 : * palloc_aligned
2128 : * Allocate 'size' bytes returning a pointer that's aligned to the
2129 : * 'alignto' boundary.
2130 : *
2131 : * Currently, we align addresses by requesting additional bytes from the
2132 : * MemoryContext's standard allocator function and then aligning the returned
2133 : * address by the required alignment. This means that the given MemoryContext
2134 : * must support providing us with a chunk of memory that's larger than 'size'.
2135 : * For allocators such as Slab, that's not going to work, as slab only allows
2136 : * chunks of the size that's specified when the context is created.
2137 : *
2138 : * 'alignto' must be a power of 2.
2139 : * 'flags' may be 0 or set the same as MemoryContextAllocExtended().
2140 : */
2141 : void *
2142 3225534 : palloc_aligned(Size size, Size alignto, int flags)
2143 : {
2144 3225534 : return MemoryContextAllocAligned(CurrentMemoryContext, size, alignto, flags);
2145 : }
2146 :
/*
 * pfree
 *		Release an allocated chunk.
 *
 * The owning context and allocator are recovered from the chunk's header,
 * so this works for memory allocated by any context type.
 */
void
pfree(void *pointer)
{
#ifdef USE_VALGRIND
	/* capture these before freeing invalidates the chunk header */
	MemoryContextMethodID method = GetMemoryChunkMethodID(pointer);
	MemoryContext context = GetMemoryChunkContext(pointer);
#endif

	/* dispatch to the owning allocator's free function */
	MCXT_METHOD(pointer, free_p) (pointer);

#ifdef USE_VALGRIND
	/*
	 * NOTE(review): aligned-redirect chunks appear to be excluded because
	 * the Valgrind mempool accounting tracks the underlying unaligned
	 * allocation instead — confirm against the redirect free path.
	 */
	if (method != MCTX_ALIGNED_REDIRECT_ID)
		VALGRIND_MEMPOOL_FREE(context, pointer);
#endif
}
2166 :
/*
 * repalloc
 *		Adjust the size of a previously allocated chunk.
 *
 * The chunk stays in its original memory context (recovered from the chunk
 * header); the returned pointer may differ from 'pointer'.
 */
void *
repalloc(void *pointer, Size size)
{
#ifdef USE_VALGRIND
	MemoryContextMethodID method = GetMemoryChunkMethodID(pointer);
#endif
#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
	/* needed only for assertions and Valgrind mempool bookkeeping */
	MemoryContext context = GetMemoryChunkContext(pointer);
#endif
	void	   *ret;

	AssertNotInCriticalSection(context);

	/* isReset must be false already */
	Assert(!context->isReset);

	/*
	 * For efficiency reasons, we purposefully offload the handling of
	 * allocation failures to the MemoryContextMethods implementation as this
	 * allows these checks to be performed only when an actual malloc needs to
	 * be done to request more memory from the OS.  Additionally, not having
	 * to execute any instructions after this call allows the compiler to use
	 * the sibling call optimization.  If you're considering adding code after
	 * this call, consider making it the responsibility of the 'realloc'
	 * function instead.
	 */
	ret = MCXT_METHOD(pointer, realloc) (pointer, size, 0);

#ifdef USE_VALGRIND
	if (method != MCTX_ALIGNED_REDIRECT_ID)
		VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
#endif

	return ret;
}
2206 :
/*
 * repalloc_extended
 *		Adjust the size of a previously allocated chunk,
 *		with HUGE and NO_OOM options.
 *
 * Returns NULL on allocation failure; per the MCXT_ALLOC_NO_OOM contract
 * elsewhere in this file, that should only happen when that flag is given.
 */
void *
repalloc_extended(void *pointer, Size size, int flags)
{
#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
	/* needed only for assertions and Valgrind mempool bookkeeping */
	MemoryContext context = GetMemoryChunkContext(pointer);
#endif
	void	   *ret;

	AssertNotInCriticalSection(context);

	/* isReset must be false already */
	Assert(!context->isReset);

	/*
	 * For efficiency reasons, we purposefully offload the handling of
	 * allocation failures to the MemoryContextMethods implementation as this
	 * allows these checks to be performed only when an actual malloc needs to
	 * be done to request more memory from the OS.  Additionally, not having
	 * to execute any instructions after this call allows the compiler to use
	 * the sibling call optimization.  If you're considering adding code after
	 * this call, consider making it the responsibility of the 'realloc'
	 * function instead.
	 */
	ret = MCXT_METHOD(pointer, realloc) (pointer, size, flags);
	if (unlikely(ret == NULL))
		return NULL;

	VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);

	return ret;
}
2243 :
2244 : /*
2245 : * repalloc0
2246 : * Adjust the size of a previously allocated chunk and zero out the added
2247 : * space.
2248 : */
2249 : void *
2250 49290 : repalloc0(void *pointer, Size oldsize, Size size)
2251 : {
2252 : void *ret;
2253 :
2254 : /* catch wrong argument order */
2255 49290 : if (unlikely(oldsize > size))
2256 0 : elog(ERROR, "invalid repalloc0 call: oldsize %zu, new size %zu",
2257 : oldsize, size);
2258 :
2259 49290 : ret = repalloc(pointer, size);
2260 49290 : memset((char *) ret + oldsize, 0, (size - oldsize));
2261 49290 : return ret;
2262 : }
2263 :
2264 : /*
2265 : * MemoryContextAllocHuge
2266 : * Allocate (possibly-expansive) space within the specified context.
2267 : *
2268 : * See considerations in comment at MaxAllocHugeSize.
2269 : */
2270 : void *
2271 2904 : MemoryContextAllocHuge(MemoryContext context, Size size)
2272 : {
2273 : void *ret;
2274 :
2275 : Assert(MemoryContextIsValid(context));
2276 : AssertNotInCriticalSection(context);
2277 :
2278 2904 : context->isReset = false;
2279 :
2280 : /*
2281 : * For efficiency reasons, we purposefully offload the handling of
2282 : * allocation failures to the MemoryContextMethods implementation as this
2283 : * allows these checks to be performed only when an actual malloc needs to
2284 : * be done to request more memory from the OS. Additionally, not having
2285 : * to execute any instructions after this call allows the compiler to use
2286 : * the sibling call optimization. If you're considering adding code after
2287 : * this call, consider making it the responsibility of the 'alloc'
2288 : * function instead.
2289 : */
2290 2904 : ret = context->methods->alloc(context, size, MCXT_ALLOC_HUGE);
2291 :
2292 : VALGRIND_MEMPOOL_ALLOC(context, ret, size);
2293 :
2294 2904 : return ret;
2295 : }
2296 :
2297 : /*
2298 : * repalloc_huge
2299 : * Adjust the size of a previously allocated chunk, permitting a large
2300 : * value. The previous allocation need not have been "huge".
2301 : */
2302 : void *
2303 104658 : repalloc_huge(void *pointer, Size size)
2304 : {
2305 : /* this one seems not worth its own implementation */
2306 104658 : return repalloc_extended(pointer, size, MCXT_ALLOC_HUGE);
2307 : }
2308 :
2309 : /*
2310 : * MemoryContextStrdup
2311 : * Like strdup(), but allocate from the specified context
2312 : */
2313 : char *
2314 90261960 : MemoryContextStrdup(MemoryContext context, const char *string)
2315 : {
2316 : char *nstr;
2317 90261960 : Size len = strlen(string) + 1;
2318 :
2319 90261960 : nstr = (char *) MemoryContextAlloc(context, len);
2320 :
2321 90261960 : memcpy(nstr, string, len);
2322 :
2323 90261960 : return nstr;
2324 : }
2325 :
2326 : char *
2327 86449326 : pstrdup(const char *in)
2328 : {
2329 86449326 : return MemoryContextStrdup(CurrentMemoryContext, in);
2330 : }
2331 :
2332 : /*
2333 : * pnstrdup
2334 : * Like pstrdup(), but append null byte to a
2335 : * not-necessarily-null-terminated input string.
2336 : */
2337 : char *
2338 1239200 : pnstrdup(const char *in, Size len)
2339 : {
2340 : char *out;
2341 :
2342 1239200 : len = strnlen(in, len);
2343 :
2344 1239200 : out = palloc(len + 1);
2345 1239200 : memcpy(out, in, len);
2346 1239200 : out[len] = '\0';
2347 :
2348 1239200 : return out;
2349 : }
2350 :
2351 : /*
2352 : * Make copy of string with all trailing newline characters removed.
2353 : */
2354 : char *
2355 414 : pchomp(const char *in)
2356 : {
2357 : size_t n;
2358 :
2359 414 : n = strlen(in);
2360 828 : while (n > 0 && in[n - 1] == '\n')
2361 414 : n--;
2362 414 : return pnstrdup(in, n);
2363 : }
|