Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * portalmem.c
4 : * backend portal memory management
5 : *
6 : * Portals are objects representing the execution state of a query.
7 : * This module provides memory management services for portals, but it
8 : * doesn't actually run the executor for them.
9 : *
10 : *
11 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
12 : * Portions Copyright (c) 1994, Regents of the University of California
13 : *
14 : * IDENTIFICATION
15 : * src/backend/utils/mmgr/portalmem.c
16 : *
17 : *-------------------------------------------------------------------------
18 : */
19 : #include "postgres.h"
20 :
21 : #include "access/xact.h"
22 : #include "commands/portalcmds.h"
23 : #include "funcapi.h"
24 : #include "miscadmin.h"
25 : #include "storage/ipc.h"
26 : #include "utils/builtins.h"
27 : #include "utils/memutils.h"
28 : #include "utils/snapmgr.h"
29 : #include "utils/timestamp.h"
30 :
31 : /*
32 : * Estimate of the maximum number of open portals a user would have,
33 : * used in initially sizing the PortalHashTable in EnablePortalManager().
34 : * Since the hash table can expand, there's no need to make this overly
35 : * generous, and keeping it small avoids unnecessary overhead in the
36 : * hash_seq_search() calls executed during transaction end.
37 : */
38 : #define PORTALS_PER_USER 16
39 :
40 :
41 : /* ----------------
42 : * Global state
43 : * ----------------
44 : */
45 :
46 : #define MAX_PORTALNAME_LEN NAMEDATALEN
47 :
48 : typedef struct portalhashent
49 : {
50 : char portalname[MAX_PORTALNAME_LEN];
51 : Portal portal;
52 : } PortalHashEnt;
53 :
54 : static HTAB *PortalHashTable = NULL;
55 :
56 : #define PortalHashTableLookup(NAME, PORTAL) \
57 : do { \
58 : PortalHashEnt *hentry; \
59 : \
60 : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
61 : (NAME), HASH_FIND, NULL); \
62 : if (hentry) \
63 : PORTAL = hentry->portal; \
64 : else \
65 : PORTAL = NULL; \
66 : } while(0)
67 :
68 : #define PortalHashTableInsert(PORTAL, NAME) \
69 : do { \
70 : PortalHashEnt *hentry; bool found; \
71 : \
72 : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
73 : (NAME), HASH_ENTER, &found); \
74 : if (found) \
75 : elog(ERROR, "duplicate portal name"); \
76 : hentry->portal = PORTAL; \
77 : /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
78 : PORTAL->name = hentry->portalname; \
79 : } while(0)
80 :
81 : #define PortalHashTableDelete(PORTAL) \
82 : do { \
83 : PortalHashEnt *hentry; \
84 : \
85 : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
86 : PORTAL->name, HASH_REMOVE, NULL); \
87 : if (hentry == NULL) \
88 : elog(WARNING, "trying to delete portal name that does not exist"); \
89 : } while(0)
90 :
91 : static MemoryContext TopPortalContext = NULL;
92 :
93 :
94 : /* ----------------------------------------------------------------
95 : * public portal interface functions
96 : * ----------------------------------------------------------------
97 : */
98 :
99 : /*
100 : * EnablePortalManager
101 : * Enables the portal management module at backend startup.
102 : */
103 : void
104 30676 : EnablePortalManager(void)
105 : {
106 : HASHCTL ctl;
107 :
108 : Assert(TopPortalContext == NULL);
109 :
110 30676 : TopPortalContext = AllocSetContextCreate(TopMemoryContext,
111 : "TopPortalContext",
112 : ALLOCSET_DEFAULT_SIZES);
113 :
114 30676 : ctl.keysize = MAX_PORTALNAME_LEN;
115 30676 : ctl.entrysize = sizeof(PortalHashEnt);
116 :
117 : /*
118 : * use PORTALS_PER_USER as a guess of how many hash table entries to
119 : * create, initially
120 : */
121 30676 : PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
122 : &ctl, HASH_ELEM | HASH_STRINGS);
123 30676 : }
124 :
125 : /*
126 : * GetPortalByName
127 : * Returns a portal given a portal name, or NULL if name not found.
128 : */
129 : Portal
130 826378 : GetPortalByName(const char *name)
131 : {
132 : Portal portal;
133 :
134 826378 : if (PointerIsValid(name))
135 826378 : PortalHashTableLookup(name, portal);
136 : else
137 0 : portal = NULL;
138 :
139 826378 : return portal;
140 : }
141 :
142 : /*
143 : * PortalGetPrimaryStmt
144 : * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
145 : *
146 : * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
147 : * portal are marked canSetTag, returns the first one. Neither of these
148 : * cases should occur in present usages of this function.
149 : */
150 : PlannedStmt *
151 364614 : PortalGetPrimaryStmt(Portal portal)
152 : {
153 : ListCell *lc;
154 :
155 364614 : foreach(lc, portal->stmts)
156 : {
157 364614 : PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
158 :
159 364614 : if (stmt->canSetTag)
160 364614 : return stmt;
161 : }
162 0 : return NULL;
163 : }
164 :
/*
 * CreatePortal
 *		Returns a new portal given a name.
 *
 * allowDup: if true, automatically drop any pre-existing portal of the
 * same name (if false, an error is raised).
 *
 * dupSilent: if true, don't even emit a WARNING.
 */
Portal
CreatePortal(const char *name, bool allowDup, bool dupSilent)
{
	Portal		portal;

	Assert(PointerIsValid(name));

	/* Deal with any pre-existing portal of the same name */
	portal = GetPortalByName(name);
	if (PortalIsValid(portal))
	{
		if (!allowDup)
			ereport(ERROR,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("cursor \"%s\" already exists", name)));
		if (!dupSilent)
			ereport(WARNING,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("closing existing cursor \"%s\"",
							name)));
		PortalDrop(portal, false);
	}

	/* make new portal structure; zeroed, so unlisted fields start 0/false */
	portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);

	/* initialize portal context; typically it won't store much */
	portal->portalContext = AllocSetContextCreate(TopPortalContext,
												  "PortalContext",
												  ALLOCSET_SMALL_SIZES);

	/* create a resource owner for the portal */
	portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
										   "Portal");

	/* initialize portal fields that don't start off zero */
	portal->status = PORTAL_NEW;
	portal->cleanup = PortalCleanup;
	portal->createSubid = GetCurrentSubTransactionId();
	portal->activeSubid = portal->createSubid;
	portal->createLevel = GetCurrentTransactionNestLevel();
	portal->strategy = PORTAL_MULTI_QUERY;
	portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
	portal->atStart = true;
	portal->atEnd = true;		/* disallow fetches until query is set */
	portal->visible = true;
	portal->creation_time = GetCurrentStatementStartTimestamp();

	/* put portal in table (sets portal->name) */
	PortalHashTableInsert(portal, name);

	/* for named portals reuse portal->name copy */
	MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");

	return portal;
}
229 :
230 : /*
231 : * CreateNewPortal
232 : * Create a new portal, assigning it a random nonconflicting name.
233 : */
234 : Portal
235 27368 : CreateNewPortal(void)
236 : {
237 : static unsigned int unnamed_portal_count = 0;
238 :
239 : char portalname[MAX_PORTALNAME_LEN];
240 :
241 : /* Select a nonconflicting name */
242 : for (;;)
243 : {
244 27368 : unnamed_portal_count++;
245 27368 : sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
246 27368 : if (GetPortalByName(portalname) == NULL)
247 27368 : break;
248 : }
249 :
250 27368 : return CreatePortal(portalname, false, false);
251 : }
252 :
/*
 * PortalDefineQuery
 *		A simple subroutine to establish a portal's query.
 *
 * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
 * allowed anymore to pass NULL.  (If you really don't have source text,
 * you can pass a constant string, perhaps "(query not available)".)
 *
 * commandTag shall be NULL if and only if the original query string
 * (before rewriting) was an empty string.  Also, the passed commandTag must
 * be a pointer to a constant string, since it is not copied.
 *
 * If cplan is provided, then it is a cached plan containing the stmts, and
 * the caller must have done GetCachedPlan(), causing a refcount increment.
 * The refcount will be released when the portal is destroyed.
 *
 * If cplan is NULL, then it is the caller's responsibility to ensure that
 * the passed plan trees have adequate lifetime.  Typically this is done by
 * copying them into the portal's context.
 *
 * The caller is also responsible for ensuring that the passed prepStmtName
 * (if not NULL) and sourceText have adequate lifetime.
 *
 * NB: this function mustn't do much beyond storing the passed values; in
 * particular don't do anything that risks elog(ERROR).  If that were to
 * happen here before storing the cplan reference, we'd leak the plancache
 * refcount that the caller is trying to hand off to us.
 */
void
PortalDefineQuery(Portal portal,
				  const char *prepStmtName,
				  const char *sourceText,
				  CommandTag commandTag,
				  List *stmts,
				  CachedPlan *cplan,
				  CachedPlanSource *plansource)
{
	/* Can only define the query while the portal is brand-new */
	Assert(PortalIsValid(portal));
	Assert(portal->status == PORTAL_NEW);

	Assert(sourceText != NULL);
	Assert(commandTag != CMDTAG_UNKNOWN || stmts == NIL);

	/* Plain stores only, per the NB comment above: no elog(ERROR) risk */
	portal->prepStmtName = prepStmtName;
	portal->sourceText = sourceText;
	portal->qc.commandTag = commandTag;
	portal->qc.nprocessed = 0;
	portal->commandTag = commandTag;
	portal->stmts = stmts;
	portal->cplan = cplan;
	portal->plansource = plansource;
	portal->status = PORTAL_DEFINED;
}
306 :
307 : /*
308 : * PortalReleaseCachedPlan
309 : * Release a portal's reference to its cached plan, if any.
310 : */
311 : static void
312 753706 : PortalReleaseCachedPlan(Portal portal)
313 : {
314 753706 : if (portal->cplan)
315 : {
316 39168 : ReleaseCachedPlan(portal->cplan, NULL);
317 39168 : portal->cplan = NULL;
318 :
319 : /*
320 : * We must also clear portal->stmts which is now a dangling reference
321 : * to the cached plan's plan list. This protects any code that might
322 : * try to examine the Portal later.
323 : */
324 39168 : portal->stmts = NIL;
325 : }
326 753706 : }
327 :
328 : /*
329 : * PortalCreateHoldStore
330 : * Create the tuplestore for a portal.
331 : */
332 : void
333 47904 : PortalCreateHoldStore(Portal portal)
334 : {
335 : MemoryContext oldcxt;
336 :
337 : Assert(portal->holdContext == NULL);
338 : Assert(portal->holdStore == NULL);
339 : Assert(portal->holdSnapshot == NULL);
340 :
341 : /*
342 : * Create the memory context that is used for storage of the tuple set.
343 : * Note this is NOT a child of the portal's portalContext.
344 : */
345 47904 : portal->holdContext =
346 47904 : AllocSetContextCreate(TopPortalContext,
347 : "PortalHoldContext",
348 : ALLOCSET_DEFAULT_SIZES);
349 :
350 : /*
351 : * Create the tuple store, selecting cross-transaction temp files, and
352 : * enabling random access only if cursor requires scrolling.
353 : *
354 : * XXX: Should maintenance_work_mem be used for the portal size?
355 : */
356 47904 : oldcxt = MemoryContextSwitchTo(portal->holdContext);
357 :
358 47904 : portal->holdStore =
359 47904 : tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
360 : true, work_mem);
361 :
362 47904 : MemoryContextSwitchTo(oldcxt);
363 47904 : }
364 :
365 : /*
366 : * PinPortal
367 : * Protect a portal from dropping.
368 : *
369 : * A pinned portal is still unpinned and dropped at transaction or
370 : * subtransaction abort.
371 : */
372 : void
373 11930 : PinPortal(Portal portal)
374 : {
375 11930 : if (portal->portalPinned)
376 0 : elog(ERROR, "portal already pinned");
377 :
378 11930 : portal->portalPinned = true;
379 11930 : }
380 :
381 : void
382 11882 : UnpinPortal(Portal portal)
383 : {
384 11882 : if (!portal->portalPinned)
385 0 : elog(ERROR, "portal not pinned");
386 :
387 11882 : portal->portalPinned = false;
388 11882 : }
389 :
/*
 * MarkPortalActive
 *		Transition a portal from READY to ACTIVE state.
 *
 * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
 */
void
MarkPortalActive(Portal portal)
{
	/* For safety, this is a runtime test not just an Assert */
	if (portal->status != PORTAL_READY)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("portal \"%s\" cannot be run", portal->name)));
	/* Perform the state transition */
	portal->status = PORTAL_ACTIVE;
	/* Remember which subtransaction most recently ran the portal */
	portal->activeSubid = GetCurrentSubTransactionId();
}
408 :
/*
 * MarkPortalDone
 *		Transition a portal from ACTIVE to DONE state.
 *
 * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
 */
void
MarkPortalDone(Portal portal)
{
	/* Perform the state transition */
	Assert(portal->status == PORTAL_ACTIVE);
	portal->status = PORTAL_DONE;

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving execution of a ROLLBACK command in an already
	 * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
	 * with the cleanup hook still unexecuted.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		portal->cleanup(portal);
		/* Clear the hook so PortalDrop won't invoke it a second time */
		portal->cleanup = NULL;
	}
}
436 :
/*
 * MarkPortalFailed
 *		Transition a portal into FAILED state.
 *
 * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
 */
void
MarkPortalFailed(Portal portal)
{
	/* Perform the state transition */
	Assert(portal->status != PORTAL_DONE);
	portal->status = PORTAL_FAILED;

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving cleanup of an already aborted transaction, this
	 * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
	 * still unexecuted.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		portal->cleanup(portal);
		/* Clear the hook so PortalDrop won't invoke it a second time */
		portal->cleanup = NULL;
	}
}
464 :
/*
 * PortalDrop
 *		Destroy the portal.
 *
 * isTopCommit: true when called during normal top-level transaction commit;
 * this mostly suppresses resource release (see the big comment below), since
 * the regular end-of-transaction mechanisms will handle that.
 */
void
PortalDrop(Portal portal, bool isTopCommit)
{
	Assert(PortalIsValid(portal));

	/*
	 * Don't allow dropping a pinned portal, it's still needed by whoever
	 * pinned it.
	 */
	if (portal->portalPinned)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop pinned portal \"%s\"", portal->name)));

	/*
	 * Not sure if the PORTAL_ACTIVE case can validly happen or not...
	 */
	if (portal->status == PORTAL_ACTIVE)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop active portal \"%s\"", portal->name)));

	/*
	 * Allow portalcmds.c to clean up the state it knows about, in particular
	 * shutting down the executor if still active.  This step potentially runs
	 * user-defined code so failure has to be expected.  It's the cleanup
	 * hook's responsibility to not try to do that more than once, in the case
	 * that failure occurs and then we come back to drop the portal again
	 * during transaction abort.
	 *
	 * Note: in most paths of control, this will have been done already in
	 * MarkPortalDone or MarkPortalFailed.  We're just making sure.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}

	/* There shouldn't be an active snapshot anymore, except after error */
	Assert(portal->portalSnapshot == NULL || !isTopCommit);

	/*
	 * Remove portal from hash table.  Because we do this here, we will not
	 * come back to try to remove the portal again if there's any error in the
	 * subsequent steps.  Better to leak a little memory than to get into an
	 * infinite error-recovery loop.
	 */
	PortalHashTableDelete(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * If portal has a snapshot protecting its data, release that.  This needs
	 * a little care since the registration will be attached to the portal's
	 * resowner; if the portal failed, we will already have released the
	 * resowner (and the snapshot) during transaction abort.
	 */
	if (portal->holdSnapshot)
	{
		if (portal->resowner)
			UnregisterSnapshotFromOwner(portal->holdSnapshot,
										portal->resowner);
		portal->holdSnapshot = NULL;
	}

	/*
	 * Release any resources still attached to the portal.  There are several
	 * cases being covered here:
	 *
	 * Top transaction commit (indicated by isTopCommit): normally we should
	 * do nothing here and let the regular end-of-transaction resource
	 * releasing mechanism handle these resources too.  However, if we have a
	 * FAILED portal (eg, a cursor that got an error), we'd better clean up
	 * its resources to avoid resource-leakage warning messages.
	 *
	 * Sub transaction commit: never comes here at all, since we don't kill
	 * any portals in AtSubCommit_Portals().
	 *
	 * Main or sub transaction abort: we will do nothing here because
	 * portal->resowner was already set NULL; the resources were already
	 * cleaned up in transaction abort.
	 *
	 * Ordinary portal drop: must release resources.  However, if the portal
	 * is not FAILED then we do not release its locks.  The locks become the
	 * responsibility of the transaction's ResourceOwner (since it is the
	 * parent of the portal's owner) and will be released when the transaction
	 * eventually ends.
	 */
	if (portal->resowner &&
		(!isTopCommit || portal->status == PORTAL_FAILED))
	{
		bool		isCommit = (portal->status != PORTAL_FAILED);

		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_BEFORE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_AFTER_LOCKS,
							 isCommit, false);
		ResourceOwnerDelete(portal->resowner);
	}
	portal->resowner = NULL;

	/*
	 * Delete tuplestore if present.  We should do this even under error
	 * conditions; since the tuplestore would have been using cross-
	 * transaction storage, its temp files need to be explicitly deleted.
	 */
	if (portal->holdStore)
	{
		MemoryContext oldcontext;

		oldcontext = MemoryContextSwitchTo(portal->holdContext);
		tuplestore_end(portal->holdStore);
		MemoryContextSwitchTo(oldcontext);
		portal->holdStore = NULL;
	}

	/* delete tuplestore storage, if any */
	if (portal->holdContext)
		MemoryContextDelete(portal->holdContext);

	/* release subsidiary storage */
	MemoryContextDelete(portal->portalContext);

	/* release portal struct (it's in TopPortalContext) */
	pfree(portal);
}
602 :
603 : /*
604 : * Delete all declared cursors.
605 : *
606 : * Used by commands: CLOSE ALL, DISCARD ALL
607 : */
608 : void
609 18 : PortalHashTableDeleteAll(void)
610 : {
611 : HASH_SEQ_STATUS status;
612 : PortalHashEnt *hentry;
613 :
614 18 : if (PortalHashTable == NULL)
615 0 : return;
616 :
617 18 : hash_seq_init(&status, PortalHashTable);
618 72 : while ((hentry = hash_seq_search(&status)) != NULL)
619 : {
620 54 : Portal portal = hentry->portal;
621 :
622 : /* Can't close the active portal (the one running the command) */
623 54 : if (portal->status == PORTAL_ACTIVE)
624 30 : continue;
625 :
626 24 : PortalDrop(portal, false);
627 :
628 : /* Restart the iteration in case that led to other drops */
629 24 : hash_seq_term(&status);
630 24 : hash_seq_init(&status, PortalHashTable);
631 : }
632 : }
633 :
/*
 * "Hold" a portal.  Prepare it for access by later transactions.
 */
static void
HoldPortal(Portal portal)
{
	/*
	 * Note that PersistHoldablePortal() must release all resources used by
	 * the portal that are local to the creating transaction.
	 */
	PortalCreateHoldStore(portal);
	PersistHoldablePortal(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * Any resources belonging to the portal will be released in the upcoming
	 * transaction-wide cleanup; the portal will no longer have its own
	 * resources.
	 */
	portal->resowner = NULL;

	/*
	 * Having successfully exported the holdable cursor, mark it as not
	 * belonging to this transaction.
	 */
	portal->createSubid = InvalidSubTransactionId;
	portal->activeSubid = InvalidSubTransactionId;
	portal->createLevel = 0;
}
665 :
/*
 * Pre-commit processing for portals.
 *
 * Holdable cursors created in this transaction need to be converted to
 * materialized form, since we are going to close down the executor and
 * release locks.  Non-holdable portals created in this transaction are
 * simply removed.  Portals remaining from prior transactions should be
 * left untouched.
 *
 * isPrepare: true when committing via PREPARE TRANSACTION, in which case
 * WITH HOLD cursors created in this transaction are rejected with an error.
 *
 * Returns true if any portals changed state (possibly causing user-defined
 * code to be run), false if not.
 */
bool
PreCommit_Portals(bool isPrepare)
{
	bool		result = false;
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * There should be no pinned portals anymore.  Complain if someone
		 * leaked one.  Auto-held portals are allowed; we assume that whoever
		 * pinned them is managing them.
		 */
		if (portal->portalPinned && !portal->autoHeld)
			elog(ERROR, "cannot commit while a portal is pinned");

		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction utility command, such as VACUUM, or a commit in
		 * a procedure.
		 *
		 * Note however that any resource owner attached to such a portal is
		 * still going to go away, so don't leave a dangling pointer.  Also
		 * unregister any snapshots held by the portal, mainly to avoid
		 * snapshot leak warnings from ResourceOwnerRelease().
		 */
		if (portal->status == PORTAL_ACTIVE)
		{
			if (portal->holdSnapshot)
			{
				if (portal->resowner)
					UnregisterSnapshotFromOwner(portal->holdSnapshot,
												portal->resowner);
				portal->holdSnapshot = NULL;
			}
			portal->resowner = NULL;
			/* Clear portalSnapshot too, for cleanliness */
			portal->portalSnapshot = NULL;
			continue;
		}

		/* Is it a holdable portal created in the current xact? */
		if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
			portal->createSubid != InvalidSubTransactionId &&
			portal->status == PORTAL_READY)
		{
			/*
			 * We are exiting the transaction that created a holdable cursor.
			 * Instead of dropping the portal, prepare it for access by later
			 * transactions.
			 *
			 * However, if this is PREPARE TRANSACTION rather than COMMIT,
			 * refuse PREPARE, because the semantics seem pretty unclear.
			 */
			if (isPrepare)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));

			HoldPortal(portal);

			/* Report we changed state */
			result = true;
		}
		else if (portal->createSubid == InvalidSubTransactionId)
		{
			/*
			 * Do nothing to cursors held over from a previous transaction
			 * (including ones we just froze in a previous cycle of this loop)
			 */
			continue;
		}
		else
		{
			/* Zap all non-holdable portals */
			PortalDrop(portal, true);

			/* Report we changed state */
			result = true;
		}

		/*
		 * After either freezing or dropping a portal, we have to restart the
		 * iteration, because we could have invoked user-defined code that
		 * caused a drop of the next portal in the hash chain.
		 */
		hash_seq_term(&status);
		hash_seq_init(&status, PortalHashTable);
	}

	return result;
}
775 :
/*
 * Abort processing for portals.
 *
 * At this point we run the cleanup hook if present, but we can't release the
 * portal's memory until the cleanup call.
 */
void
AtAbort_Portals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * When elog(FATAL) is in progress, we need to set the active portal
		 * to failed, so that PortalCleanup() doesn't run the executor
		 * shutdown.
		 */
		if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
			MarkPortalFailed(portal);

		/*
		 * Do nothing else to cursors held over from a previous transaction.
		 */
		if (portal->createSubid == InvalidSubTransactionId)
			continue;

		/*
		 * Do nothing to auto-held cursors.  This is similar to the case of a
		 * cursor from a previous transaction, but it could also be that the
		 * cursor was auto-held in this transaction, so it wants to live on.
		 */
		if (portal->autoHeld)
			continue;

		/*
		 * If it was created in the current transaction, we can't do normal
		 * shutdown on a READY portal either; it might refer to objects
		 * created in the failed transaction.  See comments in
		 * AtSubAbort_Portals.
		 */
		if (portal->status == PORTAL_READY)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.  But leave active portals alone.
		 */
		if (portal->status != PORTAL_ACTIVE)
			MemoryContextDeleteChildren(portal->portalContext);
	}
}
854 :
/*
 * Post-abort cleanup for portals.
 *
 * Delete all portals not held over from prior transactions.
 */
void
AtCleanup_Portals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction command.
		 */
		if (portal->status == PORTAL_ACTIVE)
			continue;

		/*
		 * Do nothing to cursors held over from a previous transaction or
		 * auto-held ones.
		 */
		if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
		{
			Assert(portal->status != PORTAL_ACTIVE);
			Assert(portal->resowner == NULL);
			continue;
		}

		/*
		 * If a portal is still pinned, forcibly unpin it.  PortalDrop will not
		 * let us drop the portal otherwise.  Whoever pinned the portal was
		 * interrupted by the abort too and won't try to use it anymore.
		 */
		if (portal->portalPinned)
			portal->portalPinned = false;

		/*
		 * We had better not call any user-defined code during cleanup, so if
		 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
			portal->cleanup = NULL;
		}

		/* Zap it. */
		PortalDrop(portal, false);
	}
}
911 :
912 : /*
913 : * Portal-related cleanup when we return to the main loop on error.
914 : *
915 : * This is different from the cleanup at transaction abort. Auto-held portals
916 : * are cleaned up on error but not on transaction abort.
917 : */
918 : void
919 42520 : PortalErrorCleanup(void)
920 : {
921 : HASH_SEQ_STATUS status;
922 : PortalHashEnt *hentry;
923 :
924 42520 : hash_seq_init(&status, PortalHashTable);
925 :
926 44306 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
927 : {
928 1786 : Portal portal = hentry->portal;
929 :
930 1786 : if (portal->autoHeld)
931 : {
932 4 : portal->portalPinned = false;
933 4 : PortalDrop(portal, false);
934 : }
935 : }
936 42520 : }
937 :
938 : /*
939 : * Pre-subcommit processing for portals.
940 : *
941 : * Reassign portals created or used in the current subtransaction to the
942 : * parent subtransaction.
943 : */
944 : void
945 10728 : AtSubCommit_Portals(SubTransactionId mySubid,
946 : SubTransactionId parentSubid,
947 : int parentLevel,
948 : ResourceOwner parentXactOwner)
949 : {
950 : HASH_SEQ_STATUS status;
951 : PortalHashEnt *hentry;
952 :
953 10728 : hash_seq_init(&status, PortalHashTable);
954 :
955 20172 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
956 : {
957 9444 : Portal portal = hentry->portal;
958 :
959 9444 : if (portal->createSubid == mySubid)
960 : {
961 60 : portal->createSubid = parentSubid;
962 60 : portal->createLevel = parentLevel;
963 60 : if (portal->resowner)
964 60 : ResourceOwnerNewParent(portal->resowner, parentXactOwner);
965 : }
966 9444 : if (portal->activeSubid == mySubid)
967 220 : portal->activeSubid = parentSubid;
968 : }
969 10728 : }
970 :
971 : /*
972 : * Subtransaction abort handling for portals.
973 : *
974 : * Deactivate portals created or used during the failed subtransaction.
975 : * Note that per AtSubCommit_Portals, this will catch portals created/used
976 : * in descendants of the subtransaction too.
977 : *
978 : * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
979 : */
980 : void
981 9298 : AtSubAbort_Portals(SubTransactionId mySubid,
982 : SubTransactionId parentSubid,
983 : ResourceOwner myXactOwner,
984 : ResourceOwner parentXactOwner)
985 : {
986 : HASH_SEQ_STATUS status;
987 : PortalHashEnt *hentry;
988 :
989 9298 : hash_seq_init(&status, PortalHashTable);
990 :
991 21462 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
992 : {
993 12164 : Portal portal = hentry->portal;
994 :
995 : /* Was it created in this subtransaction? */
996 12164 : if (portal->createSubid != mySubid)
997 : {
998 : /* No, but maybe it was used in this subtransaction? */
999 11924 : if (portal->activeSubid == mySubid)
1000 : {
1001 : /* Maintain activeSubid until the portal is removed */
1002 44 : portal->activeSubid = parentSubid;
1003 :
1004 : /*
1005 : * A MarkPortalActive() caller ran an upper-level portal in
1006 : * this subtransaction and left the portal ACTIVE. This can't
1007 : * happen, but force the portal into FAILED state for the same
1008 : * reasons discussed below.
1009 : *
1010 : * We assume we can get away without forcing upper-level READY
1011 : * portals to fail, even if they were run and then suspended.
1012 : * In theory a suspended upper-level portal could have
1013 : * acquired some references to objects that are about to be
1014 : * destroyed, but there should be sufficient defenses against
1015 : * such cases: the portal's original query cannot contain such
1016 : * references, and any references within, say, cached plans of
1017 : * PL/pgSQL functions are not from active queries and should
1018 : * be protected by revalidation logic.
1019 : */
1020 44 : if (portal->status == PORTAL_ACTIVE)
1021 0 : MarkPortalFailed(portal);
1022 :
1023 : /*
1024 : * Also, if we failed it during the current subtransaction
1025 : * (either just above, or earlier), reattach its resource
1026 : * owner to the current subtransaction's resource owner, so
1027 : * that any resources it still holds will be released while
1028 : * cleaning up this subtransaction. This prevents some corner
1029 : * cases wherein we might get Asserts or worse while cleaning
1030 : * up objects created during the current subtransaction
1031 : * (because they're still referenced within this portal).
1032 : */
1033 44 : if (portal->status == PORTAL_FAILED && portal->resowner)
1034 : {
1035 14 : ResourceOwnerNewParent(portal->resowner, myXactOwner);
1036 14 : portal->resowner = NULL;
1037 : }
1038 : }
1039 : /* Done if it wasn't created in this subtransaction */
1040 11924 : continue;
1041 : }
1042 :
1043 : /*
1044 : * Force any live portals of my own subtransaction into FAILED state.
1045 : * We have to do this because they might refer to objects created or
1046 : * changed in the failed subtransaction, leading to crashes within
1047 : * ExecutorEnd when portalcmds.c tries to close down the portal.
1048 : * Currently, every MarkPortalActive() caller ensures it updates the
1049 : * portal status again before relinquishing control, so ACTIVE can't
1050 : * happen here. If it does happen, dispose the portal like existing
1051 : * MarkPortalActive() callers would.
1052 : */
1053 240 : if (portal->status == PORTAL_READY ||
1054 228 : portal->status == PORTAL_ACTIVE)
1055 12 : MarkPortalFailed(portal);
1056 :
1057 : /*
1058 : * Allow portalcmds.c to clean up the state it knows about, if we
1059 : * haven't already.
1060 : */
1061 240 : if (PointerIsValid(portal->cleanup))
1062 : {
1063 0 : portal->cleanup(portal);
1064 0 : portal->cleanup = NULL;
1065 : }
1066 :
1067 : /* drop cached plan reference, if any */
1068 240 : PortalReleaseCachedPlan(portal);
1069 :
1070 : /*
1071 : * Any resources belonging to the portal will be released in the
1072 : * upcoming transaction-wide cleanup; they will be gone before we run
1073 : * PortalDrop.
1074 : */
1075 240 : portal->resowner = NULL;
1076 :
1077 : /*
1078 : * Although we can't delete the portal data structure proper, we can
1079 : * release any memory in subsidiary contexts, such as executor state.
1080 : * The cleanup hook was the last thing that might have needed data
1081 : * there.
1082 : */
1083 240 : MemoryContextDeleteChildren(portal->portalContext);
1084 : }
1085 9298 : }
1086 :
1087 : /*
1088 : * Post-subabort cleanup for portals.
1089 : *
1090 : * Drop all portals created in the failed subtransaction (but note that
1091 : * we will not drop any that were reassigned to the parent above).
1092 : */
1093 : void
1094 9298 : AtSubCleanup_Portals(SubTransactionId mySubid)
1095 : {
1096 : HASH_SEQ_STATUS status;
1097 : PortalHashEnt *hentry;
1098 :
1099 9298 : hash_seq_init(&status, PortalHashTable);
1100 :
1101 21236 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1102 : {
1103 11938 : Portal portal = hentry->portal;
1104 :
1105 11938 : if (portal->createSubid != mySubid)
1106 11924 : continue;
1107 :
1108 : /*
1109 : * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1110 : * let us drop the portal otherwise. Whoever pinned the portal was
1111 : * interrupted by the abort too and won't try to use it anymore.
1112 : */
1113 14 : if (portal->portalPinned)
1114 6 : portal->portalPinned = false;
1115 :
1116 : /*
1117 : * We had better not call any user-defined code during cleanup, so if
1118 : * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
1119 : */
1120 14 : if (PointerIsValid(portal->cleanup))
1121 : {
1122 0 : elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
1123 0 : portal->cleanup = NULL;
1124 : }
1125 :
1126 : /* Zap it. */
1127 14 : PortalDrop(portal, false);
1128 : }
1129 9298 : }
1130 :
1131 : /* Find all available cursors */
1132 : Datum
1133 120 : pg_cursor(PG_FUNCTION_ARGS)
1134 : {
1135 120 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1136 : HASH_SEQ_STATUS hash_seq;
1137 : PortalHashEnt *hentry;
1138 :
1139 : /*
1140 : * We put all the tuples into a tuplestore in one scan of the hashtable.
1141 : * This avoids any issue of the hashtable possibly changing between calls.
1142 : */
1143 120 : InitMaterializedSRF(fcinfo, 0);
1144 :
1145 120 : hash_seq_init(&hash_seq, PortalHashTable);
1146 372 : while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1147 : {
1148 252 : Portal portal = hentry->portal;
1149 : Datum values[6];
1150 252 : bool nulls[6] = {0};
1151 :
1152 : /* report only "visible" entries */
1153 252 : if (!portal->visible)
1154 126 : continue;
1155 : /* also ignore it if PortalDefineQuery hasn't been called yet */
1156 126 : if (!portal->sourceText)
1157 0 : continue;
1158 :
1159 126 : values[0] = CStringGetTextDatum(portal->name);
1160 126 : values[1] = CStringGetTextDatum(portal->sourceText);
1161 126 : values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
1162 126 : values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
1163 126 : values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
1164 126 : values[5] = TimestampTzGetDatum(portal->creation_time);
1165 :
1166 126 : tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
1167 : }
1168 :
1169 120 : return (Datum) 0;
1170 : }
1171 :
1172 : bool
1173 60 : ThereAreNoReadyPortals(void)
1174 : {
1175 : HASH_SEQ_STATUS status;
1176 : PortalHashEnt *hentry;
1177 :
1178 60 : hash_seq_init(&status, PortalHashTable);
1179 :
1180 120 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1181 : {
1182 60 : Portal portal = hentry->portal;
1183 :
1184 60 : if (portal->status == PORTAL_READY)
1185 0 : return false;
1186 : }
1187 :
1188 60 : return true;
1189 : }
1190 :
1191 : /*
1192 : * Hold all pinned portals.
1193 : *
1194 : * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
1195 : * called to protect internally-generated cursors from being dropped during
1196 : * the transaction shutdown. Currently, SPI calls this automatically; PLs
1197 : * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
1198 : * themselves. (Note that we couldn't do this in, say, AtAbort_Portals
1199 : * because we need to run user-defined code while persisting a portal.
1200 : * It's too late to do that once transaction abort has started.)
1201 : *
1202 : * We protect such portals by converting them to held cursors. We mark them
1203 : * as "auto-held" so that exception exit knows to clean them up. (In normal,
1204 : * non-exception code paths, the PL needs to clean such portals itself, since
1205 : * transaction end won't do it anymore; but that should be normal practice
1206 : * anyway.)
1207 : */
1208 : void
1209 4426 : HoldPinnedPortals(void)
1210 : {
1211 : HASH_SEQ_STATUS status;
1212 : PortalHashEnt *hentry;
1213 :
1214 4426 : hash_seq_init(&status, PortalHashTable);
1215 :
1216 8972 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1217 : {
1218 4552 : Portal portal = hentry->portal;
1219 :
1220 4552 : if (portal->portalPinned && !portal->autoHeld)
1221 : {
1222 : /*
1223 : * Doing transaction control, especially abort, inside a cursor
1224 : * loop that is not read-only, for example using UPDATE ...
1225 : * RETURNING, has weird semantics issues. Also, this
1226 : * implementation wouldn't work, because such portals cannot be
1227 : * held. (The core grammar enforces that only SELECT statements
1228 : * can drive a cursor, but for example PL/pgSQL does not restrict
1229 : * it.)
1230 : */
1231 38 : if (portal->strategy != PORTAL_ONE_SELECT)
1232 2 : ereport(ERROR,
1233 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1234 : errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));
1235 :
1236 : /* Verify it's in a suitable state to be held */
1237 36 : if (portal->status != PORTAL_READY)
1238 0 : elog(ERROR, "pinned portal is not ready to be auto-held");
1239 :
1240 36 : HoldPortal(portal);
1241 32 : portal->autoHeld = true;
1242 : }
1243 : }
1244 4420 : }
1245 :
1246 : /*
1247 : * Drop the outer active snapshots for all portals, so that no snapshots
1248 : * remain active.
1249 : *
1250 : * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
1251 : * ROLLBACK inside a procedure. This has to be separate from that since it
1252 : * should not be run until we're done with steps that are likely to fail.
1253 : *
1254 : * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
1255 : * need to clean up snapshot management in VACUUM and perhaps other places.
1256 : */
1257 : void
1258 4420 : ForgetPortalSnapshots(void)
1259 : {
1260 : HASH_SEQ_STATUS status;
1261 : PortalHashEnt *hentry;
1262 4420 : int numPortalSnaps = 0;
1263 4420 : int numActiveSnaps = 0;
1264 :
1265 : /* First, scan PortalHashTable and clear portalSnapshot fields */
1266 4420 : hash_seq_init(&status, PortalHashTable);
1267 :
1268 8966 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1269 : {
1270 4546 : Portal portal = hentry->portal;
1271 :
1272 4546 : if (portal->portalSnapshot != NULL)
1273 : {
1274 4420 : portal->portalSnapshot = NULL;
1275 4420 : numPortalSnaps++;
1276 : }
1277 : /* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
1278 : }
1279 :
1280 : /*
1281 : * Now, pop all the active snapshots, which should be just those that were
1282 : * portal snapshots. Ideally we'd drive this directly off the portal
1283 : * scan, but there's no good way to visit the portals in the correct
1284 : * order. So just cross-check after the fact.
1285 : */
1286 8840 : while (ActiveSnapshotSet())
1287 : {
1288 4420 : PopActiveSnapshot();
1289 4420 : numActiveSnaps++;
1290 : }
1291 :
1292 4420 : if (numPortalSnaps != numActiveSnaps)
1293 0 : elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
1294 : numPortalSnaps, numActiveSnaps);
1295 4420 : }
|