Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * portalmem.c
4 : * backend portal memory management
5 : *
6 : * Portals are objects representing the execution state of a query.
7 : * This module provides memory management services for portals, but it
8 : * doesn't actually run the executor for them.
9 : *
10 : *
11 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
12 : * Portions Copyright (c) 1994, Regents of the University of California
13 : *
14 : * IDENTIFICATION
15 : * src/backend/utils/mmgr/portalmem.c
16 : *
17 : *-------------------------------------------------------------------------
18 : */
19 : #include "postgres.h"
20 :
21 : #include "access/xact.h"
22 : #include "commands/portalcmds.h"
23 : #include "funcapi.h"
24 : #include "miscadmin.h"
25 : #include "storage/ipc.h"
26 : #include "utils/builtins.h"
27 : #include "utils/memutils.h"
28 : #include "utils/snapmgr.h"
29 : #include "utils/timestamp.h"
30 :
31 : /*
32 : * Estimate of the maximum number of open portals a user would have,
33 : * used in initially sizing the PortalHashTable in EnablePortalManager().
34 : * Since the hash table can expand, there's no need to make this overly
35 : * generous, and keeping it small avoids unnecessary overhead in the
36 : * hash_seq_search() calls executed during transaction end.
37 : */
38 : #define PORTALS_PER_USER 16
39 :
40 :
41 : /* ----------------
42 : * Global state
43 : * ----------------
44 : */
45 :
#define MAX_PORTALNAME_LEN NAMEDATALEN

/*
 * Hash table entry: the portal name is the lookup key, stored inline so the
 * hashtable owns the canonical copy of the name (see PortalHashTableInsert).
 */
typedef struct portalhashent
{
	char		portalname[MAX_PORTALNAME_LEN];
	Portal		portal;
} PortalHashEnt;

/* Backend-local table of all live portals, keyed by name */
static HTAB *PortalHashTable = NULL;

/* Look up NAME; set PORTAL to the matching portal, or NULL if not found */
#define PortalHashTableLookup(NAME, PORTAL) \
do { \
	PortalHashEnt *hentry; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   (NAME), HASH_FIND, NULL); \
	if (hentry) \
		PORTAL = hentry->portal; \
	else \
		PORTAL = NULL; \
} while(0)

/* Insert PORTAL under NAME; elog(ERROR) if the name is already taken */
#define PortalHashTableInsert(PORTAL, NAME) \
do { \
	PortalHashEnt *hentry; bool found; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   (NAME), HASH_ENTER, &found); \
	if (found) \
		elog(ERROR, "duplicate portal name"); \
	hentry->portal = PORTAL; \
	/* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
	PORTAL->name = hentry->portalname; \
} while(0)

/* Remove PORTAL's entry; only WARNING on absence, to keep teardown going */
#define PortalHashTableDelete(PORTAL) \
do { \
	PortalHashEnt *hentry; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   PORTAL->name, HASH_REMOVE, NULL); \
	if (hentry == NULL) \
		elog(WARNING, "trying to delete portal name that does not exist"); \
} while(0)

/* Parent memory context of all per-portal contexts; created at startup */
static MemoryContext TopPortalContext = NULL;
92 :
93 :
94 : /* ----------------------------------------------------------------
95 : * public portal interface functions
96 : * ----------------------------------------------------------------
97 : */
98 :
/*
 * EnablePortalManager
 *		Enables the portal management module at backend startup.
 *
 * Creates TopPortalContext (parent of all portal-related contexts) and the
 * portal hash table.  Must be called exactly once per backend.
 */
void
EnablePortalManager(void)
{
	HASHCTL		ctl;

	/* guard against being called twice */
	Assert(TopPortalContext == NULL);

	TopPortalContext = AllocSetContextCreate(TopMemoryContext,
											 "TopPortalContext",
											 ALLOCSET_DEFAULT_SIZES);

	/* fixed-length string keys of MAX_PORTALNAME_LEN bytes */
	ctl.keysize = MAX_PORTALNAME_LEN;
	ctl.entrysize = sizeof(PortalHashEnt);

	/*
	 * use PORTALS_PER_USER as a guess of how many hash table entries to
	 * create, initially
	 */
	PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
								  &ctl, HASH_ELEM | HASH_STRINGS);
}
124 :
125 : /*
126 : * GetPortalByName
127 : * Returns a portal given a portal name, or NULL if name not found.
128 : */
129 : Portal
130 894736 : GetPortalByName(const char *name)
131 : {
132 : Portal portal;
133 :
134 894736 : if (name)
135 894736 : PortalHashTableLookup(name, portal);
136 : else
137 0 : portal = NULL;
138 :
139 894736 : return portal;
140 : }
141 :
142 : /*
143 : * PortalGetPrimaryStmt
144 : * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
145 : *
146 : * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
147 : * portal are marked canSetTag, returns the first one. Neither of these
148 : * cases should occur in present usages of this function.
149 : */
150 : PlannedStmt *
151 377074 : PortalGetPrimaryStmt(Portal portal)
152 : {
153 : ListCell *lc;
154 :
155 377074 : foreach(lc, portal->stmts)
156 : {
157 377074 : PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
158 :
159 377074 : if (stmt->canSetTag)
160 377074 : return stmt;
161 : }
162 0 : return NULL;
163 : }
164 :
/*
 * CreatePortal
 *		Returns a new portal given a name.
 *
 * allowDup: if true, automatically drop any pre-existing portal of the
 * same name (if false, an error is raised).
 *
 * dupSilent: if true, don't even emit a WARNING.
 *
 * The returned portal is in PORTAL_NEW state; the caller is expected to
 * call PortalDefineQuery next.  The portal struct itself, its context,
 * and its hashtable entry all live in TopPortalContext.
 */
Portal
CreatePortal(const char *name, bool allowDup, bool dupSilent)
{
	Portal		portal;

	Assert(name);

	/* Handle collision with an existing portal of the same name */
	portal = GetPortalByName(name);
	if (PortalIsValid(portal))
	{
		if (!allowDup)
			ereport(ERROR,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("cursor \"%s\" already exists", name)));
		if (!dupSilent)
			ereport(WARNING,
					(errcode(ERRCODE_DUPLICATE_CURSOR),
					 errmsg("closing existing cursor \"%s\"",
							name)));
		PortalDrop(portal, false);
	}

	/* make new portal structure */
	portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);

	/* initialize portal context; typically it won't store much */
	portal->portalContext = AllocSetContextCreate(TopPortalContext,
												  "PortalContext",
												  ALLOCSET_SMALL_SIZES);

	/* create a resource owner for the portal */
	portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
										   "Portal");

	/* initialize portal fields that don't start off zero */
	portal->status = PORTAL_NEW;
	portal->cleanup = PortalCleanup;
	portal->createSubid = GetCurrentSubTransactionId();
	portal->activeSubid = portal->createSubid;
	portal->createLevel = GetCurrentTransactionNestLevel();
	portal->strategy = PORTAL_MULTI_QUERY;
	portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
	portal->atStart = true;
	portal->atEnd = true;		/* disallow fetches until query is set */
	portal->visible = true;
	portal->creation_time = GetCurrentStatementStartTimestamp();

	/* put portal in table (sets portal->name) */
	PortalHashTableInsert(portal, name);

	/* for named portals reuse portal->name copy */
	MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");

	return portal;
}
229 :
230 : /*
231 : * CreateNewPortal
232 : * Create a new portal, assigning it a random nonconflicting name.
233 : */
234 : Portal
235 28514 : CreateNewPortal(void)
236 : {
237 : static unsigned int unnamed_portal_count = 0;
238 :
239 : char portalname[MAX_PORTALNAME_LEN];
240 :
241 : /* Select a nonconflicting name */
242 : for (;;)
243 : {
244 28514 : unnamed_portal_count++;
245 28514 : sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
246 28514 : if (GetPortalByName(portalname) == NULL)
247 28514 : break;
248 : }
249 :
250 28514 : return CreatePortal(portalname, false, false);
251 : }
252 :
/*
 * PortalDefineQuery
 *		A simple subroutine to establish a portal's query.
 *
 * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
 * allowed anymore to pass NULL.  (If you really don't have source text,
 * you can pass a constant string, perhaps "(query not available)".)
 *
 * commandTag shall be NULL if and only if the original query string
 * (before rewriting) was an empty string.  Also, the passed commandTag must
 * be a pointer to a constant string, since it is not copied.
 *
 * If cplan is provided, then it is a cached plan containing the stmts, and
 * the caller must have done GetCachedPlan(), causing a refcount increment.
 * The refcount will be released when the portal is destroyed.
 *
 * If cplan is NULL, then it is the caller's responsibility to ensure that
 * the passed plan trees have adequate lifetime.  Typically this is done by
 * copying them into the portal's context.
 *
 * The caller is also responsible for ensuring that the passed prepStmtName
 * (if not NULL) and sourceText have adequate lifetime.
 *
 * NB: this function mustn't do much beyond storing the passed values; in
 * particular don't do anything that risks elog(ERROR).  If that were to
 * happen here before storing the cplan reference, we'd leak the plancache
 * refcount that the caller is trying to hand off to us.
 */
void
PortalDefineQuery(Portal portal,
				  const char *prepStmtName,
				  const char *sourceText,
				  CommandTag commandTag,
				  List *stmts,
				  CachedPlan *cplan)
{
	/* portal must be freshly created and not yet defined */
	Assert(PortalIsValid(portal));
	Assert(portal->status == PORTAL_NEW);

	Assert(sourceText != NULL);
	Assert(commandTag != CMDTAG_UNKNOWN || stmts == NIL);

	/* plain assignments only: see NB above about not risking elog(ERROR) */
	portal->prepStmtName = prepStmtName;
	portal->sourceText = sourceText;
	portal->commandTag = commandTag;
	SetQueryCompletion(&portal->qc, commandTag, 0);
	portal->stmts = stmts;
	portal->cplan = cplan;
	portal->status = PORTAL_DEFINED;
}
303 :
304 : /*
305 : * PortalReleaseCachedPlan
306 : * Release a portal's reference to its cached plan, if any.
307 : */
308 : static void
309 816088 : PortalReleaseCachedPlan(Portal portal)
310 : {
311 816088 : if (portal->cplan)
312 : {
313 41024 : ReleaseCachedPlan(portal->cplan, NULL);
314 41024 : portal->cplan = NULL;
315 :
316 : /*
317 : * We must also clear portal->stmts which is now a dangling reference
318 : * to the cached plan's plan list. This protects any code that might
319 : * try to examine the Portal later.
320 : */
321 41024 : portal->stmts = NIL;
322 : }
323 816088 : }
324 :
/*
 * PortalCreateHoldStore
 *		Create the tuplestore for a portal.
 *
 * Sets up portal->holdContext and portal->holdStore; the caller must not
 * have created either already (checked by Asserts below).
 */
void
PortalCreateHoldStore(Portal portal)
{
	MemoryContext oldcxt;

	Assert(portal->holdContext == NULL);
	Assert(portal->holdStore == NULL);
	Assert(portal->holdSnapshot == NULL);

	/*
	 * Create the memory context that is used for storage of the tuple set.
	 * Note this is NOT a child of the portal's portalContext.
	 */
	portal->holdContext =
		AllocSetContextCreate(TopPortalContext,
							  "PortalHoldContext",
							  ALLOCSET_DEFAULT_SIZES);

	/*
	 * Create the tuple store, selecting cross-transaction temp files, and
	 * enabling random access only if cursor requires scrolling.
	 *
	 * XXX: Should maintenance_work_mem be used for the portal size?
	 */
	oldcxt = MemoryContextSwitchTo(portal->holdContext);

	portal->holdStore =
		tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
							  true, work_mem);

	MemoryContextSwitchTo(oldcxt);
}
361 :
362 : /*
363 : * PinPortal
364 : * Protect a portal from dropping.
365 : *
366 : * A pinned portal is still unpinned and dropped at transaction or
367 : * subtransaction abort.
368 : */
369 : void
370 12124 : PinPortal(Portal portal)
371 : {
372 12124 : if (portal->portalPinned)
373 0 : elog(ERROR, "portal already pinned");
374 :
375 12124 : portal->portalPinned = true;
376 12124 : }
377 :
378 : void
379 12076 : UnpinPortal(Portal portal)
380 : {
381 12076 : if (!portal->portalPinned)
382 0 : elog(ERROR, "portal not pinned");
383 :
384 12076 : portal->portalPinned = false;
385 12076 : }
386 :
/*
 * MarkPortalActive
 *		Transition a portal from READY to ACTIVE state.
 *
 * Also records the current subtransaction as the portal's activeSubid,
 * which AtSubAbort_Portals uses to detect cross-subxact portal use.
 *
 * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
 */
void
MarkPortalActive(Portal portal)
{
	/* For safety, this is a runtime test not just an Assert */
	if (portal->status != PORTAL_READY)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("portal \"%s\" cannot be run", portal->name)));
	/* Perform the state transition */
	portal->status = PORTAL_ACTIVE;
	portal->activeSubid = GetCurrentSubTransactionId();
}
405 :
/*
 * MarkPortalDone
 *		Transition a portal from ACTIVE to DONE state.
 *
 * Also runs (and clears) the portal's cleanup hook, since a DONE portal
 * cannot be executed again.
 *
 * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
 */
void
MarkPortalDone(Portal portal)
{
	/* Perform the state transition */
	Assert(portal->status == PORTAL_ACTIVE);
	portal->status = PORTAL_DONE;

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving execution of a ROLLBACK command in an already
	 * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
	 * with the cleanup hook still unexecuted.
	 */
	if (portal->cleanup)
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;		/* ensure hook runs at most once */
	}
}
433 :
/*
 * MarkPortalFailed
 *		Transition a portal into FAILED state.
 *
 * Valid from any state except DONE; also runs (and clears) the cleanup
 * hook, as the portal can never be executed again.
 *
 * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
 */
void
MarkPortalFailed(Portal portal)
{
	/* Perform the state transition */
	Assert(portal->status != PORTAL_DONE);
	portal->status = PORTAL_FAILED;

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving cleanup of an already aborted transaction, this
	 * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
	 * still unexecuted.
	 */
	if (portal->cleanup)
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;		/* ensure hook runs at most once */
	}
}
461 :
/*
 * PortalDrop
 *		Destroy the portal.
 *
 * isTopCommit is true only when called during top-level transaction commit;
 * it suppresses explicit resource release (the transaction-wide cleanup will
 * handle it) except for FAILED portals.  The teardown sequence below is
 * order-sensitive: cleanup hook, hashtable removal, plan release, snapshot
 * release, resource owner release, tuplestore, then memory contexts.
 */
void
PortalDrop(Portal portal, bool isTopCommit)
{
	Assert(PortalIsValid(portal));

	/*
	 * Don't allow dropping a pinned portal, it's still needed by whoever
	 * pinned it.
	 */
	if (portal->portalPinned)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop pinned portal \"%s\"", portal->name)));

	/*
	 * Not sure if the PORTAL_ACTIVE case can validly happen or not...
	 */
	if (portal->status == PORTAL_ACTIVE)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop active portal \"%s\"", portal->name)));

	/*
	 * Allow portalcmds.c to clean up the state it knows about, in particular
	 * shutting down the executor if still active.  This step potentially runs
	 * user-defined code so failure has to be expected.  It's the cleanup
	 * hook's responsibility to not try to do that more than once, in the case
	 * that failure occurs and then we come back to drop the portal again
	 * during transaction abort.
	 *
	 * Note: in most paths of control, this will have been done already in
	 * MarkPortalDone or MarkPortalFailed.  We're just making sure.
	 */
	if (portal->cleanup)
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}

	/* There shouldn't be an active snapshot anymore, except after error */
	Assert(portal->portalSnapshot == NULL || !isTopCommit);

	/*
	 * Remove portal from hash table.  Because we do this here, we will not
	 * come back to try to remove the portal again if there's any error in the
	 * subsequent steps.  Better to leak a little memory than to get into an
	 * infinite error-recovery loop.
	 */
	PortalHashTableDelete(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * If portal has a snapshot protecting its data, release that.  This needs
	 * a little care since the registration will be attached to the portal's
	 * resowner; if the portal failed, we will already have released the
	 * resowner (and the snapshot) during transaction abort.
	 */
	if (portal->holdSnapshot)
	{
		if (portal->resowner)
			UnregisterSnapshotFromOwner(portal->holdSnapshot,
										portal->resowner);
		portal->holdSnapshot = NULL;
	}

	/*
	 * Release any resources still attached to the portal.  There are several
	 * cases being covered here:
	 *
	 * Top transaction commit (indicated by isTopCommit): normally we should
	 * do nothing here and let the regular end-of-transaction resource
	 * releasing mechanism handle these resources too.  However, if we have a
	 * FAILED portal (eg, a cursor that got an error), we'd better clean up
	 * its resources to avoid resource-leakage warning messages.
	 *
	 * Sub transaction commit: never comes here at all, since we don't kill
	 * any portals in AtSubCommit_Portals().
	 *
	 * Main or sub transaction abort: we will do nothing here because
	 * portal->resowner was already set NULL; the resources were already
	 * cleaned up in transaction abort.
	 *
	 * Ordinary portal drop: must release resources.  However, if the portal
	 * is not FAILED then we do not release its locks.  The locks become the
	 * responsibility of the transaction's ResourceOwner (since it is the
	 * parent of the portal's owner) and will be released when the transaction
	 * eventually ends.
	 */
	if (portal->resowner &&
		(!isTopCommit || portal->status == PORTAL_FAILED))
	{
		bool		isCommit = (portal->status != PORTAL_FAILED);

		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_BEFORE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_AFTER_LOCKS,
							 isCommit, false);
		ResourceOwnerDelete(portal->resowner);
	}
	portal->resowner = NULL;

	/*
	 * Delete tuplestore if present.  We should do this even under error
	 * conditions; since the tuplestore would have been using cross-
	 * transaction storage, its temp files need to be explicitly deleted.
	 */
	if (portal->holdStore)
	{
		MemoryContext oldcontext;

		/* switch into holdContext so tuplestore frees the right memory */
		oldcontext = MemoryContextSwitchTo(portal->holdContext);
		tuplestore_end(portal->holdStore);
		MemoryContextSwitchTo(oldcontext);
		portal->holdStore = NULL;
	}

	/* delete tuplestore storage, if any */
	if (portal->holdContext)
		MemoryContextDelete(portal->holdContext);

	/* release subsidiary storage */
	MemoryContextDelete(portal->portalContext);

	/* release portal struct (it's in TopPortalContext) */
	pfree(portal);
}
599 :
/*
 * Delete all declared cursors.
 *
 * Used by commands: CLOSE ALL, DISCARD ALL
 *
 * Skips the portal currently executing the command.  Because PortalDrop
 * may run user-defined code that drops other portals, the hash_seq scan
 * is restarted from scratch after every drop.
 */
void
PortalHashTableDeleteAll(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	/* nothing to do if the portal manager was never initialized */
	if (PortalHashTable == NULL)
		return;

	hash_seq_init(&status, PortalHashTable);
	while ((hentry = hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/* Can't close the active portal (the one running the command) */
		if (portal->status == PORTAL_ACTIVE)
			continue;

		PortalDrop(portal, false);

		/* Restart the iteration in case that led to other drops */
		hash_seq_term(&status);
		hash_seq_init(&status, PortalHashTable);
	}
}
630 :
/*
 * "Hold" a portal.  Prepare it for access by later transactions.
 *
 * Materializes the portal's results into a cross-transaction tuplestore
 * and detaches the portal from the current (sub)transaction, so it can
 * survive transaction end.
 */
static void
HoldPortal(Portal portal)
{
	/*
	 * Note that PersistHoldablePortal() must release all resources used by
	 * the portal that are local to the creating transaction.
	 */
	PortalCreateHoldStore(portal);
	PersistHoldablePortal(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * Any resources belonging to the portal will be released in the upcoming
	 * transaction-wide cleanup; the portal will no longer have its own
	 * resources.
	 */
	portal->resowner = NULL;

	/*
	 * Having successfully exported the holdable cursor, mark it as not
	 * belonging to this transaction.
	 */
	portal->createSubid = InvalidSubTransactionId;
	portal->activeSubid = InvalidSubTransactionId;
	portal->createLevel = 0;
}
662 :
/*
 * Pre-commit processing for portals.
 *
 * Holdable cursors created in this transaction need to be converted to
 * materialized form, since we are going to close down the executor and
 * release locks.  Non-holdable portals created in this transaction are
 * simply removed.  Portals remaining from prior transactions should be
 * left untouched.
 *
 * Returns true if any portals changed state (possibly causing user-defined
 * code to be run), false if not.
 */
bool
PreCommit_Portals(bool isPrepare)
{
	bool		result = false;
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * There should be no pinned portals anymore.  Complain if someone
		 * leaked one. Auto-held portals are allowed; we assume that whoever
		 * pinned them is managing them.
		 */
		if (portal->portalPinned && !portal->autoHeld)
			elog(ERROR, "cannot commit while a portal is pinned");

		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction utility command, such as VACUUM, or a commit in
		 * a procedure.
		 *
		 * Note however that any resource owner attached to such a portal is
		 * still going to go away, so don't leave a dangling pointer.  Also
		 * unregister any snapshots held by the portal, mainly to avoid
		 * snapshot leak warnings from ResourceOwnerRelease().
		 */
		if (portal->status == PORTAL_ACTIVE)
		{
			if (portal->holdSnapshot)
			{
				if (portal->resowner)
					UnregisterSnapshotFromOwner(portal->holdSnapshot,
												portal->resowner);
				portal->holdSnapshot = NULL;
			}
			portal->resowner = NULL;
			/* Clear portalSnapshot too, for cleanliness */
			portal->portalSnapshot = NULL;
			continue;
		}

		/* Is it a holdable portal created in the current xact? */
		if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
			portal->createSubid != InvalidSubTransactionId &&
			portal->status == PORTAL_READY)
		{
			/*
			 * We are exiting the transaction that created a holdable cursor.
			 * Instead of dropping the portal, prepare it for access by later
			 * transactions.
			 *
			 * However, if this is PREPARE TRANSACTION rather than COMMIT,
			 * refuse PREPARE, because the semantics seem pretty unclear.
			 */
			if (isPrepare)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));

			HoldPortal(portal);

			/* Report we changed state */
			result = true;
		}
		else if (portal->createSubid == InvalidSubTransactionId)
		{
			/*
			 * Do nothing to cursors held over from a previous transaction
			 * (including ones we just froze in a previous cycle of this loop)
			 */
			continue;
		}
		else
		{
			/* Zap all non-holdable portals */
			PortalDrop(portal, true);

			/* Report we changed state */
			result = true;
		}

		/*
		 * After either freezing or dropping a portal, we have to restart the
		 * iteration, because we could have invoked user-defined code that
		 * caused a drop of the next portal in the hash chain.
		 */
		hash_seq_term(&status);
		hash_seq_init(&status, PortalHashTable);
	}

	return result;
}
772 :
/*
 * Abort processing for portals.
 *
 * At this point we run the cleanup hook if present, but we can't release the
 * portal's memory until the cleanup call.
 *
 * Portals held over from prior transactions and auto-held portals are left
 * alone; everything else is failed/cleaned but not yet freed (that happens
 * in AtCleanup_Portals).
 */
void
AtAbort_Portals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * When elog(FATAL) is in progress, we need to set the active portal
		 * to failed, so that PortalCleanup() doesn't run the executor
		 * shutdown.
		 */
		if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
			MarkPortalFailed(portal);

		/*
		 * Do nothing else to cursors held over from a previous transaction.
		 */
		if (portal->createSubid == InvalidSubTransactionId)
			continue;

		/*
		 * Do nothing to auto-held cursors.  This is similar to the case of a
		 * cursor from a previous transaction, but it could also be that the
		 * cursor was auto-held in this transaction, so it wants to live on.
		 */
		if (portal->autoHeld)
			continue;

		/*
		 * If it was created in the current transaction, we can't do normal
		 * shutdown on a READY portal either; it might refer to objects
		 * created in the failed transaction.  See comments in
		 * AtSubAbort_Portals.
		 */
		if (portal->status == PORTAL_READY)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.
		 */
		if (portal->cleanup)
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.  But leave active portals alone.
		 */
		if (portal->status != PORTAL_ACTIVE)
			MemoryContextDeleteChildren(portal->portalContext);
	}
}
851 :
/*
 * Post-abort cleanup for portals.
 *
 * Delete all portals not held over from prior transactions.
 *
 * Runs after AtAbort_Portals, so resource owners are already gone; the
 * cleanup hooks should also have been run already (we only WARN and skip
 * if not, since running user code here would be unsafe).
 */
void
AtCleanup_Portals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction command.
		 */
		if (portal->status == PORTAL_ACTIVE)
			continue;

		/*
		 * Do nothing to cursors held over from a previous transaction or
		 * auto-held ones.
		 */
		if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
		{
			Assert(portal->status != PORTAL_ACTIVE);
			Assert(portal->resowner == NULL);
			continue;
		}

		/*
		 * If a portal is still pinned, forcibly unpin it. PortalDrop will not
		 * let us drop the portal otherwise. Whoever pinned the portal was
		 * interrupted by the abort too and won't try to use it anymore.
		 */
		if (portal->portalPinned)
			portal->portalPinned = false;

		/*
		 * We had better not call any user-defined code during cleanup, so if
		 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
		 */
		if (portal->cleanup)
		{
			elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
			portal->cleanup = NULL;
		}

		/* Zap it. */
		PortalDrop(portal, false);
	}
}
909 :
910 : /*
911 : * Portal-related cleanup when we return to the main loop on error.
912 : *
913 : * This is different from the cleanup at transaction abort. Auto-held portals
914 : * are cleaned up on error but not on transaction abort.
915 : */
916 : void
917 45310 : PortalErrorCleanup(void)
918 : {
919 : HASH_SEQ_STATUS status;
920 : PortalHashEnt *hentry;
921 :
922 45310 : hash_seq_init(&status, PortalHashTable);
923 :
924 93718 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
925 : {
926 3098 : Portal portal = hentry->portal;
927 :
928 3098 : if (portal->autoHeld)
929 : {
930 4 : portal->portalPinned = false;
931 4 : PortalDrop(portal, false);
932 : }
933 : }
934 45310 : }
935 :
936 : /*
937 : * Pre-subcommit processing for portals.
938 : *
939 : * Reassign portals created or used in the current subtransaction to the
940 : * parent subtransaction.
941 : */
942 : void
943 10756 : AtSubCommit_Portals(SubTransactionId mySubid,
944 : SubTransactionId parentSubid,
945 : int parentLevel,
946 : ResourceOwner parentXactOwner)
947 : {
948 : HASH_SEQ_STATUS status;
949 : PortalHashEnt *hentry;
950 :
951 10756 : hash_seq_init(&status, PortalHashTable);
952 :
953 31010 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
954 : {
955 9498 : Portal portal = hentry->portal;
956 :
957 9498 : if (portal->createSubid == mySubid)
958 : {
959 60 : portal->createSubid = parentSubid;
960 60 : portal->createLevel = parentLevel;
961 60 : if (portal->resowner)
962 60 : ResourceOwnerNewParent(portal->resowner, parentXactOwner);
963 : }
964 9498 : if (portal->activeSubid == mySubid)
965 220 : portal->activeSubid = parentSubid;
966 : }
967 10756 : }
968 :
/*
 * Subtransaction abort handling for portals.
 *
 * Deactivate portals created or used during the failed subtransaction.
 * Note that per AtSubCommit_Portals, this will catch portals created/used
 * in descendants of the subtransaction too.
 *
 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
 *
 * mySubid/parentSubid identify the aborting subxact and its parent;
 * myXactOwner/parentXactOwner are the corresponding resource owners, used
 * below when reattaching a failed portal's resources for cleanup.
 */
void
AtSubAbort_Portals(SubTransactionId mySubid,
				   SubTransactionId parentSubid,
				   ResourceOwner myXactOwner,
				   ResourceOwner parentXactOwner)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	/* Scan every live portal; the hash table itself is not modified here. */
	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/* Was it created in this subtransaction? */
		if (portal->createSubid != mySubid)
		{
			/* No, but maybe it was used in this subtransaction? */
			if (portal->activeSubid == mySubid)
			{
				/* Maintain activeSubid until the portal is removed */
				portal->activeSubid = parentSubid;

				/*
				 * A MarkPortalActive() caller ran an upper-level portal in
				 * this subtransaction and left the portal ACTIVE.  This can't
				 * happen, but force the portal into FAILED state for the same
				 * reasons discussed below.
				 *
				 * We assume we can get away without forcing upper-level READY
				 * portals to fail, even if they were run and then suspended.
				 * In theory a suspended upper-level portal could have
				 * acquired some references to objects that are about to be
				 * destroyed, but there should be sufficient defenses against
				 * such cases: the portal's original query cannot contain such
				 * references, and any references within, say, cached plans of
				 * PL/pgSQL functions are not from active queries and should
				 * be protected by revalidation logic.
				 */
				if (portal->status == PORTAL_ACTIVE)
					MarkPortalFailed(portal);

				/*
				 * Also, if we failed it during the current subtransaction
				 * (either just above, or earlier), reattach its resource
				 * owner to the current subtransaction's resource owner, so
				 * that any resources it still holds will be released while
				 * cleaning up this subtransaction.  This prevents some corner
				 * cases wherein we might get Asserts or worse while cleaning
				 * up objects created during the current subtransaction
				 * (because they're still referenced within this portal).
				 */
				if (portal->status == PORTAL_FAILED && portal->resowner)
				{
					ResourceOwnerNewParent(portal->resowner, myXactOwner);
					portal->resowner = NULL;
				}
			}
			/* Done if it wasn't created in this subtransaction */
			continue;
		}

		/*
		 * Force any live portals of my own subtransaction into FAILED state.
		 * We have to do this because they might refer to objects created or
		 * changed in the failed subtransaction, leading to crashes within
		 * ExecutorEnd when portalcmds.c tries to close down the portal.
		 * Currently, every MarkPortalActive() caller ensures it updates the
		 * portal status again before relinquishing control, so ACTIVE can't
		 * happen here.  If it does happen, dispose the portal like existing
		 * MarkPortalActive() callers would.
		 */
		if (portal->status == PORTAL_READY ||
			portal->status == PORTAL_ACTIVE)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.  (The hook is cleared so it can't run twice.)
		 */
		if (portal->cleanup)
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.
		 */
		MemoryContextDeleteChildren(portal->portalContext);
	}
}
1084 :
1085 : /*
1086 : * Post-subabort cleanup for portals.
1087 : *
1088 : * Drop all portals created in the failed subtransaction (but note that
1089 : * we will not drop any that were reassigned to the parent above).
1090 : */
1091 : void
1092 9438 : AtSubCleanup_Portals(SubTransactionId mySubid)
1093 : {
1094 : HASH_SEQ_STATUS status;
1095 : PortalHashEnt *hentry;
1096 :
1097 9438 : hash_seq_init(&status, PortalHashTable);
1098 :
1099 22650 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1100 : {
1101 13212 : Portal portal = hentry->portal;
1102 :
1103 13212 : if (portal->createSubid != mySubid)
1104 12148 : continue;
1105 :
1106 : /*
1107 : * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1108 : * let us drop the portal otherwise. Whoever pinned the portal was
1109 : * interrupted by the abort too and won't try to use it anymore.
1110 : */
1111 1064 : if (portal->portalPinned)
1112 6 : portal->portalPinned = false;
1113 :
1114 : /*
1115 : * We had better not call any user-defined code during cleanup, so if
1116 : * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
1117 : */
1118 1064 : if (portal->cleanup)
1119 : {
1120 0 : elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
1121 0 : portal->cleanup = NULL;
1122 : }
1123 :
1124 : /* Zap it. */
1125 1064 : PortalDrop(portal, false);
1126 : }
1127 9438 : }
1128 :
1129 : /* Find all available cursors */
1130 : Datum
1131 120 : pg_cursor(PG_FUNCTION_ARGS)
1132 : {
1133 120 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1134 : HASH_SEQ_STATUS hash_seq;
1135 : PortalHashEnt *hentry;
1136 :
1137 : /*
1138 : * We put all the tuples into a tuplestore in one scan of the hashtable.
1139 : * This avoids any issue of the hashtable possibly changing between calls.
1140 : */
1141 120 : InitMaterializedSRF(fcinfo, 0);
1142 :
1143 120 : hash_seq_init(&hash_seq, PortalHashTable);
1144 372 : while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1145 : {
1146 252 : Portal portal = hentry->portal;
1147 : Datum values[6];
1148 252 : bool nulls[6] = {0};
1149 :
1150 : /* report only "visible" entries */
1151 252 : if (!portal->visible)
1152 126 : continue;
1153 : /* also ignore it if PortalDefineQuery hasn't been called yet */
1154 126 : if (!portal->sourceText)
1155 0 : continue;
1156 :
1157 126 : values[0] = CStringGetTextDatum(portal->name);
1158 126 : values[1] = CStringGetTextDatum(portal->sourceText);
1159 126 : values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
1160 126 : values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
1161 126 : values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
1162 126 : values[5] = TimestampTzGetDatum(portal->creation_time);
1163 :
1164 126 : tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
1165 : }
1166 :
1167 120 : return (Datum) 0;
1168 : }
1169 :
1170 : bool
1171 60 : ThereAreNoReadyPortals(void)
1172 : {
1173 : HASH_SEQ_STATUS status;
1174 : PortalHashEnt *hentry;
1175 :
1176 60 : hash_seq_init(&status, PortalHashTable);
1177 :
1178 120 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1179 : {
1180 60 : Portal portal = hentry->portal;
1181 :
1182 60 : if (portal->status == PORTAL_READY)
1183 0 : return false;
1184 : }
1185 :
1186 60 : return true;
1187 : }
1188 :
1189 : /*
1190 : * Hold all pinned portals.
1191 : *
1192 : * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
1193 : * called to protect internally-generated cursors from being dropped during
1194 : * the transaction shutdown. Currently, SPI calls this automatically; PLs
1195 : * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
1196 : * themselves. (Note that we couldn't do this in, say, AtAbort_Portals
1197 : * because we need to run user-defined code while persisting a portal.
1198 : * It's too late to do that once transaction abort has started.)
1199 : *
1200 : * We protect such portals by converting them to held cursors. We mark them
1201 : * as "auto-held" so that exception exit knows to clean them up. (In normal,
1202 : * non-exception code paths, the PL needs to clean such portals itself, since
1203 : * transaction end won't do it anymore; but that should be normal practice
1204 : * anyway.)
1205 : */
1206 : void
1207 4432 : HoldPinnedPortals(void)
1208 : {
1209 : HASH_SEQ_STATUS status;
1210 : PortalHashEnt *hentry;
1211 :
1212 4432 : hash_seq_init(&status, PortalHashTable);
1213 :
1214 8984 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1215 : {
1216 4558 : Portal portal = hentry->portal;
1217 :
1218 4558 : if (portal->portalPinned && !portal->autoHeld)
1219 : {
1220 : /*
1221 : * Doing transaction control, especially abort, inside a cursor
1222 : * loop that is not read-only, for example using UPDATE ...
1223 : * RETURNING, has weird semantics issues. Also, this
1224 : * implementation wouldn't work, because such portals cannot be
1225 : * held. (The core grammar enforces that only SELECT statements
1226 : * can drive a cursor, but for example PL/pgSQL does not restrict
1227 : * it.)
1228 : */
1229 38 : if (portal->strategy != PORTAL_ONE_SELECT)
1230 2 : ereport(ERROR,
1231 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1232 : errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));
1233 :
1234 : /* Verify it's in a suitable state to be held */
1235 36 : if (portal->status != PORTAL_READY)
1236 0 : elog(ERROR, "pinned portal is not ready to be auto-held");
1237 :
1238 36 : HoldPortal(portal);
1239 32 : portal->autoHeld = true;
1240 : }
1241 : }
1242 4426 : }
1243 :
1244 : /*
1245 : * Drop the outer active snapshots for all portals, so that no snapshots
1246 : * remain active.
1247 : *
1248 : * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
1249 : * ROLLBACK inside a procedure. This has to be separate from that since it
1250 : * should not be run until we're done with steps that are likely to fail.
1251 : *
1252 : * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
1253 : * need to clean up snapshot management in VACUUM and perhaps other places.
1254 : */
1255 : void
1256 4426 : ForgetPortalSnapshots(void)
1257 : {
1258 : HASH_SEQ_STATUS status;
1259 : PortalHashEnt *hentry;
1260 4426 : int numPortalSnaps = 0;
1261 4426 : int numActiveSnaps = 0;
1262 :
1263 : /* First, scan PortalHashTable and clear portalSnapshot fields */
1264 4426 : hash_seq_init(&status, PortalHashTable);
1265 :
1266 13404 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1267 : {
1268 4552 : Portal portal = hentry->portal;
1269 :
1270 4552 : if (portal->portalSnapshot != NULL)
1271 : {
1272 4426 : portal->portalSnapshot = NULL;
1273 4426 : numPortalSnaps++;
1274 : }
1275 : /* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
1276 : }
1277 :
1278 : /*
1279 : * Now, pop all the active snapshots, which should be just those that were
1280 : * portal snapshots. Ideally we'd drive this directly off the portal
1281 : * scan, but there's no good way to visit the portals in the correct
1282 : * order. So just cross-check after the fact.
1283 : */
1284 8852 : while (ActiveSnapshotSet())
1285 : {
1286 4426 : PopActiveSnapshot();
1287 4426 : numActiveSnaps++;
1288 : }
1289 :
1290 4426 : if (numPortalSnaps != numActiveSnaps)
1291 0 : elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
1292 : numPortalSnaps, numActiveSnaps);
1293 4426 : }
|