Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * portalmem.c
4 : * backend portal memory management
5 : *
6 : * Portals are objects representing the execution state of a query.
7 : * This module provides memory management services for portals, but it
8 : * doesn't actually run the executor for them.
9 : *
10 : *
11 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
12 : * Portions Copyright (c) 1994, Regents of the University of California
13 : *
14 : * IDENTIFICATION
15 : * src/backend/utils/mmgr/portalmem.c
16 : *
17 : *-------------------------------------------------------------------------
18 : */
19 : #include "postgres.h"
20 :
21 : #include "access/xact.h"
22 : #include "commands/portalcmds.h"
23 : #include "funcapi.h"
24 : #include "miscadmin.h"
25 : #include "storage/ipc.h"
26 : #include "utils/builtins.h"
27 : #include "utils/hsearch.h"
28 : #include "utils/memutils.h"
29 : #include "utils/snapmgr.h"
30 : #include "utils/timestamp.h"
31 : #include "utils/tuplestore.h"
32 :
33 : /*
34 : * Estimate of the maximum number of open portals a user would have,
35 : * used in initially sizing the PortalHashTable in EnablePortalManager().
36 : * Since the hash table can expand, there's no need to make this overly
37 : * generous, and keeping it small avoids unnecessary overhead in the
38 : * hash_seq_search() calls executed during transaction end.
39 : */
40 : #define PORTALS_PER_USER 16
41 :
42 :
43 : /* ----------------
44 : * Global state
45 : * ----------------
46 : */
47 :
48 : #define MAX_PORTALNAME_LEN NAMEDATALEN
49 :
    : /* Hash table entry: maps a portal name to its Portal structure. */
50 : typedef struct portalhashent
51 : {
52 : char portalname[MAX_PORTALNAME_LEN]; /* hash key; Portal->name points into this field */
53 : Portal portal; /* the portal this entry describes */
54 : } PortalHashEnt;
55 :
56 : static HTAB *PortalHashTable = NULL;
57 :
    : /* Look up NAME in PortalHashTable; sets PORTAL to the match or NULL. */
58 : #define PortalHashTableLookup(NAME, PORTAL) \
59 : do { \
60 : PortalHashEnt *hentry; \
61 : \
62 : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
63 : (NAME), HASH_FIND, NULL); \
64 : if (hentry) \
65 : PORTAL = hentry->portal; \
66 : else \
67 : PORTAL = NULL; \
68 : } while(0)
69 :
    : /* Insert PORTAL under NAME; elog(ERROR) if the name already exists. */
70 : #define PortalHashTableInsert(PORTAL, NAME) \
71 : do { \
72 : PortalHashEnt *hentry; bool found; \
73 : \
74 : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
75 : (NAME), HASH_ENTER, &found); \
76 : if (found) \
77 : elog(ERROR, "duplicate portal name"); \
78 : hentry->portal = PORTAL; \
79 : /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
80 : PORTAL->name = hentry->portalname; \
81 : } while(0)
82 :
    : /* Remove PORTAL's entry; only a WARNING (not an error) if it is missing. */
83 : #define PortalHashTableDelete(PORTAL) \
84 : do { \
85 : PortalHashEnt *hentry; \
86 : \
87 : hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
88 : PORTAL->name, HASH_REMOVE, NULL); \
89 : if (hentry == NULL) \
90 : elog(WARNING, "trying to delete portal name that does not exist"); \
91 : } while(0)
92 :
    : /* Parent memory context for all portal structures and their contexts. */
93 : static MemoryContext TopPortalContext = NULL;
94 :
95 :
96 : /* ----------------------------------------------------------------
97 : * public portal interface functions
98 : * ----------------------------------------------------------------
99 : */
100 :
101 : /*
102 : * EnablePortalManager
103 : * Enables the portal management module at backend startup.
104 : */
105 : void
106 19433 : EnablePortalManager(void)
107 : {
108 : HASHCTL ctl;
109 :
110 : Assert(TopPortalContext == NULL); /* must be called only once per backend */
111 :
112 19433 : TopPortalContext = AllocSetContextCreate(TopMemoryContext,
113 : "TopPortalContext",
114 : ALLOCSET_DEFAULT_SIZES);
115 :
116 19433 : ctl.keysize = MAX_PORTALNAME_LEN;
117 19433 : ctl.entrysize = sizeof(PortalHashEnt);
118 :
119 : /*
120 : * use PORTALS_PER_USER as a guess of how many hash table entries to
121 : * create, initially
122 : */
123 19433 : PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
124 : &ctl, HASH_ELEM | HASH_STRINGS);
125 : }
126 :
127 : /*
128 : * GetPortalByName
129 : * Returns a portal given a portal name, or NULL if name not found.
130 : */
131 : Portal
132 524170 : GetPortalByName(const char *name)
133 : {
134 : Portal portal;
135 :
136 524170 : if (name)
137 524170 : PortalHashTableLookup(name, portal);
138 : else
139 0 : portal = NULL; /* silently tolerate a NULL name */
140 :
141 524170 : return portal;
142 : }
143 :
144 : /*
145 : * PortalGetPrimaryStmt
146 : * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
147 : *
148 : * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
149 : * portal are marked canSetTag, returns the first one. Neither of these
150 : * cases should occur in present usages of this function.
151 : */
152 : PlannedStmt *
153 231100 : PortalGetPrimaryStmt(Portal portal)
154 : {
155 : ListCell *lc;
156 :
157 231100 : foreach(lc, portal->stmts)
158 : {
159 231100 : PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
160 :
161 231100 : if (stmt->canSetTag)
162 231100 : return stmt;
163 : }
164 0 : return NULL; /* no canSetTag statement found */
165 : }
166 :
167 : /*
168 : * CreatePortal
169 : * Returns a new portal given a name.
170 : *
171 : * allowDup: if true, automatically drop any pre-existing portal of the
172 : * same name (if false, an error is raised).
173 : *
174 : * dupSilent: if true, don't even emit a WARNING.
175 : */
176 : Portal
177 464313 : CreatePortal(const char *name, bool allowDup, bool dupSilent)
178 : {
179 : Portal portal;
180 :
181 : Assert(name);
182 :
183 464313 : portal = GetPortalByName(name);
184 464313 : if (PortalIsValid(portal))
185 : {
186 6208 : if (!allowDup)
187 0 : ereport(ERROR,
188 : (errcode(ERRCODE_DUPLICATE_CURSOR),
189 : errmsg("cursor \"%s\" already exists", name)));
190 6208 : if (!dupSilent)
191 0 : ereport(WARNING,
192 : (errcode(ERRCODE_DUPLICATE_CURSOR),
193 : errmsg("closing existing cursor \"%s\"",
194 : name)));
195 6208 : PortalDrop(portal, false);
196 : }
197 :
198 : /* make new portal structure (zeroed, so all fields start NULL/false) */
199 464313 : portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);
200 :
201 : /* initialize portal context; typically it won't store much */
202 464313 : portal->portalContext = AllocSetContextCreate(TopPortalContext,
203 : "PortalContext",
204 : ALLOCSET_SMALL_SIZES);
205 :
206 : /* create a resource owner for the portal */
207 464313 : portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
208 : "Portal");
209 :
210 : /* initialize portal fields that don't start off zero */
211 464313 : portal->status = PORTAL_NEW;
212 464313 : portal->cleanup = PortalCleanup;
213 464313 : portal->createSubid = GetCurrentSubTransactionId();
214 464313 : portal->activeSubid = portal->createSubid;
215 464313 : portal->createLevel = GetCurrentTransactionNestLevel();
216 464313 : portal->strategy = PORTAL_MULTI_QUERY;
217 464313 : portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
218 464313 : portal->atStart = true;
219 464313 : portal->atEnd = true; /* disallow fetches until query is set */
220 464313 : portal->visible = true;
221 464313 : portal->creation_time = GetCurrentStatementStartTimestamp();
222 :
223 : /* put portal in table (sets portal->name) */
224 464313 : PortalHashTableInsert(portal, name);
225 :
226 : /* for named portals reuse portal->name copy */
227 464313 : MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");
228 :
229 464313 : return portal;
230 : }
231 :
232 : /*
233 : * CreateNewPortal
234 : * Create a new portal, assigning it a random nonconflicting name.
235 : */
236 : Portal
237 16846 : CreateNewPortal(void)
238 : {
239 : static unsigned int unnamed_portal_count = 0;
240 :
241 : char portalname[MAX_PORTALNAME_LEN];
242 :
243 : /* Select a nonconflicting name (counter could wrap, hence the check) */
244 : for (;;)
245 : {
246 16846 : unnamed_portal_count++;
247 16846 : sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
248 16846 : if (GetPortalByName(portalname) == NULL)
249 16846 : break;
250 : }
251 :
252 16846 : return CreatePortal(portalname, false, false);
253 : }
254 :
255 : /*
256 : * PortalDefineQuery
257 : * A simple subroutine to establish a portal's query.
258 : *
259 : * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
260 : * allowed anymore to pass NULL. (If you really don't have source text,
261 : * you can pass a constant string, perhaps "(query not available)".)
262 : *
263 : * commandTag shall be NULL if and only if the original query string
264 : * (before rewriting) was an empty string. Also, the passed commandTag must
265 : * be a pointer to a constant string, since it is not copied.
266 : *
267 : * If cplan is provided, then it is a cached plan containing the stmts, and
268 : * the caller must have done GetCachedPlan(), causing a refcount increment.
269 : * The refcount will be released when the portal is destroyed.
270 : *
271 : * If cplan is NULL, then it is the caller's responsibility to ensure that
272 : * the passed plan trees have adequate lifetime. Typically this is done by
273 : * copying them into the portal's context.
274 : *
275 : * The caller is also responsible for ensuring that the passed prepStmtName
276 : * (if not NULL) and sourceText have adequate lifetime.
277 : *
278 : * NB: this function mustn't do much beyond storing the passed values; in
279 : * particular don't do anything that risks elog(ERROR). If that were to
280 : * happen here before storing the cplan reference, we'd leak the plancache
281 : * refcount that the caller is trying to hand off to us.
282 : */
283 : void
284 464267 : PortalDefineQuery(Portal portal,
285 : const char *prepStmtName,
286 : const char *sourceText,
287 : CommandTag commandTag,
288 : List *stmts,
289 : CachedPlan *cplan)
290 : {
291 : Assert(PortalIsValid(portal));
292 : Assert(portal->status == PORTAL_NEW);
293 :
294 : Assert(sourceText != NULL);
295 : Assert(commandTag != CMDTAG_UNKNOWN || stmts == NIL);
296 :
297 464267 : portal->prepStmtName = prepStmtName;
298 464267 : portal->sourceText = sourceText;
299 464267 : portal->commandTag = commandTag;
300 464267 : SetQueryCompletion(&portal->qc, commandTag, 0);
301 464267 : portal->stmts = stmts;
302 464267 : portal->cplan = cplan;
303 464267 : portal->status = PORTAL_DEFINED; /* state transition NEW -> DEFINED */
304 : }
305 :
306 : /*
307 : * PortalReleaseCachedPlan
308 : * Release a portal's reference to its cached plan, if any.
309 : */
310 : static void
311 485983 : PortalReleaseCachedPlan(Portal portal)
312 : {
313 485983 : if (portal->cplan)
314 : {
315 21194 : ReleaseCachedPlan(portal->cplan, NULL); /* drop the refcount taken at PortalDefineQuery time */
316 21194 : portal->cplan = NULL;
317 :
318 : /*
319 : * We must also clear portal->stmts which is now a dangling reference
320 : * to the cached plan's plan list. This protects any code that might
321 : * try to examine the Portal later.
322 : */
323 21194 : portal->stmts = NIL;
324 : }
325 : }
326 :
327 : /*
328 : * PortalCreateHoldStore
329 : * Create the tuplestore for a portal.
330 : */
331 : void
332 30709 : PortalCreateHoldStore(Portal portal)
333 : {
334 : MemoryContext oldcxt;
335 :
336 : Assert(portal->holdContext == NULL);
337 : Assert(portal->holdStore == NULL);
338 : Assert(portal->holdSnapshot == NULL);
339 :
340 : /*
341 : * Create the memory context that is used for storage of the tuple set.
342 : * Note this is NOT a child of the portal's portalContext.
343 : */
344 30709 : portal->holdContext =
345 30709 : AllocSetContextCreate(TopPortalContext,
346 : "PortalHoldContext",
347 : ALLOCSET_DEFAULT_SIZES);
348 :
349 : /*
350 : * Create the tuple store, selecting cross-transaction temp files, and
351 : * enabling random access only if cursor requires scrolling.
352 : *
353 : * XXX: Should maintenance_work_mem be used for the portal size?
354 : */
355 30709 : oldcxt = MemoryContextSwitchTo(portal->holdContext);
356 :
357 30709 : portal->holdStore =
358 30709 : tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
359 : true, work_mem); /* true = cross-transaction temp files */
360 :
361 30709 : MemoryContextSwitchTo(oldcxt);
362 : }
363 :
364 : /*
365 : * PinPortal
366 : * Protect a portal from dropping.
367 : *
368 : * A pinned portal is still unpinned and dropped at transaction or
369 : * subtransaction abort.
370 : */
371 : void
372 8081 : PinPortal(Portal portal)
373 : {
374 8081 : if (portal->portalPinned)
375 0 : elog(ERROR, "portal already pinned"); /* double-pin is a caller bug */
376 :
377 8081 : portal->portalPinned = true;
378 : }
379 :
    : /*
    : * UnpinPortal
    : * Undo PinPortal, making the portal droppable again.
    : */
380 : void
381 8053 : UnpinPortal(Portal portal)
382 : {
383 8053 : if (!portal->portalPinned)
384 0 : elog(ERROR, "portal not pinned");
385 :
386 8053 : portal->portalPinned = false;
387 : }
388 :
389 : /*
390 : * MarkPortalActive
391 : * Transition a portal from READY to ACTIVE state.
392 : *
393 : * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
394 : */
395 : void
396 487137 : MarkPortalActive(Portal portal)
397 : {
398 : /* For safety, this is a runtime test not just an Assert */
399 487137 : if (portal->status != PORTAL_READY)
400 12 : ereport(ERROR,
401 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
402 : errmsg("portal \"%s\" cannot be run", portal->name)));
403 : /* Perform the state transition */
404 487125 : portal->status = PORTAL_ACTIVE;
405 487125 : portal->activeSubid = GetCurrentSubTransactionId(); /* remember which subxact ran it */
406 : }
407 :
408 : /*
409 : * MarkPortalDone
410 : * Transition a portal from ACTIVE to DONE state.
411 : *
412 : * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
413 : */
414 : void
415 232167 : MarkPortalDone(Portal portal)
416 : {
417 : /* Perform the state transition */
418 : Assert(portal->status == PORTAL_ACTIVE);
419 232167 : portal->status = PORTAL_DONE;
420 :
421 : /*
422 : * Allow portalcmds.c to clean up the state it knows about. We might as
423 : * well do that now, since the portal can't be executed any more.
424 : *
425 : * In some cases involving execution of a ROLLBACK command in an already
426 : * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
427 : * with the cleanup hook still unexecuted.
428 : */
429 232167 : if (portal->cleanup) /* run and clear the hook exactly once */
430 : {
431 232133 : portal->cleanup(portal);
432 232133 : portal->cleanup = NULL;
433 : }
434 : }
435 :
436 : /*
437 : * MarkPortalFailed
438 : * Transition a portal into FAILED state.
439 : *
440 : * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
441 : */
442 : void
443 21477 : MarkPortalFailed(Portal portal)
444 : {
445 : /* Perform the state transition */
446 : Assert(portal->status != PORTAL_DONE);
447 21477 : portal->status = PORTAL_FAILED;
448 :
449 : /*
450 : * Allow portalcmds.c to clean up the state it knows about. We might as
451 : * well do that now, since the portal can't be executed any more.
452 : *
453 : * In some cases involving cleanup of an already aborted transaction, this
454 : * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
455 : * still unexecuted.
456 : */
457 21477 : if (portal->cleanup) /* run and clear the hook exactly once */
458 : {
459 21470 : portal->cleanup(portal);
460 21470 : portal->cleanup = NULL;
461 : }
462 : }
463 :
464 : /*
465 : * PortalDrop
466 : * Destroy the portal.
467 : */
468 : void
469 464301 : PortalDrop(Portal portal, bool isTopCommit)
470 : {
471 : Assert(PortalIsValid(portal));
472 :
473 : /*
474 : * Don't allow dropping a pinned portal, it's still needed by whoever
475 : * pinned it.
476 : */
477 464301 : if (portal->portalPinned)
478 0 : ereport(ERROR,
479 : (errcode(ERRCODE_INVALID_CURSOR_STATE),
480 : errmsg("cannot drop pinned portal \"%s\"", portal->name)));
481 :
482 : /*
483 : * Not sure if the PORTAL_ACTIVE case can validly happen or not...
484 : */
485 464301 : if (portal->status == PORTAL_ACTIVE)
486 0 : ereport(ERROR,
487 : (errcode(ERRCODE_INVALID_CURSOR_STATE),
488 : errmsg("cannot drop active portal \"%s\"", portal->name)));
489 :
490 : /*
491 : * Allow portalcmds.c to clean up the state it knows about, in particular
492 : * shutting down the executor if still active. This step potentially runs
493 : * user-defined code so failure has to be expected. It's the cleanup
494 : * hook's responsibility to not try to do that more than once, in the case
495 : * that failure occurs and then we come back to drop the portal again
496 : * during transaction abort.
497 : *
498 : * Note: in most paths of control, this will have been done already in
499 : * MarkPortalDone or MarkPortalFailed. We're just making sure.
500 : */
501 464301 : if (portal->cleanup)
502 : {
503 210610 : portal->cleanup(portal);
504 210610 : portal->cleanup = NULL;
505 : }
506 :
507 : /* There shouldn't be an active snapshot anymore, except after error */
508 : Assert(portal->portalSnapshot == NULL || !isTopCommit);
509 :
510 : /*
511 : * Remove portal from hash table. Because we do this here, we will not
512 : * come back to try to remove the portal again if there's any error in the
513 : * subsequent steps. Better to leak a little memory than to get into an
514 : * infinite error-recovery loop.
515 : */
516 464301 : PortalHashTableDelete(portal);
517 :
518 : /* drop cached plan reference, if any */
519 464301 : PortalReleaseCachedPlan(portal);
520 :
521 : /*
522 : * If portal has a snapshot protecting its data, release that. This needs
523 : * a little care since the registration will be attached to the portal's
524 : * resowner; if the portal failed, we will already have released the
525 : * resowner (and the snapshot) during transaction abort.
526 : */
527 464301 : if (portal->holdSnapshot)
528 : {
529 26868 : if (portal->resowner)
530 26578 : UnregisterSnapshotFromOwner(portal->holdSnapshot,
531 : portal->resowner);
532 26868 : portal->holdSnapshot = NULL;
533 : }
534 :
535 : /*
536 : * Release any resources still attached to the portal. There are several
537 : * cases being covered here:
538 : *
539 : * Top transaction commit (indicated by isTopCommit): normally we should
540 : * do nothing here and let the regular end-of-transaction resource
541 : * releasing mechanism handle these resources too. However, if we have a
542 : * FAILED portal (eg, a cursor that got an error), we'd better clean up
543 : * its resources to avoid resource-leakage warning messages.
544 : *
545 : * Sub transaction commit: never comes here at all, since we don't kill
546 : * any portals in AtSubCommit_Portals().
547 : *
548 : * Main or sub transaction abort: we will do nothing here because
549 : * portal->resowner was already set NULL; the resources were already
550 : * cleaned up in transaction abort.
551 : *
552 : * Ordinary portal drop: must release resources. However, if the portal
553 : * is not FAILED then we do not release its locks. The locks become the
554 : * responsibility of the transaction's ResourceOwner (since it is the
555 : * parent of the portal's owner) and will be released when the transaction
556 : * eventually ends.
557 : */
558 464301 : if (portal->resowner &&
559 435823 : (!isTopCommit || portal->status == PORTAL_FAILED))
560 : {
561 430164 : bool isCommit = (portal->status != PORTAL_FAILED);
562 :
563 430164 : ResourceOwnerRelease(portal->resowner,
564 : RESOURCE_RELEASE_BEFORE_LOCKS,
565 : isCommit, false);
566 430164 : ResourceOwnerRelease(portal->resowner,
567 : RESOURCE_RELEASE_LOCKS,
568 : isCommit, false);
569 430164 : ResourceOwnerRelease(portal->resowner,
570 : RESOURCE_RELEASE_AFTER_LOCKS,
571 : isCommit, false);
572 430164 : ResourceOwnerDelete(portal->resowner);
573 : }
574 464301 : portal->resowner = NULL; /* in every path, the resowner is gone now */
575 :
576 : /*
577 : * Delete tuplestore if present. We should do this even under error
578 : * conditions; since the tuplestore would have been using cross-
579 : * transaction storage, its temp files need to be explicitly deleted.
580 : */
581 464301 : if (portal->holdStore)
582 : {
583 : MemoryContext oldcontext;
584 :
585 30697 : oldcontext = MemoryContextSwitchTo(portal->holdContext);
586 30697 : tuplestore_end(portal->holdStore);
587 30697 : MemoryContextSwitchTo(oldcontext);
588 30697 : portal->holdStore = NULL;
589 : }
590 :
591 : /* delete tuplestore storage, if any */
592 464301 : if (portal->holdContext)
593 30697 : MemoryContextDelete(portal->holdContext);
594 :
595 : /* release subsidiary storage */
596 464301 : MemoryContextDelete(portal->portalContext);
597 :
598 : /* release portal struct (it's in TopPortalContext) */
599 464301 : pfree(portal);
600 : }
601 :
602 : /*
603 : * Delete all declared cursors.
604 : *
605 : * Used by commands: CLOSE ALL, DISCARD ALL
606 : */
607 : void
608 12 : PortalHashTableDeleteAll(void)
609 : {
610 : HASH_SEQ_STATUS status;
611 : PortalHashEnt *hentry;
612 :
613 12 : if (PortalHashTable == NULL) /* no portals ever created */
614 0 : return;
615 :
616 12 : hash_seq_init(&status, PortalHashTable);
617 48 : while ((hentry = hash_seq_search(&status)) != NULL)
618 : {
619 36 : Portal portal = hentry->portal;
620 :
621 : /* Can't close the active portal (the one running the command) */
622 36 : if (portal->status == PORTAL_ACTIVE)
623 20 : continue;
624 :
625 16 : PortalDrop(portal, false);
626 :
627 : /* Restart the iteration in case that led to other drops */
628 16 : hash_seq_term(&status);
629 16 : hash_seq_init(&status, PortalHashTable);
630 : }
631 : }
632 :
633 : /*
634 : * "Hold" a portal. Prepare it for access by later transactions.
635 : */
636 : static void
637 49 : HoldPortal(Portal portal)
638 : {
639 : /*
640 : * Note that PersistHoldablePortal() must release all resources used by
641 : * the portal that are local to the creating transaction.
642 : */
643 49 : PortalCreateHoldStore(portal);
644 49 : PersistHoldablePortal(portal);
645 :
646 : /* drop cached plan reference, if any */
647 47 : PortalReleaseCachedPlan(portal);
648 :
649 : /*
650 : * Any resources belonging to the portal will be released in the upcoming
651 : * transaction-wide cleanup; the portal will no longer have its own
652 : * resources.
653 : */
654 47 : portal->resowner = NULL;
655 :
656 : /*
657 : * Having successfully exported the holdable cursor, mark it as not
658 : * belonging to this transaction.
659 : */
660 47 : portal->createSubid = InvalidSubTransactionId;
661 47 : portal->activeSubid = InvalidSubTransactionId;
662 47 : portal->createLevel = 0; /* now a cross-transaction portal */
663 : }
664 :
665 : /*
666 : * Pre-commit processing for portals.
667 : *
668 : * Holdable cursors created in this transaction need to be converted to
669 : * materialized form, since we are going to close down the executor and
670 : * release locks. Non-holdable portals created in this transaction are
671 : * simply removed. Portals remaining from prior transactions should be
672 : * left untouched.
673 : *
674 : * Returns true if any portals changed state (possibly causing user-defined
675 : * code to be run), false if not.
676 : */
677 : bool
678 586004 : PreCommit_Portals(bool isPrepare)
679 : {
680 586004 : bool result = false;
681 : HASH_SEQ_STATUS status;
682 : PortalHashEnt *hentry;
683 :
684 586004 : hash_seq_init(&status, PortalHashTable);
685 :
686 628133 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
687 : {
688 42129 : Portal portal = hentry->portal;
689 :
690 : /*
691 : * There should be no pinned portals anymore. Complain if someone
692 : * leaked one. Auto-held portals are allowed; we assume that whoever
693 : * pinned them is managing them.
694 : */
695 42129 : if (portal->portalPinned && !portal->autoHeld)
696 0 : elog(ERROR, "cannot commit while a portal is pinned");
697 :
698 : /*
699 : * Do not touch active portals --- this can only happen in the case of
700 : * a multi-transaction utility command, such as VACUUM, or a commit in
701 : * a procedure.
702 : *
703 : * Note however that any resource owner attached to such a portal is
704 : * still going to go away, so don't leave a dangling pointer. Also
705 : * unregister any snapshots held by the portal, mainly to avoid
706 : * snapshot leak warnings from ResourceOwnerRelease().
707 : */
708 42129 : if (portal->status == PORTAL_ACTIVE)
709 : {
710 36075 : if (portal->holdSnapshot)
711 : {
712 1 : if (portal->resowner)
713 1 : UnregisterSnapshotFromOwner(portal->holdSnapshot,
714 : portal->resowner);
715 1 : portal->holdSnapshot = NULL;
716 : }
717 36075 : portal->resowner = NULL;
718 : /* Clear portalSnapshot too, for cleanliness */
719 36075 : portal->portalSnapshot = NULL;
720 36075 : continue;
721 : }
722 :
723 : /* Is it a holdable portal created in the current xact? */
724 6054 : if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
725 329 : portal->createSubid != InvalidSubTransactionId &&
726 31 : portal->status == PORTAL_READY)
727 : {
728 : /*
729 : * We are exiting the transaction that created a holdable cursor.
730 : * Instead of dropping the portal, prepare it for access by later
731 : * transactions.
732 : *
733 : * However, if this is PREPARE TRANSACTION rather than COMMIT,
734 : * refuse PREPARE, because the semantics seem pretty unclear.
735 : */
736 31 : if (isPrepare)
737 0 : ereport(ERROR,
738 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
739 : errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
740 :
741 31 : HoldPortal(portal); /* materialize the holdable cursor's result set */
742 :
743 : /* Report we changed state */
744 31 : result = true;
745 : }
746 6023 : else if (portal->createSubid == InvalidSubTransactionId)
747 : {
748 : /*
749 : * Do nothing to cursors held over from a previous transaction
750 : * (including ones we just froze in a previous cycle of this loop)
751 : */
752 340 : continue;
753 : }
754 : else
755 : {
756 : /* Zap all non-holdable portals */
757 5683 : PortalDrop(portal, true);
758 :
759 : /* Report we changed state */
760 5683 : result = true;
761 : }
762 :
763 : /*
764 : * After either freezing or dropping a portal, we have to restart the
765 : * iteration, because we could have invoked user-defined code that
766 : * caused a drop of the next portal in the hash chain.
767 : */
768 5714 : hash_seq_term(&status);
769 5714 : hash_seq_init(&status, PortalHashTable);
770 : }
771 :
772 586004 : return result;
773 : }
774 :
775 : /*
776 : * Abort processing for portals.
777 : *
778 : * At this point we run the cleanup hook if present, but we can't release the
779 : * portal's memory until the cleanup call.
780 : */
781 : void
782 34476 : AtAbort_Portals(void)
783 : {
784 : HASH_SEQ_STATUS status;
785 : PortalHashEnt *hentry;
786 :
787 34476 : hash_seq_init(&status, PortalHashTable);
788 :
789 55327 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
790 : {
791 20851 : Portal portal = hentry->portal;
792 :
793 : /*
794 : * When elog(FATAL) is in progress, we need to set the active portal
795 : * to failed, so that PortalCleanup() doesn't run the executor
796 : * shutdown.
797 : */
798 20851 : if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
799 4 : MarkPortalFailed(portal);
800 :
801 : /*
802 : * Do nothing else to cursors held over from a previous transaction.
803 : */
804 20851 : if (portal->createSubid == InvalidSubTransactionId)
805 83 : continue;
806 :
807 : /*
808 : * Do nothing to auto-held cursors. This is similar to the case of a
809 : * cursor from a previous transaction, but it could also be that the
810 : * cursor was auto-held in this transaction, so it wants to live on.
811 : */
812 20768 : if (portal->autoHeld)
813 0 : continue;
814 :
815 : /*
816 : * If it was created in the current transaction, we can't do normal
817 : * shutdown on a READY portal either; it might refer to objects
818 : * created in the failed transaction. See comments in
819 : * AtSubAbort_Portals.
820 : */
821 20768 : if (portal->status == PORTAL_READY)
822 635 : MarkPortalFailed(portal);
823 :
824 : /*
825 : * Allow portalcmds.c to clean up the state it knows about, if we
826 : * haven't already.
827 : */
828 20768 : if (portal->cleanup)
829 : {
830 88 : portal->cleanup(portal);
831 88 : portal->cleanup = NULL;
832 : }
833 :
834 : /* drop cached plan reference, if any */
835 20768 : PortalReleaseCachedPlan(portal);
836 :
837 : /*
838 : * Any resources belonging to the portal will be released in the
839 : * upcoming transaction-wide cleanup; they will be gone before we run
840 : * PortalDrop.
841 : */
842 20768 : portal->resowner = NULL;
843 :
844 : /*
845 : * Although we can't delete the portal data structure proper, we can
846 : * release any memory in subsidiary contexts, such as executor state.
847 : * The cleanup hook was the last thing that might have needed data
848 : * there. But leave active portals alone.
849 : */
850 20768 : if (portal->status != PORTAL_ACTIVE)
851 20669 : MemoryContextDeleteChildren(portal->portalContext);
852 : }
853 : }
853 :
854 : /*
855 : * Post-abort cleanup for portals.
856 : *
857 : * Delete all portals not held over from prior transactions.
858 : */
859 : void
860 34462 : AtCleanup_Portals(void)
861 : {
862 : HASH_SEQ_STATUS status;
863 : PortalHashEnt *hentry;
864 :
865 34462 : hash_seq_init(&status, PortalHashTable);
866 :
867 54507 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
868 : {
869 20045 : Portal portal = hentry->portal;
870 :
871 : /*
872 : * Do not touch active portals --- this can only happen in the case of
873 : * a multi-transaction command.
874 : */
875 20045 : if (portal->status == PORTAL_ACTIVE)
876 99 : continue;
877 :
878 : /*
879 : * Do nothing to cursors held over from a previous transaction or
880 : * auto-held ones.
881 : */
882 19946 : if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
883 : {
884 : Assert(portal->status != PORTAL_ACTIVE);
885 : Assert(portal->resowner == NULL); /* cleared in AtAbort_Portals */
886 83 : continue;
887 : }
888 :
889 : /*
890 : * If a portal is still pinned, forcibly unpin it. PortalDrop will not
891 : * let us drop the portal otherwise. Whoever pinned the portal was
892 : * interrupted by the abort too and won't try to use it anymore.
893 : */
894 19863 : if (portal->portalPinned)
895 23 : portal->portalPinned = false;
896 :
897 : /*
898 : * We had better not call any user-defined code during cleanup, so if
899 : * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
900 : */
901 19863 : if (portal->cleanup)
902 : {
903 0 : elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
904 0 : portal->cleanup = NULL;
905 : }
906 :
907 : /* Zap it. */
908 19863 : PortalDrop(portal, false);
909 : }
910 : }
911 :
912 : /*
913 : * Portal-related cleanup when we return to the main loop on error.
914 : *
915 : * This is different from the cleanup at transaction abort. Auto-held portals
916 : * are cleaned up on error but not on transaction abort.
917 : */
918 : void
919 30399 : PortalErrorCleanup(void)
920 : {
921 : HASH_SEQ_STATUS status;
922 : PortalHashEnt *hentry;
923 :
924 30399 : hash_seq_init(&status, PortalHashTable);
925 :
926 62757 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
927 : {
928 1959 : Portal portal = hentry->portal;
929 :
930 1959 : if (portal->autoHeld)
931 : {
932 2 : portal->portalPinned = false; /* force-unpin so PortalDrop will accept it */
933 2 : PortalDrop(portal, false);
934 : }
935 : }
936 : }
937 :
938 : /*
939 : * Pre-subcommit processing for portals.
940 : *
941 : * Reassign portals created or used in the current subtransaction to the
942 : * parent subtransaction.
943 : */
944 : void
945 5868 : AtSubCommit_Portals(SubTransactionId mySubid,
946 : SubTransactionId parentSubid,
947 : int parentLevel,
948 : ResourceOwner parentXactOwner)
949 : {
950 : HASH_SEQ_STATUS status;
951 : PortalHashEnt *hentry;
952 :
953 5868 : hash_seq_init(&status, PortalHashTable);
954 :
955 16886 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
956 : {
957 5150 : Portal portal = hentry->portal;
958 :
959 5150 : if (portal->createSubid == mySubid)
960 : {
961 30 : portal->createSubid = parentSubid;
962 30 : portal->createLevel = parentLevel;
963 30 : if (portal->resowner)
964 30 : ResourceOwnerNewParent(portal->resowner, parentXactOwner); /* reparent resources too */
965 : }
966 5150 : if (portal->activeSubid == mySubid)
967 111 : portal->activeSubid = parentSubid;
968 : }
969 : }
970 :
/*
 * Subtransaction abort handling for portals.
 *
 * Deactivate portals created or used during the failed subtransaction.
 * Note that per AtSubCommit_Portals, this will catch portals created/used
 * in descendants of the subtransaction too.
 *
 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
 *
 * mySubid/parentSubid identify the aborting subtransaction and its parent;
 * myXactOwner/parentXactOwner are the corresponding resource owners, used
 * when reattaching a failed portal's resources for cleanup.
 */
void
AtSubAbort_Portals(SubTransactionId mySubid,
				   SubTransactionId parentSubid,
				   ResourceOwner myXactOwner,
				   ResourceOwner parentXactOwner)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/* Was it created in this subtransaction? */
		if (portal->createSubid != mySubid)
		{
			/* No, but maybe it was used in this subtransaction? */
			if (portal->activeSubid == mySubid)
			{
				/* Maintain activeSubid until the portal is removed */
				portal->activeSubid = parentSubid;

				/*
				 * A MarkPortalActive() caller ran an upper-level portal in
				 * this subtransaction and left the portal ACTIVE. This can't
				 * happen, but force the portal into FAILED state for the same
				 * reasons discussed below.
				 *
				 * We assume we can get away without forcing upper-level READY
				 * portals to fail, even if they were run and then suspended.
				 * In theory a suspended upper-level portal could have
				 * acquired some references to objects that are about to be
				 * destroyed, but there should be sufficient defenses against
				 * such cases: the portal's original query cannot contain such
				 * references, and any references within, say, cached plans of
				 * PL/pgSQL functions are not from active queries and should
				 * be protected by revalidation logic.
				 */
				if (portal->status == PORTAL_ACTIVE)
					MarkPortalFailed(portal);

				/*
				 * Also, if we failed it during the current subtransaction
				 * (either just above, or earlier), reattach its resource
				 * owner to the current subtransaction's resource owner, so
				 * that any resources it still holds will be released while
				 * cleaning up this subtransaction.  This prevents some corner
				 * cases wherein we might get Asserts or worse while cleaning
				 * up objects created during the current subtransaction
				 * (because they're still referenced within this portal).
				 */
				if (portal->status == PORTAL_FAILED && portal->resowner)
				{
					ResourceOwnerNewParent(portal->resowner, myXactOwner);
					portal->resowner = NULL;
				}
			}
			/* Done if it wasn't created in this subtransaction */
			continue;
		}

		/*
		 * Force any live portals of my own subtransaction into FAILED state.
		 * We have to do this because they might refer to objects created or
		 * changed in the failed subtransaction, leading to crashes within
		 * ExecutorEnd when portalcmds.c tries to close down the portal.
		 * Currently, every MarkPortalActive() caller ensures it updates the
		 * portal status again before relinquishing control, so ACTIVE can't
		 * happen here.  If it does happen, dispose the portal like existing
		 * MarkPortalActive() callers would.
		 */
		if (portal->status == PORTAL_READY ||
			portal->status == PORTAL_ACTIVE)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.
		 */
		if (portal->cleanup)
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.
		 */
		MemoryContextDeleteChildren(portal->portalContext);
	}
}
1086 :
1087 : /*
1088 : * Post-subabort cleanup for portals.
1089 : *
1090 : * Drop all portals created in the failed subtransaction (but note that
1091 : * we will not drop any that were reassigned to the parent above).
1092 : */
1093 : void
1094 5949 : AtSubCleanup_Portals(SubTransactionId mySubid)
1095 : {
1096 : HASH_SEQ_STATUS status;
1097 : PortalHashEnt *hentry;
1098 :
1099 5949 : hash_seq_init(&status, PortalHashTable);
1100 :
1101 14537 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1102 : {
1103 8588 : Portal portal = hentry->portal;
1104 :
1105 8588 : if (portal->createSubid != mySubid)
1106 7880 : continue;
1107 :
1108 : /*
1109 : * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1110 : * let us drop the portal otherwise. Whoever pinned the portal was
1111 : * interrupted by the abort too and won't try to use it anymore.
1112 : */
1113 708 : if (portal->portalPinned)
1114 3 : portal->portalPinned = false;
1115 :
1116 : /*
1117 : * We had better not call any user-defined code during cleanup, so if
1118 : * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
1119 : */
1120 708 : if (portal->cleanup)
1121 : {
1122 0 : elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
1123 0 : portal->cleanup = NULL;
1124 : }
1125 :
1126 : /* Zap it. */
1127 708 : PortalDrop(portal, false);
1128 : }
1129 5949 : }
1130 :
1131 : /* Find all available cursors */
1132 : Datum
1133 75 : pg_cursor(PG_FUNCTION_ARGS)
1134 : {
1135 75 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1136 : HASH_SEQ_STATUS hash_seq;
1137 : PortalHashEnt *hentry;
1138 :
1139 : /*
1140 : * We put all the tuples into a tuplestore in one scan of the hashtable.
1141 : * This avoids any issue of the hashtable possibly changing between calls.
1142 : */
1143 75 : InitMaterializedSRF(fcinfo, 0);
1144 :
1145 75 : hash_seq_init(&hash_seq, PortalHashTable);
1146 238 : while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1147 : {
1148 163 : Portal portal = hentry->portal;
1149 : Datum values[6];
1150 163 : bool nulls[6] = {0};
1151 :
1152 : /* report only "visible" entries */
1153 163 : if (!portal->visible)
1154 79 : continue;
1155 : /* also ignore it if PortalDefineQuery hasn't been called yet */
1156 84 : if (!portal->sourceText)
1157 0 : continue;
1158 :
1159 84 : values[0] = CStringGetTextDatum(portal->name);
1160 84 : values[1] = CStringGetTextDatum(portal->sourceText);
1161 84 : values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
1162 84 : values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
1163 84 : values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
1164 84 : values[5] = TimestampTzGetDatum(portal->creation_time);
1165 :
1166 84 : tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
1167 : }
1168 :
1169 75 : return (Datum) 0;
1170 : }
1171 :
1172 : bool
1173 37 : ThereAreNoReadyPortals(void)
1174 : {
1175 : HASH_SEQ_STATUS status;
1176 : PortalHashEnt *hentry;
1177 :
1178 37 : hash_seq_init(&status, PortalHashTable);
1179 :
1180 74 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1181 : {
1182 37 : Portal portal = hentry->portal;
1183 :
1184 37 : if (portal->status == PORTAL_READY)
1185 0 : return false;
1186 : }
1187 :
1188 37 : return true;
1189 : }
1190 :
1191 : /*
1192 : * Hold all pinned portals.
1193 : *
1194 : * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
1195 : * called to protect internally-generated cursors from being dropped during
1196 : * the transaction shutdown. Currently, SPI calls this automatically; PLs
1197 : * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
1198 : * themselves. (Note that we couldn't do this in, say, AtAbort_Portals
1199 : * because we need to run user-defined code while persisting a portal.
1200 : * It's too late to do that once transaction abort has started.)
1201 : *
1202 : * We protect such portals by converting them to held cursors. We mark them
1203 : * as "auto-held" so that exception exit knows to clean them up. (In normal,
1204 : * non-exception code paths, the PL needs to clean such portals itself, since
1205 : * transaction end won't do it anymore; but that should be normal practice
1206 : * anyway.)
1207 : */
1208 : void
1209 2216 : HoldPinnedPortals(void)
1210 : {
1211 : HASH_SEQ_STATUS status;
1212 : PortalHashEnt *hentry;
1213 :
1214 2216 : hash_seq_init(&status, PortalHashTable);
1215 :
1216 4492 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1217 : {
1218 2279 : Portal portal = hentry->portal;
1219 :
1220 2279 : if (portal->portalPinned && !portal->autoHeld)
1221 : {
1222 : /*
1223 : * Doing transaction control, especially abort, inside a cursor
1224 : * loop that is not read-only, for example using UPDATE ...
1225 : * RETURNING, has weird semantics issues. Also, this
1226 : * implementation wouldn't work, because such portals cannot be
1227 : * held. (The core grammar enforces that only SELECT statements
1228 : * can drive a cursor, but for example PL/pgSQL does not restrict
1229 : * it.)
1230 : */
1231 19 : if (portal->strategy != PORTAL_ONE_SELECT)
1232 1 : ereport(ERROR,
1233 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1234 : errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));
1235 :
1236 : /* Verify it's in a suitable state to be held */
1237 18 : if (portal->status != PORTAL_READY)
1238 0 : elog(ERROR, "pinned portal is not ready to be auto-held");
1239 :
1240 18 : HoldPortal(portal);
1241 16 : portal->autoHeld = true;
1242 : }
1243 : }
1244 2213 : }
1245 :
1246 : /*
1247 : * Drop the outer active snapshots for all portals, so that no snapshots
1248 : * remain active.
1249 : *
1250 : * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
1251 : * ROLLBACK inside a procedure. This has to be separate from that since it
1252 : * should not be run until we're done with steps that are likely to fail.
1253 : *
1254 : * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
1255 : * need to clean up snapshot management in VACUUM and perhaps other places.
1256 : */
1257 : void
1258 2213 : ForgetPortalSnapshots(void)
1259 : {
1260 : HASH_SEQ_STATUS status;
1261 : PortalHashEnt *hentry;
1262 2213 : int numPortalSnaps = 0;
1263 2213 : int numActiveSnaps = 0;
1264 :
1265 : /* First, scan PortalHashTable and clear portalSnapshot fields */
1266 2213 : hash_seq_init(&status, PortalHashTable);
1267 :
1268 6702 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1269 : {
1270 2276 : Portal portal = hentry->portal;
1271 :
1272 2276 : if (portal->portalSnapshot != NULL)
1273 : {
1274 2213 : portal->portalSnapshot = NULL;
1275 2213 : numPortalSnaps++;
1276 : }
1277 : /* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
1278 : }
1279 :
1280 : /*
1281 : * Now, pop all the active snapshots, which should be just those that were
1282 : * portal snapshots. Ideally we'd drive this directly off the portal
1283 : * scan, but there's no good way to visit the portals in the correct
1284 : * order. So just cross-check after the fact.
1285 : */
1286 4426 : while (ActiveSnapshotSet())
1287 : {
1288 2213 : PopActiveSnapshot();
1289 2213 : numActiveSnaps++;
1290 : }
1291 :
1292 2213 : if (numPortalSnaps != numActiveSnaps)
1293 0 : elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
1294 : numPortalSnaps, numActiveSnaps);
1295 2213 : }
|