Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * portalmem.c
4 : * backend portal memory management
5 : *
6 : * Portals are objects representing the execution state of a query.
7 : * This module provides memory management services for portals, but it
8 : * doesn't actually run the executor for them.
9 : *
10 : *
11 : * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
12 : * Portions Copyright (c) 1994, Regents of the University of California
13 : *
14 : * IDENTIFICATION
15 : * src/backend/utils/mmgr/portalmem.c
16 : *
17 : *-------------------------------------------------------------------------
18 : */
19 : #include "postgres.h"
20 :
21 : #include "access/xact.h"
22 : #include "catalog/pg_type.h"
23 : #include "commands/portalcmds.h"
24 : #include "miscadmin.h"
25 : #include "storage/ipc.h"
26 : #include "utils/builtins.h"
27 : #include "utils/memutils.h"
28 : #include "utils/snapmgr.h"
29 : #include "utils/timestamp.h"
30 :
31 : /*
32 : * Estimate of the maximum number of open portals a user would have,
33 : * used in initially sizing the PortalHashTable in EnablePortalManager().
34 : * Since the hash table can expand, there's no need to make this overly
35 : * generous, and keeping it small avoids unnecessary overhead in the
36 : * hash_seq_search() calls executed during transaction end.
37 : */
38 : #define PORTALS_PER_USER 16
39 :
40 :
41 : /* ----------------
42 : * Global state
43 : * ----------------
44 : */
45 :
46 : #define MAX_PORTALNAME_LEN NAMEDATALEN
47 :
/*
 * Hash table entry mapping a portal name to its Portal structure.
 * The name is the hash key, so it must be a fixed-size array.
 */
typedef struct portalhashent
{
	char		portalname[MAX_PORTALNAME_LEN]; /* hash key: portal name */
	Portal		portal;			/* the portal this entry refers to */
} PortalHashEnt;
53 :
54 : static HTAB *PortalHashTable = NULL;
55 :
/*
 * Look up NAME in the portal hash table; set PORTAL to the matching
 * Portal, or to NULL if no entry exists.
 */
#define PortalHashTableLookup(NAME, PORTAL) \
do { \
	PortalHashEnt *hentry; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   (NAME), HASH_FIND, NULL); \
	if (hentry) \
		PORTAL = hentry->portal; \
	else \
		PORTAL = NULL; \
} while(0)
67 :
/*
 * Insert PORTAL into the hash table under NAME.  It is an error
 * (elog ERROR) if an entry with that name already exists.
 */
#define PortalHashTableInsert(PORTAL, NAME) \
do { \
	PortalHashEnt *hentry; bool found; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   (NAME), HASH_ENTER, &found); \
	if (found) \
		elog(ERROR, "duplicate portal name"); \
	hentry->portal = PORTAL; \
	/* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
	PORTAL->name = hentry->portalname; \
} while(0)
80 :
/*
 * Remove PORTAL's entry from the hash table, keyed by PORTAL->name.
 * A missing entry only rates a WARNING, since this runs during cleanup
 * paths where erroring out again could cause an infinite recovery loop.
 */
#define PortalHashTableDelete(PORTAL) \
do { \
	PortalHashEnt *hentry; \
	\
	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
										   PORTAL->name, HASH_REMOVE, NULL); \
	if (hentry == NULL) \
		elog(WARNING, "trying to delete portal name that does not exist"); \
} while(0)
90 :
91 : static MemoryContext TopPortalContext = NULL;
92 :
93 :
94 : /* ----------------------------------------------------------------
95 : * public portal interface functions
96 : * ----------------------------------------------------------------
97 : */
98 :
99 : /*
100 : * EnablePortalManager
101 : * Enables the portal management module at backend startup.
102 : */
103 : void
104 12528 : EnablePortalManager(void)
105 : {
106 : HASHCTL ctl;
107 :
108 : Assert(TopPortalContext == NULL);
109 :
110 12528 : TopPortalContext = AllocSetContextCreate(TopMemoryContext,
111 : "TopPortalContext",
112 : ALLOCSET_DEFAULT_SIZES);
113 :
114 12528 : ctl.keysize = MAX_PORTALNAME_LEN;
115 12528 : ctl.entrysize = sizeof(PortalHashEnt);
116 :
117 : /*
118 : * use PORTALS_PER_USER as a guess of how many hash table entries to
119 : * create, initially
120 : */
121 12528 : PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
122 : &ctl, HASH_ELEM | HASH_STRINGS);
123 12528 : }
124 :
125 : /*
126 : * GetPortalByName
127 : * Returns a portal given a portal name, or NULL if name not found.
128 : */
129 : Portal
130 668752 : GetPortalByName(const char *name)
131 : {
132 : Portal portal;
133 :
134 668752 : if (PointerIsValid(name))
135 668752 : PortalHashTableLookup(name, portal);
136 : else
137 0 : portal = NULL;
138 :
139 668752 : return portal;
140 : }
141 :
142 : /*
143 : * PortalGetPrimaryStmt
144 : * Get the "primary" stmt within a portal, ie, the one marked canSetTag.
145 : *
146 : * Returns NULL if no such stmt. If multiple PlannedStmt structs within the
147 : * portal are marked canSetTag, returns the first one. Neither of these
148 : * cases should occur in present usages of this function.
149 : */
150 : PlannedStmt *
151 184066 : PortalGetPrimaryStmt(Portal portal)
152 : {
153 : ListCell *lc;
154 :
155 184066 : foreach(lc, portal->stmts)
156 : {
157 184066 : PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
158 :
159 184066 : if (stmt->canSetTag)
160 184066 : return stmt;
161 : }
162 0 : return NULL;
163 : }
164 :
165 : /*
166 : * CreatePortal
167 : * Returns a new portal given a name.
168 : *
169 : * allowDup: if true, automatically drop any pre-existing portal of the
170 : * same name (if false, an error is raised).
171 : *
172 : * dupSilent: if true, don't even emit a WARNING.
173 : */
174 : Portal
175 585974 : CreatePortal(const char *name, bool allowDup, bool dupSilent)
176 : {
177 : Portal portal;
178 :
179 : AssertArg(PointerIsValid(name));
180 :
181 585974 : portal = GetPortalByName(name);
182 585974 : if (PortalIsValid(portal))
183 : {
184 5964 : if (!allowDup)
185 0 : ereport(ERROR,
186 : (errcode(ERRCODE_DUPLICATE_CURSOR),
187 : errmsg("cursor \"%s\" already exists", name)));
188 5964 : if (!dupSilent)
189 0 : ereport(WARNING,
190 : (errcode(ERRCODE_DUPLICATE_CURSOR),
191 : errmsg("closing existing cursor \"%s\"",
192 : name)));
193 5964 : PortalDrop(portal, false);
194 : }
195 :
196 : /* make new portal structure */
197 585974 : portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);
198 :
199 : /* initialize portal context; typically it won't store much */
200 585974 : portal->portalContext = AllocSetContextCreate(TopPortalContext,
201 : "PortalContext",
202 : ALLOCSET_SMALL_SIZES);
203 :
204 : /* create a resource owner for the portal */
205 585974 : portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
206 : "Portal");
207 :
208 : /* initialize portal fields that don't start off zero */
209 585974 : portal->status = PORTAL_NEW;
210 585974 : portal->cleanup = PortalCleanup;
211 585974 : portal->createSubid = GetCurrentSubTransactionId();
212 585974 : portal->activeSubid = portal->createSubid;
213 585974 : portal->strategy = PORTAL_MULTI_QUERY;
214 585974 : portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
215 585974 : portal->atStart = true;
216 585974 : portal->atEnd = true; /* disallow fetches until query is set */
217 585974 : portal->visible = true;
218 585974 : portal->creation_time = GetCurrentStatementStartTimestamp();
219 :
220 : /* put portal in table (sets portal->name) */
221 585974 : PortalHashTableInsert(portal, name);
222 :
223 : /* for named portals reuse portal->name copy */
224 585974 : MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");
225 :
226 585974 : return portal;
227 : }
228 :
229 : /*
230 : * CreateNewPortal
231 : * Create a new portal, assigning it a random nonconflicting name.
232 : */
233 : Portal
234 5350 : CreateNewPortal(void)
235 : {
236 : static unsigned int unnamed_portal_count = 0;
237 :
238 : char portalname[MAX_PORTALNAME_LEN];
239 :
240 : /* Select a nonconflicting name */
241 : for (;;)
242 : {
243 0 : unnamed_portal_count++;
244 5350 : sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
245 5350 : if (GetPortalByName(portalname) == NULL)
246 5350 : break;
247 : }
248 :
249 5350 : return CreatePortal(portalname, false, false);
250 : }
251 :
/*
 * PortalDefineQuery
 *		A simple subroutine to establish a portal's query.
 *
 * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
 * allowed anymore to pass NULL.  (If you really don't have source text,
 * you can pass a constant string, perhaps "(query not available)".)
 *
 * commandTag shall be NULL if and only if the original query string
 * (before rewriting) was an empty string.  Also, the passed commandTag must
 * be a pointer to a constant string, since it is not copied.
 *
 * If cplan is provided, then it is a cached plan containing the stmts, and
 * the caller must have done GetCachedPlan(), causing a refcount increment.
 * The refcount will be released when the portal is destroyed.
 *
 * If cplan is NULL, then it is the caller's responsibility to ensure that
 * the passed plan trees have adequate lifetime.  Typically this is done by
 * copying them into the portal's context.
 *
 * The caller is also responsible for ensuring that the passed prepStmtName
 * (if not NULL) and sourceText have adequate lifetime.
 *
 * NB: this function mustn't do much beyond storing the passed values; in
 * particular don't do anything that risks elog(ERROR).  If that were to
 * happen here before storing the cplan reference, we'd leak the plancache
 * refcount that the caller is trying to hand off to us.
 */
void
PortalDefineQuery(Portal portal,
				  const char *prepStmtName,
				  const char *sourceText,
				  CommandTag commandTag,
				  List *stmts,
				  CachedPlan *cplan)
{
	AssertArg(PortalIsValid(portal));
	AssertState(portal->status == PORTAL_NEW);

	AssertArg(sourceText != NULL);
	AssertArg(commandTag != CMDTAG_UNKNOWN || stmts == NIL);

	portal->prepStmtName = prepStmtName;
	portal->sourceText = sourceText;
	/* reset the completion data along with recording the tag */
	portal->qc.commandTag = commandTag;
	portal->qc.nprocessed = 0;
	portal->commandTag = commandTag;
	portal->stmts = stmts;
	portal->cplan = cplan;
	/* portal is now fully defined and may be prepared for execution */
	portal->status = PORTAL_DEFINED;
}
303 :
304 : /*
305 : * PortalReleaseCachedPlan
306 : * Release a portal's reference to its cached plan, if any.
307 : */
308 : static void
309 599914 : PortalReleaseCachedPlan(Portal portal)
310 : {
311 599914 : if (portal->cplan)
312 : {
313 28836 : ReleaseCachedPlan(portal->cplan, false);
314 28836 : portal->cplan = NULL;
315 :
316 : /*
317 : * We must also clear portal->stmts which is now a dangling reference
318 : * to the cached plan's plan list. This protects any code that might
319 : * try to examine the Portal later.
320 : */
321 28836 : portal->stmts = NIL;
322 : }
323 599914 : }
324 :
325 : /*
326 : * PortalCreateHoldStore
327 : * Create the tuplestore for a portal.
328 : */
329 : void
330 17760 : PortalCreateHoldStore(Portal portal)
331 : {
332 : MemoryContext oldcxt;
333 :
334 : Assert(portal->holdContext == NULL);
335 : Assert(portal->holdStore == NULL);
336 : Assert(portal->holdSnapshot == NULL);
337 :
338 : /*
339 : * Create the memory context that is used for storage of the tuple set.
340 : * Note this is NOT a child of the portal's portalContext.
341 : */
342 17760 : portal->holdContext =
343 17760 : AllocSetContextCreate(TopPortalContext,
344 : "PortalHoldContext",
345 : ALLOCSET_DEFAULT_SIZES);
346 :
347 : /*
348 : * Create the tuple store, selecting cross-transaction temp files, and
349 : * enabling random access only if cursor requires scrolling.
350 : *
351 : * XXX: Should maintenance_work_mem be used for the portal size?
352 : */
353 17760 : oldcxt = MemoryContextSwitchTo(portal->holdContext);
354 :
355 17760 : portal->holdStore =
356 17760 : tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
357 : true, work_mem);
358 :
359 17760 : MemoryContextSwitchTo(oldcxt);
360 17760 : }
361 :
362 : /*
363 : * PinPortal
364 : * Protect a portal from dropping.
365 : *
366 : * A pinned portal is still unpinned and dropped at transaction or
367 : * subtransaction abort.
368 : */
369 : void
370 4428 : PinPortal(Portal portal)
371 : {
372 4428 : if (portal->portalPinned)
373 0 : elog(ERROR, "portal already pinned");
374 :
375 4428 : portal->portalPinned = true;
376 4428 : }
377 :
378 : void
379 4392 : UnpinPortal(Portal portal)
380 : {
381 4392 : if (!portal->portalPinned)
382 0 : elog(ERROR, "portal not pinned");
383 :
384 4392 : portal->portalPinned = false;
385 4392 : }
386 :
387 : /*
388 : * MarkPortalActive
389 : * Transition a portal from READY to ACTIVE state.
390 : *
391 : * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
392 : */
393 : void
394 592634 : MarkPortalActive(Portal portal)
395 : {
396 : /* For safety, this is a runtime test not just an Assert */
397 592634 : if (portal->status != PORTAL_READY)
398 12 : ereport(ERROR,
399 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
400 : errmsg("portal \"%s\" cannot be run", portal->name)));
401 : /* Perform the state transition */
402 592622 : portal->status = PORTAL_ACTIVE;
403 592622 : portal->activeSubid = GetCurrentSubTransactionId();
404 592622 : }
405 :
/*
 * MarkPortalDone
 *		Transition a portal from ACTIVE to DONE state.
 *
 * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
 */
void
MarkPortalDone(Portal portal)
{
	/* Perform the state transition */
	Assert(portal->status == PORTAL_ACTIVE);
	portal->status = PORTAL_DONE;

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving execution of a ROLLBACK command in an already
	 * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
	 * with the cleanup hook still unexecuted.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		/* run the hook first, then clear it so it can't run twice */
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}
}
433 :
/*
 * MarkPortalFailed
 *		Transition a portal into FAILED state.
 *
 * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
 */
void
MarkPortalFailed(Portal portal)
{
	/* Perform the state transition; a DONE portal must not be failed */
	Assert(portal->status != PORTAL_DONE);
	portal->status = PORTAL_FAILED;

	/*
	 * Allow portalcmds.c to clean up the state it knows about.  We might as
	 * well do that now, since the portal can't be executed any more.
	 *
	 * In some cases involving cleanup of an already aborted transaction, this
	 * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
	 * still unexecuted.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		/* run the hook first, then clear it so it can't run twice */
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}
}
461 :
/*
 * PortalDrop
 *		Destroy the portal.
 *
 * isTopCommit: true when called during top-level transaction commit, in
 * which case resource release is normally left to the end-of-transaction
 * machinery (see the long comment below).
 */
void
PortalDrop(Portal portal, bool isTopCommit)
{
	AssertArg(PortalIsValid(portal));

	/*
	 * Don't allow dropping a pinned portal, it's still needed by whoever
	 * pinned it.
	 */
	if (portal->portalPinned)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop pinned portal \"%s\"", portal->name)));

	/*
	 * Not sure if the PORTAL_ACTIVE case can validly happen or not...
	 */
	if (portal->status == PORTAL_ACTIVE)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_STATE),
				 errmsg("cannot drop active portal \"%s\"", portal->name)));

	/*
	 * Allow portalcmds.c to clean up the state it knows about, in particular
	 * shutting down the executor if still active.  This step potentially runs
	 * user-defined code so failure has to be expected.  It's the cleanup
	 * hook's responsibility to not try to do that more than once, in the case
	 * that failure occurs and then we come back to drop the portal again
	 * during transaction abort.
	 *
	 * Note: in most paths of control, this will have been done already in
	 * MarkPortalDone or MarkPortalFailed.  We're just making sure.
	 */
	if (PointerIsValid(portal->cleanup))
	{
		portal->cleanup(portal);
		portal->cleanup = NULL;
	}

	/*
	 * Remove portal from hash table.  Because we do this here, we will not
	 * come back to try to remove the portal again if there's any error in the
	 * subsequent steps.  Better to leak a little memory than to get into an
	 * infinite error-recovery loop.
	 */
	PortalHashTableDelete(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * If portal has a snapshot protecting its data, release that.  This needs
	 * a little care since the registration will be attached to the portal's
	 * resowner; if the portal failed, we will already have released the
	 * resowner (and the snapshot) during transaction abort.
	 */
	if (portal->holdSnapshot)
	{
		if (portal->resowner)
			UnregisterSnapshotFromOwner(portal->holdSnapshot,
										portal->resowner);
		portal->holdSnapshot = NULL;
	}

	/*
	 * Release any resources still attached to the portal.  There are several
	 * cases being covered here:
	 *
	 * Top transaction commit (indicated by isTopCommit): normally we should
	 * do nothing here and let the regular end-of-transaction resource
	 * releasing mechanism handle these resources too.  However, if we have a
	 * FAILED portal (eg, a cursor that got an error), we'd better clean up
	 * its resources to avoid resource-leakage warning messages.
	 *
	 * Sub transaction commit: never comes here at all, since we don't kill
	 * any portals in AtSubCommit_Portals().
	 *
	 * Main or sub transaction abort: we will do nothing here because
	 * portal->resowner was already set NULL; the resources were already
	 * cleaned up in transaction abort.
	 *
	 * Ordinary portal drop: must release resources.  However, if the portal
	 * is not FAILED then we do not release its locks.  The locks become the
	 * responsibility of the transaction's ResourceOwner (since it is the
	 * parent of the portal's owner) and will be released when the transaction
	 * eventually ends.
	 */
	if (portal->resowner &&
		(!isTopCommit || portal->status == PORTAL_FAILED))
	{
		bool		isCommit = (portal->status != PORTAL_FAILED);

		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_BEFORE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_LOCKS,
							 isCommit, false);
		ResourceOwnerRelease(portal->resowner,
							 RESOURCE_RELEASE_AFTER_LOCKS,
							 isCommit, false);
		ResourceOwnerDelete(portal->resowner);
	}
	portal->resowner = NULL;

	/*
	 * Delete tuplestore if present.  We should do this even under error
	 * conditions; since the tuplestore would have been using cross-
	 * transaction storage, its temp files need to be explicitly deleted.
	 */
	if (portal->holdStore)
	{
		MemoryContext oldcontext;

		oldcontext = MemoryContextSwitchTo(portal->holdContext);
		tuplestore_end(portal->holdStore);
		MemoryContextSwitchTo(oldcontext);
		portal->holdStore = NULL;
	}

	/* delete tuplestore storage, if any */
	if (portal->holdContext)
		MemoryContextDelete(portal->holdContext);

	/* release subsidiary storage */
	MemoryContextDelete(portal->portalContext);

	/* release portal struct (it's in TopPortalContext) */
	pfree(portal);
}
596 :
597 : /*
598 : * Delete all declared cursors.
599 : *
600 : * Used by commands: CLOSE ALL, DISCARD ALL
601 : */
602 : void
603 12 : PortalHashTableDeleteAll(void)
604 : {
605 : HASH_SEQ_STATUS status;
606 : PortalHashEnt *hentry;
607 :
608 12 : if (PortalHashTable == NULL)
609 0 : return;
610 :
611 12 : hash_seq_init(&status, PortalHashTable);
612 48 : while ((hentry = hash_seq_search(&status)) != NULL)
613 : {
614 36 : Portal portal = hentry->portal;
615 :
616 : /* Can't close the active portal (the one running the command) */
617 36 : if (portal->status == PORTAL_ACTIVE)
618 20 : continue;
619 :
620 16 : PortalDrop(portal, false);
621 :
622 : /* Restart the iteration in case that led to other drops */
623 16 : hash_seq_term(&status);
624 16 : hash_seq_init(&status, PortalHashTable);
625 : }
626 : }
627 :
/*
 * "Hold" a portal.  Prepare it for access by later transactions.
 */
static void
HoldPortal(Portal portal)
{
	/*
	 * Note that PersistHoldablePortal() must release all resources used by
	 * the portal that are local to the creating transaction.
	 */
	PortalCreateHoldStore(portal);
	PersistHoldablePortal(portal);

	/* drop cached plan reference, if any */
	PortalReleaseCachedPlan(portal);

	/*
	 * Any resources belonging to the portal will be released in the upcoming
	 * transaction-wide cleanup; the portal will no longer have its own
	 * resources.
	 */
	portal->resowner = NULL;

	/*
	 * Having successfully exported the holdable cursor, mark it as not
	 * belonging to this transaction.
	 */
	portal->createSubid = InvalidSubTransactionId;
	portal->activeSubid = InvalidSubTransactionId;
}
658 :
/*
 * Pre-commit processing for portals.
 *
 * Holdable cursors created in this transaction need to be converted to
 * materialized form, since we are going to close down the executor and
 * release locks.  Non-holdable portals created in this transaction are
 * simply removed.  Portals remaining from prior transactions should be
 * left untouched.
 *
 * isPrepare: true when committing via PREPARE TRANSACTION, in which case
 * holdable cursors created in this transaction are rejected with an error.
 *
 * Returns true if any portals changed state (possibly causing user-defined
 * code to be run), false if not.
 */
bool
PreCommit_Portals(bool isPrepare)
{
	bool		result = false;
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * There should be no pinned portals anymore.  Complain if someone
		 * leaked one.  Auto-held portals are allowed; we assume that whoever
		 * pinned them is managing them.
		 */
		if (portal->portalPinned && !portal->autoHeld)
			elog(ERROR, "cannot commit while a portal is pinned");

		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction utility command, such as VACUUM, or a commit in
		 * a procedure.
		 *
		 * Note however that any resource owner attached to such a portal is
		 * still going to go away, so don't leave a dangling pointer.  Also
		 * unregister any snapshots held by the portal, mainly to avoid
		 * snapshot leak warnings from ResourceOwnerRelease().
		 */
		if (portal->status == PORTAL_ACTIVE)
		{
			if (portal->holdSnapshot)
			{
				if (portal->resowner)
					UnregisterSnapshotFromOwner(portal->holdSnapshot,
												portal->resowner);
				portal->holdSnapshot = NULL;
			}
			portal->resowner = NULL;
			continue;
		}

		/* Is it a holdable portal created in the current xact? */
		if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
			portal->createSubid != InvalidSubTransactionId &&
			portal->status == PORTAL_READY)
		{
			/*
			 * We are exiting the transaction that created a holdable cursor.
			 * Instead of dropping the portal, prepare it for access by later
			 * transactions.
			 *
			 * However, if this is PREPARE TRANSACTION rather than COMMIT,
			 * refuse PREPARE, because the semantics seem pretty unclear.
			 */
			if (isPrepare)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));

			HoldPortal(portal);

			/* Report we changed state */
			result = true;
		}
		else if (portal->createSubid == InvalidSubTransactionId)
		{
			/*
			 * Do nothing to cursors held over from a previous transaction
			 * (including ones we just froze in a previous cycle of this loop)
			 */
			continue;
		}
		else
		{
			/* Zap all non-holdable portals */
			PortalDrop(portal, true);

			/* Report we changed state */
			result = true;
		}

		/*
		 * After either freezing or dropping a portal, we have to restart the
		 * iteration, because we could have invoked user-defined code that
		 * caused a drop of the next portal in the hash chain.
		 */
		hash_seq_term(&status);
		hash_seq_init(&status, PortalHashTable);
	}

	return result;
}
766 :
/*
 * Abort processing for portals.
 *
 * At this point we run the cleanup hook if present, but we can't release the
 * portal's memory until the cleanup call.
 */
void
AtAbort_Portals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * When elog(FATAL) is in progress, we need to set the active portal
		 * to failed, so that PortalCleanup() doesn't run the executor
		 * shutdown.
		 */
		if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
			MarkPortalFailed(portal);

		/*
		 * Do nothing else to cursors held over from a previous transaction.
		 */
		if (portal->createSubid == InvalidSubTransactionId)
			continue;

		/*
		 * Do nothing to auto-held cursors.  This is similar to the case of a
		 * cursor from a previous transaction, but it could also be that the
		 * cursor was auto-held in this transaction, so it wants to live on.
		 */
		if (portal->autoHeld)
			continue;

		/*
		 * If it was created in the current transaction, we can't do normal
		 * shutdown on a READY portal either; it might refer to objects
		 * created in the failed transaction.  See comments in
		 * AtSubAbort_Portals.
		 */
		if (portal->status == PORTAL_READY)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.  But leave active portals alone.
		 */
		if (portal->status != PORTAL_ACTIVE)
			MemoryContextDeleteChildren(portal->portalContext);
	}
}
845 :
/*
 * Post-abort cleanup for portals.
 *
 * Delete all portals not held over from prior transactions.
 */
void
AtCleanup_Portals(void)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/*
		 * Do not touch active portals --- this can only happen in the case of
		 * a multi-transaction command.
		 */
		if (portal->status == PORTAL_ACTIVE)
			continue;

		/*
		 * Do nothing to cursors held over from a previous transaction or
		 * auto-held ones.
		 */
		if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
		{
			Assert(portal->status != PORTAL_ACTIVE);
			Assert(portal->resowner == NULL);
			continue;
		}

		/*
		 * If a portal is still pinned, forcibly unpin it.  PortalDrop will not
		 * let us drop the portal otherwise.  Whoever pinned the portal was
		 * interrupted by the abort too and won't try to use it anymore.
		 */
		if (portal->portalPinned)
			portal->portalPinned = false;

		/*
		 * We had better not call any user-defined code during cleanup, so if
		 * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
			portal->cleanup = NULL;
		}

		/* Zap it. */
		PortalDrop(portal, false);
	}
}
902 :
903 : /*
904 : * Portal-related cleanup when we return to the main loop on error.
905 : *
906 : * This is different from the cleanup at transaction abort. Auto-held portals
907 : * are cleaned up on error but not on transaction abort.
908 : */
909 : void
910 20000 : PortalErrorCleanup(void)
911 : {
912 : HASH_SEQ_STATUS status;
913 : PortalHashEnt *hentry;
914 :
915 20000 : hash_seq_init(&status, PortalHashTable);
916 :
917 21112 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
918 : {
919 1112 : Portal portal = hentry->portal;
920 :
921 1112 : if (portal->autoHeld)
922 : {
923 4 : portal->portalPinned = false;
924 4 : PortalDrop(portal, false);
925 : }
926 : }
927 20000 : }
928 :
929 : /*
930 : * Pre-subcommit processing for portals.
931 : *
932 : * Reassign portals created or used in the current subtransaction to the
933 : * parent subtransaction.
934 : */
935 : void
936 4752 : AtSubCommit_Portals(SubTransactionId mySubid,
937 : SubTransactionId parentSubid,
938 : ResourceOwner parentXactOwner)
939 : {
940 : HASH_SEQ_STATUS status;
941 : PortalHashEnt *hentry;
942 :
943 4752 : hash_seq_init(&status, PortalHashTable);
944 :
945 9170 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
946 : {
947 4418 : Portal portal = hentry->portal;
948 :
949 4418 : if (portal->createSubid == mySubid)
950 : {
951 58 : portal->createSubid = parentSubid;
952 58 : if (portal->resowner)
953 58 : ResourceOwnerNewParent(portal->resowner, parentXactOwner);
954 : }
955 4418 : if (portal->activeSubid == mySubid)
956 208 : portal->activeSubid = parentSubid;
957 : }
958 4752 : }
959 :
/*
 * Subtransaction abort handling for portals.
 *
 * Deactivate portals created or used during the failed subtransaction.
 * Note that per AtSubCommit_Portals, this will catch portals created/used
 * in descendants of the subtransaction too.
 *
 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
 *
 * mySubid/parentSubid identify the aborting subtransaction and its parent;
 * myXactOwner/parentXactOwner are the corresponding resource owners, used
 * when a failed portal's resources must be released during this subxact's
 * cleanup rather than its parent's.
 */
void
AtSubAbort_Portals(SubTransactionId mySubid,
				   SubTransactionId parentSubid,
				   ResourceOwner myXactOwner,
				   ResourceOwner parentXactOwner)
{
	HASH_SEQ_STATUS status;
	PortalHashEnt *hentry;

	hash_seq_init(&status, PortalHashTable);

	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
	{
		Portal		portal = hentry->portal;

		/* Was it created in this subtransaction? */
		if (portal->createSubid != mySubid)
		{
			/* No, but maybe it was used in this subtransaction? */
			if (portal->activeSubid == mySubid)
			{
				/* Maintain activeSubid until the portal is removed */
				portal->activeSubid = parentSubid;

				/*
				 * A MarkPortalActive() caller ran an upper-level portal in
				 * this subtransaction and left the portal ACTIVE. This can't
				 * happen, but force the portal into FAILED state for the same
				 * reasons discussed below.
				 *
				 * We assume we can get away without forcing upper-level READY
				 * portals to fail, even if they were run and then suspended.
				 * In theory a suspended upper-level portal could have
				 * acquired some references to objects that are about to be
				 * destroyed, but there should be sufficient defenses against
				 * such cases: the portal's original query cannot contain such
				 * references, and any references within, say, cached plans of
				 * PL/pgSQL functions are not from active queries and should
				 * be protected by revalidation logic.
				 */
				if (portal->status == PORTAL_ACTIVE)
					MarkPortalFailed(portal);

				/*
				 * Also, if we failed it during the current subtransaction
				 * (either just above, or earlier), reattach its resource
				 * owner to the current subtransaction's resource owner, so
				 * that any resources it still holds will be released while
				 * cleaning up this subtransaction. This prevents some corner
				 * cases wherein we might get Asserts or worse while cleaning
				 * up objects created during the current subtransaction
				 * (because they're still referenced within this portal).
				 */
				if (portal->status == PORTAL_FAILED && portal->resowner)
				{
					ResourceOwnerNewParent(portal->resowner, myXactOwner);
					portal->resowner = NULL;
				}
			}
			/* Done if it wasn't created in this subtransaction */
			continue;
		}

		/*
		 * Force any live portals of my own subtransaction into FAILED state.
		 * We have to do this because they might refer to objects created or
		 * changed in the failed subtransaction, leading to crashes within
		 * ExecutorEnd when portalcmds.c tries to close down the portal.
		 * Currently, every MarkPortalActive() caller ensures it updates the
		 * portal status again before relinquishing control, so ACTIVE can't
		 * happen here. If it does happen, dispose the portal like existing
		 * MarkPortalActive() callers would.
		 */
		if (portal->status == PORTAL_READY ||
			portal->status == PORTAL_ACTIVE)
			MarkPortalFailed(portal);

		/*
		 * Allow portalcmds.c to clean up the state it knows about, if we
		 * haven't already.
		 */
		if (PointerIsValid(portal->cleanup))
		{
			portal->cleanup(portal);
			portal->cleanup = NULL;
		}

		/* drop cached plan reference, if any */
		PortalReleaseCachedPlan(portal);

		/*
		 * Any resources belonging to the portal will be released in the
		 * upcoming transaction-wide cleanup; they will be gone before we run
		 * PortalDrop.  (Hence we just forget the resowner pointer here
		 * rather than releasing it ourselves.)
		 */
		portal->resowner = NULL;

		/*
		 * Although we can't delete the portal data structure proper, we can
		 * release any memory in subsidiary contexts, such as executor state.
		 * The cleanup hook was the last thing that might have needed data
		 * there.
		 */
		MemoryContextDeleteChildren(portal->portalContext);
	}
}
1075 :
1076 : /*
1077 : * Post-subabort cleanup for portals.
1078 : *
1079 : * Drop all portals created in the failed subtransaction (but note that
1080 : * we will not drop any that were reassigned to the parent above).
1081 : */
1082 : void
1083 3340 : AtSubCleanup_Portals(SubTransactionId mySubid)
1084 : {
1085 : HASH_SEQ_STATUS status;
1086 : PortalHashEnt *hentry;
1087 :
1088 3340 : hash_seq_init(&status, PortalHashTable);
1089 :
1090 8460 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1091 : {
1092 5120 : Portal portal = hentry->portal;
1093 :
1094 5120 : if (portal->createSubid != mySubid)
1095 5108 : continue;
1096 :
1097 : /*
1098 : * If a portal is still pinned, forcibly unpin it. PortalDrop will not
1099 : * let us drop the portal otherwise. Whoever pinned the portal was
1100 : * interrupted by the abort too and won't try to use it anymore.
1101 : */
1102 12 : if (portal->portalPinned)
1103 6 : portal->portalPinned = false;
1104 :
1105 : /*
1106 : * We had better not call any user-defined code during cleanup, so if
1107 : * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
1108 : */
1109 12 : if (PointerIsValid(portal->cleanup))
1110 : {
1111 0 : elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
1112 0 : portal->cleanup = NULL;
1113 : }
1114 :
1115 : /* Zap it. */
1116 12 : PortalDrop(portal, false);
1117 : }
1118 3340 : }
1119 :
1120 : /* Find all available cursors */
1121 : Datum
1122 82 : pg_cursor(PG_FUNCTION_ARGS)
1123 : {
1124 82 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
1125 : TupleDesc tupdesc;
1126 : Tuplestorestate *tupstore;
1127 : MemoryContext per_query_ctx;
1128 : MemoryContext oldcontext;
1129 : HASH_SEQ_STATUS hash_seq;
1130 : PortalHashEnt *hentry;
1131 :
1132 : /* check to see if caller supports us returning a tuplestore */
1133 82 : if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
1134 0 : ereport(ERROR,
1135 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1136 : errmsg("set-valued function called in context that cannot accept a set")));
1137 82 : if (!(rsinfo->allowedModes & SFRM_Materialize))
1138 0 : ereport(ERROR,
1139 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1140 : errmsg("materialize mode required, but it is not allowed in this context")));
1141 :
1142 : /* need to build tuplestore in query context */
1143 82 : per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
1144 82 : oldcontext = MemoryContextSwitchTo(per_query_ctx);
1145 :
1146 : /*
1147 : * build tupdesc for result tuples. This must match the definition of the
1148 : * pg_cursors view in system_views.sql
1149 : */
1150 82 : tupdesc = CreateTemplateTupleDesc(6);
1151 82 : TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
1152 : TEXTOID, -1, 0);
1153 82 : TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
1154 : TEXTOID, -1, 0);
1155 82 : TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
1156 : BOOLOID, -1, 0);
1157 82 : TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
1158 : BOOLOID, -1, 0);
1159 82 : TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
1160 : BOOLOID, -1, 0);
1161 82 : TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
1162 : TIMESTAMPTZOID, -1, 0);
1163 :
1164 : /*
1165 : * We put all the tuples into a tuplestore in one scan of the hashtable.
1166 : * This avoids any issue of the hashtable possibly changing between calls.
1167 : */
1168 : tupstore =
1169 82 : tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
1170 : false, work_mem);
1171 :
1172 : /* generate junk in short-term context */
1173 82 : MemoryContextSwitchTo(oldcontext);
1174 :
1175 82 : hash_seq_init(&hash_seq, PortalHashTable);
1176 248 : while ((hentry = hash_seq_search(&hash_seq)) != NULL)
1177 : {
1178 166 : Portal portal = hentry->portal;
1179 : Datum values[6];
1180 : bool nulls[6];
1181 :
1182 : /* report only "visible" entries */
1183 166 : if (!portal->visible)
1184 86 : continue;
1185 :
1186 80 : MemSet(nulls, 0, sizeof(nulls));
1187 :
1188 80 : values[0] = CStringGetTextDatum(portal->name);
1189 80 : values[1] = CStringGetTextDatum(portal->sourceText);
1190 80 : values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
1191 80 : values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
1192 80 : values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
1193 80 : values[5] = TimestampTzGetDatum(portal->creation_time);
1194 :
1195 80 : tuplestore_putvalues(tupstore, tupdesc, values, nulls);
1196 : }
1197 :
1198 : /* clean up and return the tuplestore */
1199 : tuplestore_donestoring(tupstore);
1200 :
1201 82 : rsinfo->returnMode = SFRM_Materialize;
1202 82 : rsinfo->setResult = tupstore;
1203 82 : rsinfo->setDesc = tupdesc;
1204 :
1205 82 : return (Datum) 0;
1206 : }
1207 :
1208 : bool
1209 36 : ThereAreNoReadyPortals(void)
1210 : {
1211 : HASH_SEQ_STATUS status;
1212 : PortalHashEnt *hentry;
1213 :
1214 36 : hash_seq_init(&status, PortalHashTable);
1215 :
1216 72 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1217 : {
1218 36 : Portal portal = hentry->portal;
1219 :
1220 36 : if (portal->status == PORTAL_READY)
1221 0 : return false;
1222 : }
1223 :
1224 36 : return true;
1225 : }
1226 :
1227 : /*
1228 : * Hold all pinned portals.
1229 : *
1230 : * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
1231 : * called to protect internally-generated cursors from being dropped during
1232 : * the transaction shutdown. Currently, SPI calls this automatically; PLs
1233 : * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
1234 : * themselves. (Note that we couldn't do this in, say, AtAbort_Portals
1235 : * because we need to run user-defined code while persisting a portal.
1236 : * It's too late to do that once transaction abort has started.)
1237 : *
1238 : * We protect such portals by converting them to held cursors. We mark them
1239 : * as "auto-held" so that exception exit knows to clean them up. (In normal,
1240 : * non-exception code paths, the PL needs to clean such portals itself, since
1241 : * transaction end won't do it anymore; but that should be normal practice
1242 : * anyway.)
1243 : */
1244 : void
1245 4332 : HoldPinnedPortals(void)
1246 : {
1247 : HASH_SEQ_STATUS status;
1248 : PortalHashEnt *hentry;
1249 :
1250 4332 : hash_seq_init(&status, PortalHashTable);
1251 :
1252 8760 : while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
1253 : {
1254 4434 : Portal portal = hentry->portal;
1255 :
1256 4434 : if (portal->portalPinned && !portal->autoHeld)
1257 : {
1258 : /*
1259 : * Doing transaction control, especially abort, inside a cursor
1260 : * loop that is not read-only, for example using UPDATE ...
1261 : * RETURNING, has weird semantics issues. Also, this
1262 : * implementation wouldn't work, because such portals cannot be
1263 : * held. (The core grammar enforces that only SELECT statements
1264 : * can drive a cursor, but for example PL/pgSQL does not restrict
1265 : * it.)
1266 : */
1267 30 : if (portal->strategy != PORTAL_ONE_SELECT)
1268 2 : ereport(ERROR,
1269 : (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
1270 : errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));
1271 :
1272 : /* Verify it's in a suitable state to be held */
1273 28 : if (portal->status != PORTAL_READY)
1274 0 : elog(ERROR, "pinned portal is not ready to be auto-held");
1275 :
1276 28 : HoldPortal(portal);
1277 24 : portal->autoHeld = true;
1278 : }
1279 : }
1280 4326 : }
|