LCOV - code coverage report
Current view: top level - src/backend/storage/lmgr - deadlock.c (source / functions) Coverage Total Hit
Test: PostgreSQL 19devel Lines: 86.2 % 298 257
Test Date: 2026-02-17 17:20:33 Functions: 91.7 % 12 11
Legend: Lines:     hit not hit

            Line data    Source code
       1              : /*-------------------------------------------------------------------------
       2              :  *
       3              :  * deadlock.c
       4              :  *    POSTGRES deadlock detection code
       5              :  *
       6              :  * See src/backend/storage/lmgr/README for a description of the deadlock
       7              :  * detection and resolution algorithms.
       8              :  *
       9              :  *
      10              :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
      11              :  * Portions Copyright (c) 1994, Regents of the University of California
      12              :  *
      13              :  *
      14              :  * IDENTIFICATION
      15              :  *    src/backend/storage/lmgr/deadlock.c
      16              :  *
      17              :  *  Interface:
      18              :  *
      19              :  *  DeadLockCheck()
      20              :  *  DeadLockReport()
      21              :  *  RememberSimpleDeadLock()
      22              :  *  InitDeadLockChecking()
      23              :  *
      24              :  *-------------------------------------------------------------------------
      25              :  */
      26              : #include "postgres.h"
      27              : 
      28              : #include "miscadmin.h"
      29              : #include "pg_trace.h"
      30              : #include "pgstat.h"
      31              : #include "storage/lmgr.h"
      32              : #include "storage/proc.h"
      33              : #include "storage/procnumber.h"
      34              : #include "utils/memutils.h"
      35              : 
      36              : 
      37              : /*
      38              :  * One edge in the waits-for graph.
      39              :  *
      40              :  * waiter and blocker may or may not be members of a lock group, but if either
      41              :  * is, it will be the leader rather than any other member of the lock group.
      42              :  * The group leaders act as representatives of the whole group even though
      43              :  * those particular processes need not be waiting at all.  There will be at
      44              :  * least one member of the waiter's lock group on the wait queue for the given
      45              :  * lock, maybe more.
      46              :  */
      47              : typedef struct
      48              : {
      49              :     PGPROC     *waiter;         /* the leader of the waiting lock group */
      50              :     PGPROC     *blocker;        /* the leader of the group it is waiting for */
      51              :     LOCK       *lock;           /* the lock being waited for */
      52              :     int         pred;           /* workspace for TopoSort */
      53              :     int         link;           /* workspace for TopoSort */
      54              : } EDGE;
      55              : 
      56              : /* One potential reordering of a lock's wait queue */
      57              : typedef struct
      58              : {
      59              :     LOCK       *lock;           /* the lock whose wait queue is described */
      60              :     PGPROC    **procs;          /* array of PGPROC *'s in new wait order */
      61              :     int         nProcs;
      62              : } WAIT_ORDER;
      63              : 
      64              : /*
      65              :  * Information saved about each edge in a detected deadlock cycle.  This
      66              :  * is used to print a diagnostic message upon failure.
      67              :  *
      68              :  * Note: because we want to examine this info after releasing the lock
      69              :  * manager's partition locks, we can't just store LOCK and PGPROC pointers;
      70              :  * we must extract out all the info we want to be able to print.
      71              :  */
      72              : typedef struct
      73              : {
      74              :     LOCKTAG     locktag;        /* ID of awaited lock object */
      75              :     LOCKMODE    lockmode;       /* type of lock we're waiting for */
      76              :     int         pid;            /* PID of blocked backend */
      77              : } DEADLOCK_INFO;
      78              : 
      79              : 
      80              : static bool DeadLockCheckRecurse(PGPROC *proc);
      81              : static int  TestConfiguration(PGPROC *startProc);
      82              : static bool FindLockCycle(PGPROC *checkProc,
      83              :                           EDGE *softEdges, int *nSoftEdges);
      84              : static bool FindLockCycleRecurse(PGPROC *checkProc, int depth,
      85              :                                  EDGE *softEdges, int *nSoftEdges);
      86              : static bool FindLockCycleRecurseMember(PGPROC *checkProc,
      87              :                                        PGPROC *checkProcLeader,
      88              :                                        int depth, EDGE *softEdges, int *nSoftEdges);
      89              : static bool ExpandConstraints(EDGE *constraints, int nConstraints);
      90              : static bool TopoSort(LOCK *lock, EDGE *constraints, int nConstraints,
      91              :                      PGPROC **ordering);
      92              : 
      93              : #ifdef DEBUG_DEADLOCK
      94              : static void PrintLockQueue(LOCK *lock, const char *info);
      95              : #endif
      96              : 
      97              : 
      98              : /*
      99              :  * Working space for the deadlock detector
     100              :  */
     101              : 
     102              : /* Workspace for FindLockCycle */
     103              : static PGPROC **visitedProcs;   /* Array of visited procs */
     104              : static int  nVisitedProcs;
     105              : 
     106              : /* Workspace for TopoSort */
     107              : static PGPROC **topoProcs;      /* Array of not-yet-output procs */
     108              : static int *beforeConstraints;  /* Counts of remaining before-constraints */
     109              : static int *afterConstraints;   /* List head for after-constraints */
     110              : 
     111              : /* Output area for ExpandConstraints */
     112              : static WAIT_ORDER *waitOrders;  /* Array of proposed queue rearrangements */
     113              : static int  nWaitOrders;
     114              : static PGPROC **waitOrderProcs; /* Space for waitOrders queue contents */
     115              : 
     116              : /* Current list of constraints being considered */
     117              : static EDGE *curConstraints;
     118              : static int  nCurConstraints;
     119              : static int  maxCurConstraints;
     120              : 
     121              : /* Storage space for results from FindLockCycle */
     122              : static EDGE *possibleConstraints;
     123              : static int  nPossibleConstraints;
     124              : static int  maxPossibleConstraints;
     125              : static DEADLOCK_INFO *deadlockDetails;
     126              : static int  nDeadlockDetails;
     127              : 
     128              : /* PGPROC pointer of any blocking autovacuum worker found */
     129              : static PGPROC *blocking_autovacuum_proc = NULL;
     130              : 
     131              : 
     132              : /*
     133              :  * InitDeadLockChecking -- initialize deadlock checker during backend startup
     134              :  *
     135              :  * This does per-backend initialization of the deadlock checker; primarily,
     136              :  * allocation of working memory for DeadLockCheck.  We do this per-backend
     137              :  * since there's no percentage in making the kernel do copy-on-write
     138              :  * inheritance of workspace from the postmaster.  We allocate the space at
     139              :  * startup because the deadlock checker is run with all the partitions of the
     140              :  * lock table locked, and we want to keep that section as short as possible.
     141              :  */
     142              : void
     143        18456 : InitDeadLockChecking(void)
     144              : {
     145              :     MemoryContext oldcxt;
     146              : 
     147              :     /* Make sure allocations are permanent */
     148        18456 :     oldcxt = MemoryContextSwitchTo(TopMemoryContext);
     149              : 
     150              :     /*
     151              :      * FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
     152              :      * deadlockDetails[].
     153              :      */
     154        18456 :     visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
     155        18456 :     deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO));
     156              : 
     157              :     /*
     158              :      * TopoSort needs to consider at most MaxBackends wait-queue entries, and
     159              :      * it needn't run concurrently with FindLockCycle.
     160              :      */
     161        18456 :     topoProcs = visitedProcs;   /* re-use this space */
     162        18456 :     beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
     163        18456 :     afterConstraints = (int *) palloc(MaxBackends * sizeof(int));
     164              : 
     165              :     /*
     166              :      * We need to consider rearranging at most MaxBackends/2 wait queues
     167              :      * (since it takes at least two waiters in a queue to create a soft edge),
     168              :      * and the expanded form of the wait queues can't involve more than
     169              :      * MaxBackends total waiters.
     170              :      */
     171        18456 :     waitOrders = (WAIT_ORDER *)
     172        18456 :         palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
     173        18456 :     waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
     174              : 
     175              :     /*
     176              :      * Allow at most MaxBackends distinct constraints in a configuration. (Is
     177              :      * this enough?  In practice it seems it should be, but I don't quite see
     178              :      * how to prove it.  If we run out, we might fail to find a workable wait
     179              :      * queue rearrangement even though one exists.)  NOTE that this number
     180              :      * limits the maximum recursion depth of DeadLockCheckRecurse. Making it
     181              :      * really big might potentially allow a stack-overflow problem.
     182              :      */
     183        18456 :     maxCurConstraints = MaxBackends;
     184        18456 :     curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));
     185              : 
     186              :     /*
     187              :      * Allow up to 3*MaxBackends constraints to be saved without having to
     188              :      * re-run TestConfiguration.  (This is probably more than enough, but we
     189              :      * can survive if we run low on space by doing excess runs of
     190              :      * TestConfiguration to re-compute constraint lists each time needed.) The
     191              :      * last MaxBackends entries in possibleConstraints[] are reserved as
     192              :      * output workspace for FindLockCycle.
     193              :      */
     194              :     {
     195              :         StaticAssertDecl(MAX_BACKENDS_BITS <= (32 - 3),
     196              :                          "MAX_BACKENDS_BITS too big for * 4");
     197        18456 :         maxPossibleConstraints = MaxBackends * 4;
     198        18456 :         possibleConstraints =
     199        18456 :             (EDGE *) palloc(maxPossibleConstraints * sizeof(EDGE));
     200              :     }
     201              : 
     202        18456 :     MemoryContextSwitchTo(oldcxt);
     203        18456 : }
     204              : 
     205              : /*
     206              :  * DeadLockCheck -- Checks for deadlocks for a given process
     207              :  *
     208              :  * This code looks for deadlocks involving the given process.  If any
     209              :  * are found, it tries to rearrange lock wait queues to resolve the
     210              :  * deadlock.  If resolution is impossible, return DS_HARD_DEADLOCK ---
     211              :  * the caller is then expected to abort the given proc's transaction.
     212              :  *
     213              :  * Caller must already have locked all partitions of the lock tables.
     214              :  *
     215              :  * On failure, deadlock details are recorded in deadlockDetails[] for
     216              :  * subsequent printing by DeadLockReport().  That activity is separate
     217              :  * because we don't want to do it while holding all those LWLocks.
     218              :  */
     219              : DeadLockState
     220           23 : DeadLockCheck(PGPROC *proc)
     221              : {
     222              :     /* Initialize to "no constraints" */
     223           23 :     nCurConstraints = 0;
     224           23 :     nPossibleConstraints = 0;
     225           23 :     nWaitOrders = 0;
     226              : 
     227              :     /* Initialize to not blocked by an autovacuum worker */
     228           23 :     blocking_autovacuum_proc = NULL;
     229              : 
     230              :     /* Search for deadlocks and possible fixes */
     231           23 :     if (DeadLockCheckRecurse(proc))
     232              :     {
     233              :         /*
     234              :          * Call FindLockCycle one more time, to record the correct
     235              :          * deadlockDetails[] for the basic state with no rearrangements.
     236              :          */
     237              :         int         nSoftEdges;
     238              : 
     239              :         TRACE_POSTGRESQL_DEADLOCK_FOUND();
     240              : 
     241            5 :         nWaitOrders = 0;
     242            5 :         if (!FindLockCycle(proc, possibleConstraints, &nSoftEdges))
     243            0 :             elog(FATAL, "deadlock seems to have disappeared");
     244              : 
     245            5 :         return DS_HARD_DEADLOCK;    /* cannot find a non-deadlocked state */
     246              :     }
     247              : 
     248              :     /* Apply any needed rearrangements of wait queues */
     249           21 :     for (int i = 0; i < nWaitOrders; i++)
     250              :     {
     251            3 :         LOCK       *lock = waitOrders[i].lock;
     252            3 :         PGPROC    **procs = waitOrders[i].procs;
     253            3 :         int         nProcs = waitOrders[i].nProcs;
     254            3 :         dclist_head *waitQueue = &lock->waitProcs;
     255              : 
     256              :         Assert(nProcs == dclist_count(waitQueue));
     257              : 
     258              : #ifdef DEBUG_DEADLOCK
     259              :         PrintLockQueue(lock, "DeadLockCheck:");
     260              : #endif
     261              : 
     262              :         /* Reset the queue and re-add procs in the desired order */
     263            3 :         dclist_init(waitQueue);
     264           12 :         for (int j = 0; j < nProcs; j++)
     265            9 :             dclist_push_tail(waitQueue, &procs[j]->links);
     266              : 
     267              : #ifdef DEBUG_DEADLOCK
     268              :         PrintLockQueue(lock, "rearranged to:");
     269              : #endif
     270              : 
     271              :         /* See if any waiters for the lock can be woken up now */
     272            3 :         ProcLockWakeup(GetLocksMethodTable(lock), lock);
     273              :     }
     274              : 
     275              :     /* Return code tells caller if we had to escape a deadlock or not */
     276           18 :     if (nWaitOrders > 0)
     277            3 :         return DS_SOFT_DEADLOCK;
     278           15 :     else if (blocking_autovacuum_proc != NULL)
     279            0 :         return DS_BLOCKED_BY_AUTOVACUUM;
     280              :     else
     281           15 :         return DS_NO_DEADLOCK;
     282              : }
     283              : 
     284              : /*
     285              :  * Return the PGPROC of the autovacuum that's blocking a process.
     286              :  *
     287              :  * We reset the saved pointer as soon as we pass it back.
     288              :  */
     289              : PGPROC *
     290            0 : GetBlockingAutoVacuumPgproc(void)
     291              : {
     292              :     PGPROC     *ptr;
     293              : 
     294            0 :     ptr = blocking_autovacuum_proc;
     295            0 :     blocking_autovacuum_proc = NULL;
     296              : 
     297            0 :     return ptr;
     298              : }
     299              : 
     300              : /*
     301              :  * DeadLockCheckRecurse -- recursively search for valid orderings
     302              :  *
     303              :  * curConstraints[] holds the current set of constraints being considered
     304              :  * by an outer level of recursion.  Add to this each possible solution
     305              :  * constraint for any cycle detected at this level.
     306              :  *
     307              :  * Returns true if no solution exists.  Returns false if a deadlock-free
     308              :  * state is attainable, in which case waitOrders[] shows the required
     309              :  * rearrangements of lock wait queues (if any).
     310              :  */
     311              : static bool
     312           26 : DeadLockCheckRecurse(PGPROC *proc)
     313              : {
     314              :     int         nEdges;
     315              :     int         oldPossibleConstraints;
     316              :     bool        savedList;
     317              :     int         i;
     318              : 
     319           26 :     nEdges = TestConfiguration(proc);
     320           26 :     if (nEdges < 0)
     321            5 :         return true;            /* hard deadlock --- no solution */
     322           21 :     if (nEdges == 0)
     323           18 :         return false;           /* good configuration found */
     324            3 :     if (nCurConstraints >= maxCurConstraints)
     325            0 :         return true;            /* out of room for active constraints? */
     326            3 :     oldPossibleConstraints = nPossibleConstraints;
     327            3 :     if (nPossibleConstraints + nEdges + MaxBackends <= maxPossibleConstraints)
     328              :     {
     329              :         /* We can save the edge list in possibleConstraints[] */
     330            3 :         nPossibleConstraints += nEdges;
     331            3 :         savedList = true;
     332              :     }
     333              :     else
     334              :     {
     335              :         /* Not room; will need to regenerate the edges on-the-fly */
     336            0 :         savedList = false;
     337              :     }
     338              : 
     339              :     /*
     340              :      * Try each available soft edge as an addition to the configuration.
     341              :      */
     342            3 :     for (i = 0; i < nEdges; i++)
     343              :     {
     344            3 :         if (!savedList && i > 0)
     345              :         {
     346              :             /* Regenerate the list of possible added constraints */
     347            0 :             if (nEdges != TestConfiguration(proc))
     348            0 :                 elog(FATAL, "inconsistent results during deadlock check");
     349              :         }
     350            3 :         curConstraints[nCurConstraints] =
     351            3 :             possibleConstraints[oldPossibleConstraints + i];
     352            3 :         nCurConstraints++;
     353            3 :         if (!DeadLockCheckRecurse(proc))
     354            3 :             return false;       /* found a valid solution! */
     355              :         /* give up on that added constraint, try again */
     356            0 :         nCurConstraints--;
     357              :     }
     358            0 :     nPossibleConstraints = oldPossibleConstraints;
     359            0 :     return true;                /* no solution found */
     360              : }
     361              : 
     362              : 
     363              : /*--------------------
     364              :  * Test a configuration (current set of constraints) for validity.
     365              :  *
     366              :  * Returns:
     367              :  *      0: the configuration is good (no deadlocks)
     368              :  *     -1: the configuration has a hard deadlock or is not self-consistent
     369              :  *      >0: the configuration has one or more soft deadlocks
     370              :  *
     371              :  * In the soft-deadlock case, one of the soft cycles is chosen arbitrarily
     372              :  * and a list of its soft edges is returned beginning at
     373              :  * possibleConstraints+nPossibleConstraints.  The return value is the
     374              :  * number of soft edges.
     375              :  *--------------------
     376              :  */
     377              : static int
     378           26 : TestConfiguration(PGPROC *startProc)
     379              : {
     380           26 :     int         softFound = 0;
     381           26 :     EDGE       *softEdges = possibleConstraints + nPossibleConstraints;
     382              :     int         nSoftEdges;
     383              :     int         i;
     384              : 
     385              :     /*
     386              :      * Make sure we have room for FindLockCycle's output.
     387              :      */
     388           26 :     if (nPossibleConstraints + MaxBackends > maxPossibleConstraints)
     389            0 :         return -1;
     390              : 
     391              :     /*
     392              :      * Expand current constraint set into wait orderings.  Fail if the
     393              :      * constraint set is not self-consistent.
     394              :      */
     395           26 :     if (!ExpandConstraints(curConstraints, nCurConstraints))
     396            0 :         return -1;
     397              : 
     398              :     /*
     399              :      * Check for cycles involving startProc or any of the procs mentioned in
     400              :      * constraints.  We check startProc last because if it has a soft cycle
     401              :      * still to be dealt with, we want to deal with that first.
     402              :      */
     403           29 :     for (i = 0; i < nCurConstraints; i++)
     404              :     {
     405            3 :         if (FindLockCycle(curConstraints[i].waiter, softEdges, &nSoftEdges))
     406              :         {
     407            0 :             if (nSoftEdges == 0)
     408            0 :                 return -1;      /* hard deadlock detected */
     409            0 :             softFound = nSoftEdges;
     410              :         }
     411            3 :         if (FindLockCycle(curConstraints[i].blocker, softEdges, &nSoftEdges))
     412              :         {
     413            0 :             if (nSoftEdges == 0)
     414            0 :                 return -1;      /* hard deadlock detected */
     415            0 :             softFound = nSoftEdges;
     416              :         }
     417              :     }
     418           26 :     if (FindLockCycle(startProc, softEdges, &nSoftEdges))
     419              :     {
     420            8 :         if (nSoftEdges == 0)
     421            5 :             return -1;          /* hard deadlock detected */
     422            3 :         softFound = nSoftEdges;
     423              :     }
     424           21 :     return softFound;
     425              : }
     426              : 
     427              : 
     428              : /*
     429              :  * FindLockCycle -- basic check for deadlock cycles
     430              :  *
     431              :  * Scan outward from the given proc to see if there is a cycle in the
     432              :  * waits-for graph that includes this proc.  Return true if a cycle
     433              :  * is found, else false.  If a cycle is found, we return a list of
     434              :  * the "soft edges", if any, included in the cycle.  These edges could
     435              :  * potentially be eliminated by rearranging wait queues.  We also fill
     436              :  * deadlockDetails[] with information about the detected cycle; this info
     437              :  * is not used by the deadlock algorithm itself, only to print a useful
     438              :  * message after failing.
     439              :  *
     440              :  * Since we need to be able to check hypothetical configurations that would
     441              :  * exist after wait queue rearrangement, the routine pays attention to the
     442              :  * table of hypothetical queue orders in waitOrders[].  These orders will
     443              :  * be believed in preference to the actual ordering seen in the locktable.
     444              :  */
     445              : static bool
     446           37 : FindLockCycle(PGPROC *checkProc,
     447              :               EDGE *softEdges,  /* output argument */
     448              :               int *nSoftEdges)  /* output argument */
     449              : {
     450           37 :     nVisitedProcs = 0;
     451           37 :     nDeadlockDetails = 0;
     452           37 :     *nSoftEdges = 0;
     453           37 :     return FindLockCycleRecurse(checkProc, 0, softEdges, nSoftEdges);
     454              : }
     455              : 
     456              : static bool
     457          119 : FindLockCycleRecurse(PGPROC *checkProc,
     458              :                      int depth,
     459              :                      EDGE *softEdges,   /* output argument */
     460              :                      int *nSoftEdges)   /* output argument */
     461              : {
     462              :     int         i;
     463              :     dlist_iter  iter;
     464              : 
     465              :     /*
     466              :      * If this process is a lock group member, check the leader instead. (Note
     467              :      * that we might be the leader, in which case this is a no-op.)
     468              :      */
     469          119 :     if (checkProc->lockGroupLeader != NULL)
     470           28 :         checkProc = checkProc->lockGroupLeader;
     471              : 
     472              :     /*
     473              :      * Have we already seen this proc?
     474              :      */
     475          260 :     for (i = 0; i < nVisitedProcs; i++)
     476              :     {
     477          162 :         if (visitedProcs[i] == checkProc)
     478              :         {
     479              :             /* If we return to starting point, we have a deadlock cycle */
     480           21 :             if (i == 0)
     481              :             {
     482              :                 /*
     483              :                  * record total length of cycle --- outer levels will now fill
     484              :                  * deadlockDetails[]
     485              :                  */
     486              :                 Assert(depth <= MaxBackends);
     487           13 :                 nDeadlockDetails = depth;
     488              : 
     489           13 :                 return true;
     490              :             }
     491              : 
     492              :             /*
     493              :              * Otherwise, we have a cycle but it does not include the start
     494              :              * point, so say "no deadlock".
     495              :              */
     496            8 :             return false;
     497              :         }
     498              :     }
     499              :     /* Mark proc as seen */
     500              :     Assert(nVisitedProcs < MaxBackends);
     501           98 :     visitedProcs[nVisitedProcs++] = checkProc;
     502              : 
     503              :     /*
     504              :      * If the process is waiting, there is an outgoing waits-for edge to each
     505              :      * process that blocks it.
     506              :      */
     507          167 :     if (checkProc->links.next != NULL && checkProc->waitLock != NULL &&
     508           69 :         FindLockCycleRecurseMember(checkProc, checkProc, depth, softEdges,
     509              :                                    nSoftEdges))
     510           41 :         return true;
     511              : 
     512              :     /*
     513              :      * If the process is not waiting, there could still be outgoing waits-for
     514              :      * edges if it is part of a lock group, because other members of the lock
     515              :      * group might be waiting even though this process is not.  (Given lock
     516              :      * groups {A1, A2} and {B1, B2}, if A1 waits for B1 and B2 waits for A2,
     517              :      * that is a deadlock even neither of B1 and A2 are waiting for anything.)
     518              :      */
     519          102 :     dlist_foreach(iter, &checkProc->lockGroupMembers)
     520              :     {
     521              :         PGPROC     *memberProc;
     522              : 
     523           49 :         memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
     524              : 
     525           49 :         if (memberProc->links.next != NULL && memberProc->waitLock != NULL &&
     526           23 :             memberProc != checkProc &&
     527           23 :             FindLockCycleRecurseMember(memberProc, checkProc, depth, softEdges,
     528              :                                        nSoftEdges))
     529            4 :             return true;
     530              :     }
     531              : 
     532           53 :     return false;
     533              : }
     534              : 
     535              : static bool
     536           92 : FindLockCycleRecurseMember(PGPROC *checkProc,
     537              :                            PGPROC *checkProcLeader,
     538              :                            int depth,
     539              :                            EDGE *softEdges, /* output argument */
     540              :                            int *nSoftEdges) /* output argument */
     541              : {
     542              :     PGPROC     *proc;
     543           92 :     LOCK       *lock = checkProc->waitLock;
     544              :     dlist_iter  proclock_iter;
     545              :     LockMethod  lockMethodTable;
     546              :     int         conflictMask;
     547              :     int         i;
     548              :     int         numLockModes,
     549              :                 lm;
     550              : 
     551              :     /*
     552              :      * The relation extension lock can never participate in actual deadlock
     553              :      * cycle.  See Assert in LockAcquireExtended.  So, there is no advantage
     554              :      * in checking wait edges from it.
     555              :      */
     556           92 :     if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
     557            0 :         return false;
     558              : 
     559           92 :     lockMethodTable = GetLocksMethodTable(lock);
     560           92 :     numLockModes = lockMethodTable->numLockModes;
     561           92 :     conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
     562              : 
     563              :     /*
     564              :      * Scan for procs that already hold conflicting locks.  These are "hard"
     565              :      * edges in the waits-for graph.
     566              :      */
     567          273 :     dlist_foreach(proclock_iter, &lock->procLocks)
     568              :     {
     569          221 :         PROCLOCK   *proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
     570              :         PGPROC     *leader;
     571              : 
     572          221 :         proc = proclock->tag.myProc;
     573          221 :         leader = proc->lockGroupLeader == NULL ? proc : proc->lockGroupLeader;
     574              : 
     575              :         /* A proc never blocks itself or any other lock group member */
     576          221 :         if (leader != checkProcLeader)
     577              :         {
     578         1099 :             for (lm = 1; lm <= numLockModes; lm++)
     579              :             {
     580         1021 :                 if ((proclock->holdMask & LOCKBIT_ON(lm)) &&
     581              :                     (conflictMask & LOCKBIT_ON(lm)))
     582              :                 {
     583              :                     /* This proc hard-blocks checkProc */
     584           64 :                     if (FindLockCycleRecurse(proc, depth + 1,
     585              :                                              softEdges, nSoftEdges))
     586              :                     {
     587              :                         /* fill deadlockDetails[] */
     588           40 :                         DEADLOCK_INFO *info = &deadlockDetails[depth];
     589              : 
     590           40 :                         info->locktag = lock->tag;
     591           40 :                         info->lockmode = checkProc->waitLockMode;
     592           40 :                         info->pid = checkProc->pid;
     593              : 
     594           40 :                         return true;
     595              :                     }
     596              : 
     597              :                     /*
     598              :                      * No deadlock here, but see if this proc is an autovacuum
     599              :                      * that is directly hard-blocking our own proc.  If so,
     600              :                      * report it so that the caller can send a cancel signal
     601              :                      * to it, if appropriate.  If there's more than one such
     602              :                      * proc, it's indeterminate which one will be reported.
     603              :                      *
     604              :                      * We don't touch autovacuums that are indirectly blocking
     605              :                      * us; it's up to the direct blockee to take action.  This
     606              :                      * rule simplifies understanding the behavior and ensures
     607              :                      * that an autovacuum won't be canceled with less than
     608              :                      * deadlock_timeout grace period.
     609              :                      *
     610              :                      * Note we read statusFlags without any locking.  This is
     611              :                      * OK only for checking the PROC_IS_AUTOVACUUM flag,
     612              :                      * because that flag is set at process start and never
     613              :                      * reset.  There is logic elsewhere to avoid canceling an
     614              :                      * autovacuum that is working to prevent XID wraparound
     615              :                      * problems (which needs to read a different statusFlags
     616              :                      * bit), but we don't do that here to avoid grabbing
     617              :                      * ProcArrayLock.
     618              :                      */
     619           24 :                     if (checkProc == MyProc &&
     620           14 :                         proc->statusFlags & PROC_IS_AUTOVACUUM)
     621            0 :                         blocking_autovacuum_proc = proc;
     622              : 
     623              :                     /* We're done looking at this proclock */
     624           24 :                     break;
     625              :                 }
     626              :             }
     627              :         }
     628              :     }
     629              : 
     630              :     /*
     631              :      * Scan for procs that are ahead of this one in the lock's wait queue.
     632              :      * Those that have conflicting requests soft-block this one.  This must be
     633              :      * done after the hard-block search, since if another proc both hard- and
     634              :      * soft-blocks this one, we want to call it a hard edge.
     635              :      *
     636              :      * If there is a proposed re-ordering of the lock's wait order, use that
     637              :      * rather than the current wait order.
     638              :      */
     639           63 :     for (i = 0; i < nWaitOrders; i++)
     640              :     {
     641           29 :         if (waitOrders[i].lock == lock)
     642           18 :             break;
     643              :     }
     644              : 
     645           52 :     if (i < nWaitOrders)
     646              :     {
     647              :         /* Use the given hypothetical wait queue order */
     648           18 :         PGPROC    **procs = waitOrders[i].procs;
     649           18 :         int         queue_size = waitOrders[i].nProcs;
     650              : 
     651           23 :         for (i = 0; i < queue_size; i++)
     652              :         {
     653              :             PGPROC     *leader;
     654              : 
     655           23 :             proc = procs[i];
     656           23 :             leader = proc->lockGroupLeader == NULL ? proc :
     657              :                 proc->lockGroupLeader;
     658              : 
     659              :             /*
     660              :              * TopoSort will always return an ordering with group members
     661              :              * adjacent to each other in the wait queue (see comments
     662              :              * therein). So, as soon as we reach a process in the same lock
     663              :              * group as checkProc, we know we've found all the conflicts that
     664              :              * precede any member of the lock group lead by checkProcLeader.
     665              :              */
     666           23 :             if (leader == checkProcLeader)
     667           18 :                 break;
     668              : 
     669              :             /* Is there a conflict with this guy's request? */
     670            5 :             if ((LOCKBIT_ON(proc->waitLockMode) & conflictMask) != 0)
     671              :             {
     672              :                 /* This proc soft-blocks checkProc */
     673            5 :                 if (FindLockCycleRecurse(proc, depth + 1,
     674              :                                          softEdges, nSoftEdges))
     675              :                 {
     676              :                     /* fill deadlockDetails[] */
     677            0 :                     DEADLOCK_INFO *info = &deadlockDetails[depth];
     678              : 
     679            0 :                     info->locktag = lock->tag;
     680            0 :                     info->lockmode = checkProc->waitLockMode;
     681            0 :                     info->pid = checkProc->pid;
     682              : 
     683              :                     /*
     684              :                      * Add this edge to the list of soft edges in the cycle
     685              :                      */
     686              :                     Assert(*nSoftEdges < MaxBackends);
     687            0 :                     softEdges[*nSoftEdges].waiter = checkProcLeader;
     688            0 :                     softEdges[*nSoftEdges].blocker = leader;
     689            0 :                     softEdges[*nSoftEdges].lock = lock;
     690            0 :                     (*nSoftEdges)++;
     691            0 :                     return true;
     692              :                 }
     693              :             }
     694              :         }
     695              :     }
     696              :     else
     697              :     {
     698           34 :         PGPROC     *lastGroupMember = NULL;
     699              :         dlist_iter  proc_iter;
     700              :         dclist_head *waitQueue;
     701              : 
     702              :         /* Use the true lock wait queue order */
     703           34 :         waitQueue = &lock->waitProcs;
     704              : 
     705              :         /*
     706              :          * Find the last member of the lock group that is present in the wait
     707              :          * queue.  Anything after this is not a soft lock conflict. If group
     708              :          * locking is not in use, then we know immediately which process we're
     709              :          * looking for, but otherwise we've got to search the wait queue to
     710              :          * find the last process actually present.
     711              :          */
     712           34 :         if (checkProc->lockGroupLeader == NULL)
     713           23 :             lastGroupMember = checkProc;
     714              :         else
     715              :         {
     716           45 :             dclist_foreach(proc_iter, waitQueue)
     717              :             {
     718           34 :                 proc = dlist_container(PGPROC, links, proc_iter.cur);
     719              : 
     720           34 :                 if (proc->lockGroupLeader == checkProcLeader)
     721           20 :                     lastGroupMember = proc;
     722              :             }
     723              :             Assert(lastGroupMember != NULL);
     724              :         }
     725              : 
     726              :         /*
     727              :          * OK, now rescan (or scan) the queue to identify the soft conflicts.
     728              :          */
     729           51 :         dclist_foreach(proc_iter, waitQueue)
     730              :         {
     731              :             PGPROC     *leader;
     732              : 
     733           51 :             proc = dlist_container(PGPROC, links, proc_iter.cur);
     734              : 
     735           51 :             leader = proc->lockGroupLeader == NULL ? proc :
     736              :                 proc->lockGroupLeader;
     737              : 
     738              :             /* Done when we reach the target proc */
     739           51 :             if (proc == lastGroupMember)
     740           29 :                 break;
     741              : 
     742              :             /* Is there a conflict with this guy's request? */
     743           22 :             if ((LOCKBIT_ON(proc->waitLockMode) & conflictMask) != 0 &&
     744              :                 leader != checkProcLeader)
     745              :             {
     746              :                 /* This proc soft-blocks checkProc */
     747           13 :                 if (FindLockCycleRecurse(proc, depth + 1,
     748              :                                          softEdges, nSoftEdges))
     749              :                 {
     750              :                     /* fill deadlockDetails[] */
     751            5 :                     DEADLOCK_INFO *info = &deadlockDetails[depth];
     752              : 
     753            5 :                     info->locktag = lock->tag;
     754            5 :                     info->lockmode = checkProc->waitLockMode;
     755            5 :                     info->pid = checkProc->pid;
     756              : 
     757              :                     /*
     758              :                      * Add this edge to the list of soft edges in the cycle
     759              :                      */
     760              :                     Assert(*nSoftEdges < MaxBackends);
     761            5 :                     softEdges[*nSoftEdges].waiter = checkProcLeader;
     762            5 :                     softEdges[*nSoftEdges].blocker = leader;
     763            5 :                     softEdges[*nSoftEdges].lock = lock;
     764            5 :                     (*nSoftEdges)++;
     765            5 :                     return true;
     766              :                 }
     767              :             }
     768              :         }
     769              :     }
     770              : 
     771              :     /*
     772              :      * No conflict detected here.
     773              :      */
     774           47 :     return false;
     775              : }
     776              : 
     777              : 
     778              : /*
     779              :  * ExpandConstraints -- expand a list of constraints into a set of
     780              :  *      specific new orderings for affected wait queues
     781              :  *
     782              :  * Input is a list of soft edges to be reversed.  The output is a list
     783              :  * of nWaitOrders WAIT_ORDER structs in waitOrders[], with PGPROC array
     784              :  * workspace in waitOrderProcs[].
     785              :  *
     786              :  * Returns true if able to build an ordering that satisfies all the
     787              :  * constraints, false if not (there are contradictory constraints).
     788              :  */
     789              : static bool
     790           26 : ExpandConstraints(EDGE *constraints,
     791              :                   int nConstraints)
     792              : {
     793           26 :     int         nWaitOrderProcs = 0;
     794              :     int         i,
     795              :                 j;
     796              : 
     797           26 :     nWaitOrders = 0;
     798              : 
     799              :     /*
     800              :      * Scan constraint list backwards.  This is because the last-added
     801              :      * constraint is the only one that could fail, and so we want to test it
     802              :      * for inconsistency first.
     803              :      */
     804           29 :     for (i = nConstraints; --i >= 0;)
     805              :     {
     806            3 :         LOCK       *lock = constraints[i].lock;
     807              : 
     808              :         /* Did we already make a list for this lock? */
     809            3 :         for (j = nWaitOrders; --j >= 0;)
     810              :         {
     811            0 :             if (waitOrders[j].lock == lock)
     812            0 :                 break;
     813              :         }
     814            3 :         if (j >= 0)
     815            0 :             continue;
     816              :         /* No, so allocate a new list */
     817            3 :         waitOrders[nWaitOrders].lock = lock;
     818            3 :         waitOrders[nWaitOrders].procs = waitOrderProcs + nWaitOrderProcs;
     819            3 :         waitOrders[nWaitOrders].nProcs = dclist_count(&lock->waitProcs);
     820            3 :         nWaitOrderProcs += dclist_count(&lock->waitProcs);
     821              :         Assert(nWaitOrderProcs <= MaxBackends);
     822              : 
     823              :         /*
     824              :          * Do the topo sort.  TopoSort need not examine constraints after this
     825              :          * one, since they must be for different locks.
     826              :          */
     827            3 :         if (!TopoSort(lock, constraints, i + 1,
     828            3 :                       waitOrders[nWaitOrders].procs))
     829            0 :             return false;
     830            3 :         nWaitOrders++;
     831              :     }
     832           26 :     return true;
     833              : }
     834              : 
     835              : 
     836              : /*
     837              :  * TopoSort -- topological sort of a wait queue
     838              :  *
     839              :  * Generate a re-ordering of a lock's wait queue that satisfies given
     840              :  * constraints about certain procs preceding others.  (Each such constraint
     841              :  * is a fact of a partial ordering.)  Minimize rearrangement of the queue
     842              :  * not needed to achieve the partial ordering.
     843              :  *
     844              :  * This is a lot simpler and slower than, for example, the topological sort
     845              :  * algorithm shown in Knuth's Volume 1.  However, Knuth's method doesn't
     846              :  * try to minimize the damage to the existing order.  In practice we are
     847              :  * not likely to be working with more than a few constraints, so the apparent
     848              :  * slowness of the algorithm won't really matter.
     849              :  *
     850              :  * The initial queue ordering is taken directly from the lock's wait queue.
     851              :  * The output is an array of PGPROC pointers, of length equal to the lock's
     852              :  * wait queue length (the caller is responsible for providing this space).
     853              :  * The partial order is specified by an array of EDGE structs.  Each EDGE
     854              :  * is one that we need to reverse, therefore the "waiter" must appear before
     855              :  * the "blocker" in the output array.  The EDGE array may well contain
     856              :  * edges associated with other locks; these should be ignored.
     857              :  *
     858              :  * Returns true if able to build an ordering that satisfies all the
     859              :  * constraints, false if not (there are contradictory constraints).
     860              :  */
     861              : static bool
     862            3 : TopoSort(LOCK *lock,
     863              :          EDGE *constraints,
     864              :          int nConstraints,
     865              :          PGPROC **ordering)     /* output argument */
     866              : {
     867            3 :     dclist_head *waitQueue = &lock->waitProcs;
     868            3 :     int         queue_size = dclist_count(waitQueue);
     869              :     PGPROC     *proc;
     870              :     int         i,
     871              :                 j,
     872              :                 jj,
     873              :                 k,
     874              :                 kk,
     875              :                 last;
     876              :     dlist_iter  proc_iter;
     877              : 
     878              :     /* First, fill topoProcs[] array with the procs in their current order */
     879            3 :     i = 0;
     880           12 :     dclist_foreach(proc_iter, waitQueue)
     881              :     {
     882            9 :         proc = dlist_container(PGPROC, links, proc_iter.cur);
     883            9 :         topoProcs[i++] = proc;
     884              :     }
     885              :     Assert(i == queue_size);
     886              : 
     887              :     /*
     888              :      * Scan the constraints, and for each proc in the array, generate a count
     889              :      * of the number of constraints that say it must be before something else,
     890              :      * plus a list of the constraints that say it must be after something
     891              :      * else. The count for the j'th proc is stored in beforeConstraints[j],
     892              :      * and the head of its list in afterConstraints[j].  Each constraint
     893              :      * stores its list link in constraints[i].link (note any constraint will
     894              :      * be in just one list). The array index for the before-proc of the i'th
     895              :      * constraint is remembered in constraints[i].pred.
     896              :      *
     897              :      * Note that it's not necessarily the case that every constraint affects
     898              :      * this particular wait queue.  Prior to group locking, a process could be
     899              :      * waiting for at most one lock.  But a lock group can be waiting for
     900              :      * zero, one, or multiple locks.  Since topoProcs[] is an array of the
     901              :      * processes actually waiting, while constraints[] is an array of group
     902              :      * leaders, we've got to scan through topoProcs[] for each constraint,
     903              :      * checking whether both a waiter and a blocker for that group are
     904              :      * present.  If so, the constraint is relevant to this wait queue; if not,
     905              :      * it isn't.
     906              :      */
     907            6 :     MemSet(beforeConstraints, 0, queue_size * sizeof(int));
     908            6 :     MemSet(afterConstraints, 0, queue_size * sizeof(int));
     909            6 :     for (i = 0; i < nConstraints; i++)
     910              :     {
     911              :         /*
     912              :          * Find a representative process that is on the lock queue and part of
     913              :          * the waiting lock group.  This may or may not be the leader, which
     914              :          * may or may not be waiting at all.  If there are any other processes
     915              :          * in the same lock group on the queue, set their number of
     916              :          * beforeConstraints to -1 to indicate that they should be emitted
     917              :          * with their groupmates rather than considered separately.
     918              :          *
     919              :          * In this loop and the similar one just below, it's critical that we
     920              :          * consistently select the same representative member of any one lock
     921              :          * group, so that all the constraints are associated with the same
     922              :          * proc, and the -1's are only associated with not-representative
     923              :          * members.  We select the last one in the topoProcs array.
     924              :          */
     925            3 :         proc = constraints[i].waiter;
     926              :         Assert(proc != NULL);
     927            3 :         jj = -1;
     928           12 :         for (j = queue_size; --j >= 0;)
     929              :         {
     930            9 :             PGPROC     *waiter = topoProcs[j];
     931              : 
     932            9 :             if (waiter == proc || waiter->lockGroupLeader == proc)
     933              :             {
     934              :                 Assert(waiter->waitLock == lock);
     935            5 :                 if (jj == -1)
     936            3 :                     jj = j;
     937              :                 else
     938              :                 {
     939              :                     Assert(beforeConstraints[j] <= 0);
     940            2 :                     beforeConstraints[j] = -1;
     941              :                 }
     942              :             }
     943              :         }
     944              : 
     945              :         /* If no matching waiter, constraint is not relevant to this lock. */
     946            3 :         if (jj < 0)
     947            0 :             continue;
     948              : 
     949              :         /*
     950              :          * Similarly, find a representative process that is on the lock queue
     951              :          * and waiting for the blocking lock group.  Again, this could be the
     952              :          * leader but does not need to be.
     953              :          */
     954            3 :         proc = constraints[i].blocker;
     955              :         Assert(proc != NULL);
     956            3 :         kk = -1;
     957           12 :         for (k = queue_size; --k >= 0;)
     958              :         {
     959            9 :             PGPROC     *blocker = topoProcs[k];
     960              : 
     961            9 :             if (blocker == proc || blocker->lockGroupLeader == proc)
     962              :             {
     963              :                 Assert(blocker->waitLock == lock);
     964            3 :                 if (kk == -1)
     965            3 :                     kk = k;
     966              :                 else
     967              :                 {
     968              :                     Assert(beforeConstraints[k] <= 0);
     969            0 :                     beforeConstraints[k] = -1;
     970              :                 }
     971              :             }
     972              :         }
     973              : 
     974              :         /* If no matching blocker, constraint is not relevant to this lock. */
     975            3 :         if (kk < 0)
     976            0 :             continue;
     977              : 
     978              :         Assert(beforeConstraints[jj] >= 0);
     979            3 :         beforeConstraints[jj]++;    /* waiter must come before */
     980              :         /* add this constraint to list of after-constraints for blocker */
     981            3 :         constraints[i].pred = jj;
     982            3 :         constraints[i].link = afterConstraints[kk];
     983            3 :         afterConstraints[kk] = i + 1;
     984              :     }
     985              : 
     986              :     /*--------------------
     987              :      * Now scan the topoProcs array backwards.  At each step, output the
     988              :      * last proc that has no remaining before-constraints plus any other
     989              :      * members of the same lock group; then decrease the beforeConstraints
     990              :      * count of each of the procs it was constrained against.
     991              :      * i = index of ordering[] entry we want to output this time
     992              :      * j = search index for topoProcs[]
     993              :      * k = temp for scanning constraint list for proc j
     994              :      * last = last non-null index in topoProcs (avoid redundant searches)
     995              :      *--------------------
     996              :      */
     997            3 :     last = queue_size - 1;
     998           10 :     for (i = queue_size - 1; i >= 0;)
     999              :     {
    1000              :         int         c;
    1001            7 :         int         nmatches = 0;
    1002              : 
    1003              :         /* Find next candidate to output */
    1004            7 :         while (topoProcs[last] == NULL)
    1005            0 :             last--;
    1006           14 :         for (j = last; j >= 0; j--)
    1007              :         {
    1008           14 :             if (topoProcs[j] != NULL && beforeConstraints[j] == 0)
    1009            7 :                 break;
    1010              :         }
    1011              : 
    1012              :         /* If no available candidate, topological sort fails */
    1013            7 :         if (j < 0)
    1014            0 :             return false;
    1015              : 
    1016              :         /*
    1017              :          * Output everything in the lock group.  There's no point in
    1018              :          * outputting an ordering where members of the same lock group are not
    1019              :          * consecutive on the wait queue: if some other waiter is between two
    1020              :          * requests that belong to the same group, then either it conflicts
    1021              :          * with both of them and is certainly not a solution; or it conflicts
    1022              :          * with at most one of them and is thus isomorphic to an ordering
    1023              :          * where the group members are consecutive.
    1024              :          */
    1025            7 :         proc = topoProcs[j];
    1026            7 :         if (proc->lockGroupLeader != NULL)
    1027            2 :             proc = proc->lockGroupLeader;
    1028              :         Assert(proc != NULL);
    1029           28 :         for (c = 0; c <= last; ++c)
    1030              :         {
    1031           21 :             if (topoProcs[c] == proc || (topoProcs[c] != NULL &&
    1032           11 :                                          topoProcs[c]->lockGroupLeader == proc))
    1033              :             {
    1034            9 :                 ordering[i - nmatches] = topoProcs[c];
    1035            9 :                 topoProcs[c] = NULL;
    1036            9 :                 ++nmatches;
    1037              :             }
    1038              :         }
    1039              :         Assert(nmatches > 0);
    1040            7 :         i -= nmatches;
    1041              : 
    1042              :         /* Update beforeConstraints counts of its predecessors */
    1043           10 :         for (k = afterConstraints[j]; k > 0; k = constraints[k - 1].link)
    1044            3 :             beforeConstraints[constraints[k - 1].pred]--;
    1045              :     }
    1046              : 
    1047              :     /* Done */
    1048            3 :     return true;
    1049              : }
    1050              : 
    1051              : #ifdef DEBUG_DEADLOCK
    1052              : static void
    1053              : PrintLockQueue(LOCK *lock, const char *info)
    1054              : {
    1055              :     dclist_head *waitQueue = &lock->waitProcs;
    1056              :     dlist_iter  proc_iter;
    1057              : 
    1058              :     printf("%s lock %p queue ", info, lock);
    1059              : 
    1060              :     dclist_foreach(proc_iter, waitQueue)
    1061              :     {
    1062              :         PGPROC     *proc = dlist_container(PGPROC, links, proc_iter.cur);
    1063              : 
    1064              :         printf(" %d", proc->pid);
    1065              :     }
    1066              :     printf("\n");
    1067              :     fflush(stdout);
    1068              : }
    1069              : #endif
    1070              : 
    1071              : /*
    1072              :  * Report a detected deadlock, with available details.
    1073              :  */
    1074              : void
    1075            6 : DeadLockReport(void)
    1076              : {
    1077              :     StringInfoData clientbuf;   /* errdetail for client */
    1078              :     StringInfoData logbuf;      /* errdetail for server log */
    1079              :     StringInfoData locktagbuf;
    1080              :     int         i;
    1081              : 
    1082            6 :     initStringInfo(&clientbuf);
    1083            6 :     initStringInfo(&logbuf);
    1084            6 :     initStringInfo(&locktagbuf);
    1085              : 
    1086              :     /* Generate the "waits for" lines sent to the client */
    1087           25 :     for (i = 0; i < nDeadlockDetails; i++)
    1088              :     {
    1089           19 :         DEADLOCK_INFO *info = &deadlockDetails[i];
    1090              :         int         nextpid;
    1091              : 
    1092              :         /* The last proc waits for the first one... */
    1093           19 :         if (i < nDeadlockDetails - 1)
    1094           13 :             nextpid = info[1].pid;
    1095              :         else
    1096            6 :             nextpid = deadlockDetails[0].pid;
    1097              : 
    1098              :         /* reset locktagbuf to hold next object description */
    1099           19 :         resetStringInfo(&locktagbuf);
    1100              : 
    1101           19 :         DescribeLockTag(&locktagbuf, &info->locktag);
    1102              : 
    1103           19 :         if (i > 0)
    1104           13 :             appendStringInfoChar(&clientbuf, '\n');
    1105              : 
    1106           38 :         appendStringInfo(&clientbuf,
    1107           19 :                          _("Process %d waits for %s on %s; blocked by process %d."),
    1108              :                          info->pid,
    1109           19 :                          GetLockmodeName(info->locktag.locktag_lockmethodid,
    1110              :                                          info->lockmode),
    1111              :                          locktagbuf.data,
    1112              :                          nextpid);
    1113              :     }
    1114              : 
    1115              :     /* Duplicate all the above for the server ... */
    1116            6 :     appendBinaryStringInfo(&logbuf, clientbuf.data, clientbuf.len);
    1117              : 
    1118              :     /* ... and add info about query strings */
    1119           25 :     for (i = 0; i < nDeadlockDetails; i++)
    1120              :     {
    1121           19 :         DEADLOCK_INFO *info = &deadlockDetails[i];
    1122              : 
    1123           19 :         appendStringInfoChar(&logbuf, '\n');
    1124              : 
    1125           19 :         appendStringInfo(&logbuf,
    1126           19 :                          _("Process %d: %s"),
    1127              :                          info->pid,
    1128              :                          pgstat_get_backend_current_activity(info->pid, false));
    1129              :     }
    1130              : 
    1131            6 :     pgstat_report_deadlock();
    1132              : 
    1133            6 :     ereport(ERROR,
    1134              :             (errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
    1135              :              errmsg("deadlock detected"),
    1136              :              errdetail_internal("%s", clientbuf.data),
    1137              :              errdetail_log("%s", logbuf.data),
    1138              :              errhint("See server log for query details.")));
    1139              : }
    1140              : 
    1141              : /*
    1142              :  * RememberSimpleDeadLock: set up info for DeadLockReport when ProcSleep
    1143              :  * detects a trivial (two-way) deadlock.  proc1 wants to block for lockmode
    1144              :  * on lock, but proc2 is already waiting and would be blocked by proc1.
    1145              :  */
    1146              : void
    1147            1 : RememberSimpleDeadLock(PGPROC *proc1,
    1148              :                        LOCKMODE lockmode,
    1149              :                        LOCK *lock,
    1150              :                        PGPROC *proc2)
    1151              : {
    1152            1 :     DEADLOCK_INFO *info = &deadlockDetails[0];
    1153              : 
    1154            1 :     info->locktag = lock->tag;
    1155            1 :     info->lockmode = lockmode;
    1156            1 :     info->pid = proc1->pid;
    1157            1 :     info++;
    1158            1 :     info->locktag = proc2->waitLock->tag;
    1159            1 :     info->lockmode = proc2->waitLockMode;
    1160            1 :     info->pid = proc2->pid;
    1161            1 :     nDeadlockDetails = 2;
    1162            1 : }
        

Generated by: LCOV version 2.0-1