LCOV - code coverage report
Current view: top level - src/backend/storage/lmgr - deadlock.c (source / functions) Hit Total Coverage
Test: PostgreSQL 13beta1 Lines: 266 307 86.6 %
Date: 2020-06-01 10:07:15 Functions: 11 12 91.7 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * deadlock.c
       4             :  *    POSTGRES deadlock detection code
       5             :  *
       6             :  * See src/backend/storage/lmgr/README for a description of the deadlock
       7             :  * detection and resolution algorithms.
       8             :  *
       9             :  *
      10             :  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
      11             :  * Portions Copyright (c) 1994, Regents of the University of California
      12             :  *
      13             :  *
      14             :  * IDENTIFICATION
      15             :  *    src/backend/storage/lmgr/deadlock.c
      16             :  *
      17             :  *  Interface:
      18             :  *
      19             :  *  DeadLockCheck()
      20             :  *  DeadLockReport()
      21             :  *  RememberSimpleDeadLock()
      22             :  *  InitDeadLockChecking()
      23             :  *
      24             :  *-------------------------------------------------------------------------
      25             :  */
      26             : #include "postgres.h"
      27             : 
      28             : #include "miscadmin.h"
      29             : #include "pg_trace.h"
      30             : #include "pgstat.h"
      31             : #include "storage/lmgr.h"
      32             : #include "storage/proc.h"
      33             : #include "utils/memutils.h"
      34             : 
      35             : 
      36             : /*
      37             :  * One edge in the waits-for graph.
      38             :  *
      39             :  * waiter and blocker may or may not be members of a lock group, but if either
      40             :  * is, it will be the leader rather than any other member of the lock group.
      41             :  * The group leaders act as representatives of the whole group even though
      42             :  * those particular processes need not be waiting at all.  There will be at
      43             :  * least one member of the waiter's lock group on the wait queue for the given
      44             :  * lock, maybe more.
      45             :  */
      46             : typedef struct
      47             : {
      48             :     PGPROC     *waiter;         /* the leader of the waiting lock group */
      49             :     PGPROC     *blocker;        /* the leader of the group it is waiting for */
      50             :     LOCK       *lock;           /* the lock being waited for */
      51             :     int         pred;           /* workspace for TopoSort */
      52             :     int         link;           /* workspace for TopoSort */
      53             : } EDGE;
      54             : 
      55             : /* One potential reordering of a lock's wait queue */
      56             : typedef struct
      57             : {
      58             :     LOCK       *lock;           /* the lock whose wait queue is described */
      59             :     PGPROC    **procs;          /* array of PGPROC *'s in new wait order */
      60             :     int         nProcs;
      61             : } WAIT_ORDER;
      62             : 
      63             : /*
      64             :  * Information saved about each edge in a detected deadlock cycle.  This
      65             :  * is used to print a diagnostic message upon failure.
      66             :  *
      67             :  * Note: because we want to examine this info after releasing the lock
      68             :  * manager's partition locks, we can't just store LOCK and PGPROC pointers;
      69             :  * we must extract out all the info we want to be able to print.
      70             :  */
      71             : typedef struct
      72             : {
      73             :     LOCKTAG     locktag;        /* ID of awaited lock object */
      74             :     LOCKMODE    lockmode;       /* type of lock we're waiting for */
      75             :     int         pid;            /* PID of blocked backend */
      76             : } DEADLOCK_INFO;
      77             : 
      78             : 
      79             : static bool DeadLockCheckRecurse(PGPROC *proc);
      80             : static int  TestConfiguration(PGPROC *startProc);
      81             : static bool FindLockCycle(PGPROC *checkProc,
      82             :                           EDGE *softEdges, int *nSoftEdges);
      83             : static bool FindLockCycleRecurse(PGPROC *checkProc, int depth,
      84             :                                  EDGE *softEdges, int *nSoftEdges);
      85             : static bool FindLockCycleRecurseMember(PGPROC *checkProc,
      86             :                                        PGPROC *checkProcLeader,
      87             :                                        int depth, EDGE *softEdges, int *nSoftEdges);
      88             : static bool ExpandConstraints(EDGE *constraints, int nConstraints);
      89             : static bool TopoSort(LOCK *lock, EDGE *constraints, int nConstraints,
      90             :                      PGPROC **ordering);
      91             : 
      92             : #ifdef DEBUG_DEADLOCK
      93             : static void PrintLockQueue(LOCK *lock, const char *info);
      94             : #endif
      95             : 
      96             : 
      97             : /*
      98             :  * Working space for the deadlock detector
      99             :  */
     100             : 
     101             : /* Workspace for FindLockCycle */
     102             : static PGPROC **visitedProcs;   /* Array of visited procs */
     103             : static int  nVisitedProcs;
     104             : 
     105             : /* Workspace for TopoSort */
     106             : static PGPROC **topoProcs;      /* Array of not-yet-output procs */
     107             : static int *beforeConstraints;  /* Counts of remaining before-constraints */
     108             : static int *afterConstraints;   /* List head for after-constraints */
     109             : 
     110             : /* Output area for ExpandConstraints */
     111             : static WAIT_ORDER *waitOrders;  /* Array of proposed queue rearrangements */
     112             : static int  nWaitOrders;
     113             : static PGPROC **waitOrderProcs; /* Space for waitOrders queue contents */
     114             : 
     115             : /* Current list of constraints being considered */
     116             : static EDGE *curConstraints;
     117             : static int  nCurConstraints;
     118             : static int  maxCurConstraints;
     119             : 
     120             : /* Storage space for results from FindLockCycle */
     121             : static EDGE *possibleConstraints;
     122             : static int  nPossibleConstraints;
     123             : static int  maxPossibleConstraints;
     124             : static DEADLOCK_INFO *deadlockDetails;
     125             : static int  nDeadlockDetails;
     126             : 
     127             : /* PGPROC pointer of any blocking autovacuum worker found */
     128             : static PGPROC *blocking_autovacuum_proc = NULL;
     129             : 
     130             : 
     131             : /*
     132             :  * InitDeadLockChecking -- initialize deadlock checker during backend startup
     133             :  *
     134             :  * This does per-backend initialization of the deadlock checker; primarily,
     135             :  * allocation of working memory for DeadLockCheck.  We do this per-backend
     136             :  * since there's no percentage in making the kernel do copy-on-write
     137             :  * inheritance of workspace from the postmaster.  We want to allocate the
     138             :  * space at startup because (a) the deadlock checker might be invoked when
     139             :  * there's no free memory left, and (b) the checker is normally run inside a
     140             :  * signal handler, which is a very dangerous place to invoke palloc from.
     141             :  */
     142             : void
     143       11086 : InitDeadLockChecking(void)
     144             : {
     145             :     MemoryContext oldcxt;
     146             : 
     147             :     /* Make sure allocations are permanent */
     148       11086 :     oldcxt = MemoryContextSwitchTo(TopMemoryContext);
     149             : 
     150             :     /*
     151             :      * FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
     152             :      * deadlockDetails[].
     153             :      */
     154       11086 :     visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
     155       11086 :     deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO));
     156             : 
     157             :     /*
     158             :      * TopoSort needs to consider at most MaxBackends wait-queue entries, and
     159             :      * it needn't run concurrently with FindLockCycle.
     160             :      */
     161       11086 :     topoProcs = visitedProcs;   /* re-use this space */
     162       11086 :     beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
     163       11086 :     afterConstraints = (int *) palloc(MaxBackends * sizeof(int));
     164             : 
     165             :     /*
     166             :      * We need to consider rearranging at most MaxBackends/2 wait queues
     167             :      * (since it takes at least two waiters in a queue to create a soft edge),
     168             :      * and the expanded form of the wait queues can't involve more than
     169             :      * MaxBackends total waiters.
     170             :      */
     171       11086 :     waitOrders = (WAIT_ORDER *)
     172       11086 :         palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
     173       11086 :     waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
     174             : 
     175             :     /*
     176             :      * Allow at most MaxBackends distinct constraints in a configuration. (Is
     177             :      * this enough?  In practice it seems it should be, but I don't quite see
     178             :      * how to prove it.  If we run out, we might fail to find a workable wait
     179             :      * queue rearrangement even though one exists.)  NOTE that this number
     180             :      * limits the maximum recursion depth of DeadLockCheckRecurse. Making it
     181             :      * really big might potentially allow a stack-overflow problem.
     182             :      */
     183       11086 :     maxCurConstraints = MaxBackends;
     184       11086 :     curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));
     185             : 
     186             :     /*
     187             :      * Allow up to 3*MaxBackends constraints to be saved without having to
     188             :      * re-run TestConfiguration.  (This is probably more than enough, but we
     189             :      * can survive if we run low on space by doing excess runs of
     190             :      * TestConfiguration to re-compute constraint lists each time needed.) The
     191             :      * last MaxBackends entries in possibleConstraints[] are reserved as
     192             :      * output workspace for FindLockCycle.
     193             :      */
     194       11086 :     maxPossibleConstraints = MaxBackends * 4;
     195       11086 :     possibleConstraints =
     196       11086 :         (EDGE *) palloc(maxPossibleConstraints * sizeof(EDGE));
     197             : 
     198       11086 :     MemoryContextSwitchTo(oldcxt);
     199       11086 : }
     200             : 
     201             : /*
     202             :  * DeadLockCheck -- Checks for deadlocks for a given process
     203             :  *
     204             :  * This code looks for deadlocks involving the given process.  If any
     205             :  * are found, it tries to rearrange lock wait queues to resolve the
     206             :  * deadlock.  If resolution is impossible, return DS_HARD_DEADLOCK ---
     207             :  * the caller is then expected to abort the given proc's transaction.
     208             :  *
     209             :  * Caller must already have locked all partitions of the lock tables.
     210             :  *
     211             :  * On failure, deadlock details are recorded in deadlockDetails[] for
     212             :  * subsequent printing by DeadLockReport().  That activity is separate
     213             :  * because (a) we don't want to do it while holding all those LWLocks,
     214             :  * and (b) we are typically invoked inside a signal handler.
     215             :  */
     216             : DeadLockState
     217          30 : DeadLockCheck(PGPROC *proc)
     218             : {
     219             :     int         i,
     220             :                 j;
     221             : 
     222             :     /* Initialize to "no constraints" */
     223          30 :     nCurConstraints = 0;
     224          30 :     nPossibleConstraints = 0;
     225          30 :     nWaitOrders = 0;
     226             : 
     227             :     /* Initialize to not blocked by an autovacuum worker */
     228          30 :     blocking_autovacuum_proc = NULL;
     229             : 
     230             :     /* Search for deadlocks and possible fixes */
     231          30 :     if (DeadLockCheckRecurse(proc))
     232             :     {
     233             :         /*
     234             :          * Call FindLockCycle one more time, to record the correct
     235             :          * deadlockDetails[] for the basic state with no rearrangements.
     236             :          */
     237             :         int         nSoftEdges;
     238             : 
     239             :         TRACE_POSTGRESQL_DEADLOCK_FOUND();
     240             : 
     241           2 :         nWaitOrders = 0;
     242           2 :         if (!FindLockCycle(proc, possibleConstraints, &nSoftEdges))
     243           0 :             elog(FATAL, "deadlock seems to have disappeared");
     244             : 
     245           2 :         return DS_HARD_DEADLOCK;    /* cannot find a non-deadlocked state */
     246             :     }
     247             : 
     248             :     /* Apply any needed rearrangements of wait queues */
     249          34 :     for (i = 0; i < nWaitOrders; i++)
     250             :     {
     251           6 :         LOCK       *lock = waitOrders[i].lock;
     252           6 :         PGPROC    **procs = waitOrders[i].procs;
     253           6 :         int         nProcs = waitOrders[i].nProcs;
     254           6 :         PROC_QUEUE *waitQueue = &(lock->waitProcs);
     255             : 
     256             :         Assert(nProcs == waitQueue->size);
     257             : 
     258             : #ifdef DEBUG_DEADLOCK
     259             :         PrintLockQueue(lock, "DeadLockCheck:");
     260             : #endif
     261             : 
     262             :         /* Reset the queue and re-add procs in the desired order */
     263           6 :         ProcQueueInit(waitQueue);
     264          24 :         for (j = 0; j < nProcs; j++)
     265             :         {
     266          18 :             SHMQueueInsertBefore(&(waitQueue->links), &(procs[j]->links));
     267          18 :             waitQueue->size++;
     268             :         }
     269             : 
     270             : #ifdef DEBUG_DEADLOCK
     271             :         PrintLockQueue(lock, "rearranged to:");
     272             : #endif
     273             : 
     274             :         /* See if any waiters for the lock can be woken up now */
     275           6 :         ProcLockWakeup(GetLocksMethodTable(lock), lock);
     276             :     }
     277             : 
     278             :     /* Return code tells caller if we had to escape a deadlock or not */
     279          28 :     if (nWaitOrders > 0)
     280           6 :         return DS_SOFT_DEADLOCK;
     281          22 :     else if (blocking_autovacuum_proc != NULL)
     282           0 :         return DS_BLOCKED_BY_AUTOVACUUM;
     283             :     else
     284          22 :         return DS_NO_DEADLOCK;
     285             : }
     286             : 
     287             : /*
     288             :  * Return the PGPROC of the autovacuum that's blocking a process.
     289             :  *
     290             :  * We reset the saved pointer as soon as we pass it back.
     291             :  */
     292             : PGPROC *
     293           0 : GetBlockingAutoVacuumPgproc(void)
     294             : {
     295             :     PGPROC     *ptr;
     296             : 
     297           0 :     ptr = blocking_autovacuum_proc;
     298           0 :     blocking_autovacuum_proc = NULL;
     299             : 
     300           0 :     return ptr;
     301             : }
     302             : 
     303             : /*
     304             :  * DeadLockCheckRecurse -- recursively search for valid orderings
     305             :  *
     306             :  * curConstraints[] holds the current set of constraints being considered
     307             :  * by an outer level of recursion.  Add to this each possible solution
     308             :  * constraint for any cycle detected at this level.
     309             :  *
     310             :  * Returns true if no solution exists.  Returns false if a deadlock-free
     311             :  * state is attainable, in which case waitOrders[] shows the required
     312             :  * rearrangements of lock wait queues (if any).
     313             :  */
     314             : static bool
     315          36 : DeadLockCheckRecurse(PGPROC *proc)
     316             : {
     317             :     int         nEdges;
     318             :     int         oldPossibleConstraints;
     319             :     bool        savedList;
     320             :     int         i;
     321             : 
     322          36 :     nEdges = TestConfiguration(proc);
     323          36 :     if (nEdges < 0)
     324           2 :         return true;            /* hard deadlock --- no solution */
     325          34 :     if (nEdges == 0)
     326          28 :         return false;           /* good configuration found */
     327           6 :     if (nCurConstraints >= maxCurConstraints)
     328           0 :         return true;            /* out of room for active constraints? */
     329           6 :     oldPossibleConstraints = nPossibleConstraints;
     330           6 :     if (nPossibleConstraints + nEdges + MaxBackends <= maxPossibleConstraints)
     331             :     {
     332             :         /* We can save the edge list in possibleConstraints[] */
     333           6 :         nPossibleConstraints += nEdges;
     334           6 :         savedList = true;
     335             :     }
     336             :     else
     337             :     {
     338             :         /* Not room; will need to regenerate the edges on-the-fly */
     339           0 :         savedList = false;
     340             :     }
     341             : 
     342             :     /*
     343             :      * Try each available soft edge as an addition to the configuration.
     344             :      */
     345           6 :     for (i = 0; i < nEdges; i++)
     346             :     {
     347           6 :         if (!savedList && i > 0)
     348             :         {
     349             :             /* Regenerate the list of possible added constraints */
     350           0 :             if (nEdges != TestConfiguration(proc))
     351           0 :                 elog(FATAL, "inconsistent results during deadlock check");
     352             :         }
     353           6 :         curConstraints[nCurConstraints] =
     354           6 :             possibleConstraints[oldPossibleConstraints + i];
     355           6 :         nCurConstraints++;
     356           6 :         if (!DeadLockCheckRecurse(proc))
     357           6 :             return false;       /* found a valid solution! */
     358             :         /* give up on that added constraint, try again */
     359           0 :         nCurConstraints--;
     360             :     }
     361           0 :     nPossibleConstraints = oldPossibleConstraints;
     362           0 :     return true;                /* no solution found */
     363             : }
     364             : 
     365             : 
     366             : /*--------------------
     367             :  * Test a configuration (current set of constraints) for validity.
     368             :  *
     369             :  * Returns:
     370             :  *      0: the configuration is good (no deadlocks)
     371             :  *     -1: the configuration has a hard deadlock or is not self-consistent
     372             :  *      >0: the configuration has one or more soft deadlocks
     373             :  *
     374             :  * In the soft-deadlock case, one of the soft cycles is chosen arbitrarily
     375             :  * and a list of its soft edges is returned beginning at
     376             :  * possibleConstraints+nPossibleConstraints.  The return value is the
     377             :  * number of soft edges.
     378             :  *--------------------
     379             :  */
     380             : static int
     381          36 : TestConfiguration(PGPROC *startProc)
     382             : {
     383          36 :     int         softFound = 0;
     384          36 :     EDGE       *softEdges = possibleConstraints + nPossibleConstraints;
     385             :     int         nSoftEdges;
     386             :     int         i;
     387             : 
     388             :     /*
     389             :      * Make sure we have room for FindLockCycle's output.
     390             :      */
     391          36 :     if (nPossibleConstraints + MaxBackends > maxPossibleConstraints)
     392           0 :         return -1;
     393             : 
     394             :     /*
     395             :      * Expand current constraint set into wait orderings.  Fail if the
     396             :      * constraint set is not self-consistent.
     397             :      */
     398          36 :     if (!ExpandConstraints(curConstraints, nCurConstraints))
     399           0 :         return -1;
     400             : 
     401             :     /*
     402             :      * Check for cycles involving startProc or any of the procs mentioned in
     403             :      * constraints.  We check startProc last because if it has a soft cycle
     404             :      * still to be dealt with, we want to deal with that first.
     405             :      */
     406          42 :     for (i = 0; i < nCurConstraints; i++)
     407             :     {
     408           6 :         if (FindLockCycle(curConstraints[i].waiter, softEdges, &nSoftEdges))
     409             :         {
     410           0 :             if (nSoftEdges == 0)
     411           0 :                 return -1;      /* hard deadlock detected */
     412           0 :             softFound = nSoftEdges;
     413             :         }
     414           6 :         if (FindLockCycle(curConstraints[i].blocker, softEdges, &nSoftEdges))
     415             :         {
     416           0 :             if (nSoftEdges == 0)
     417           0 :                 return -1;      /* hard deadlock detected */
     418           0 :             softFound = nSoftEdges;
     419             :         }
     420             :     }
     421          36 :     if (FindLockCycle(startProc, softEdges, &nSoftEdges))
     422             :     {
     423           8 :         if (nSoftEdges == 0)
     424           2 :             return -1;          /* hard deadlock detected */
     425           6 :         softFound = nSoftEdges;
     426             :     }
     427          34 :     return softFound;
     428             : }
     429             : 
     430             : 
     431             : /*
     432             :  * FindLockCycle -- basic check for deadlock cycles
     433             :  *
     434             :  * Scan outward from the given proc to see if there is a cycle in the
     435             :  * waits-for graph that includes this proc.  Return true if a cycle
     436             :  * is found, else false.  If a cycle is found, we return a list of
     437             :  * the "soft edges", if any, included in the cycle.  These edges could
     438             :  * potentially be eliminated by rearranging wait queues.  We also fill
     439             :  * deadlockDetails[] with information about the detected cycle; this info
     440             :  * is not used by the deadlock algorithm itself, only to print a useful
     441             :  * message after failing.
     442             :  *
     443             :  * Since we need to be able to check hypothetical configurations that would
     444             :  * exist after wait queue rearrangement, the routine pays attention to the
     445             :  * table of hypothetical queue orders in waitOrders[].  These orders will
     446             :  * be believed in preference to the actual ordering seen in the locktable.
     447             :  */
     448             : static bool
     449          50 : FindLockCycle(PGPROC *checkProc,
     450             :               EDGE *softEdges,  /* output argument */
     451             :               int *nSoftEdges)  /* output argument */
     452             : {
     453          50 :     nVisitedProcs = 0;
     454          50 :     nDeadlockDetails = 0;
     455          50 :     *nSoftEdges = 0;
     456          50 :     return FindLockCycleRecurse(checkProc, 0, softEdges, nSoftEdges);
     457             : }
     458             : 
     459             : static bool
     460         164 : FindLockCycleRecurse(PGPROC *checkProc,
     461             :                      int depth,
     462             :                      EDGE *softEdges,   /* output argument */
     463             :                      int *nSoftEdges)   /* output argument */
     464             : {
     465             :     int         i;
     466             :     dlist_iter  iter;
     467             : 
     468             :     /*
     469             :      * If this process is a lock group member, check the leader instead. (Note
     470             :      * that we might be the leader, in which case this is a no-op.)
     471             :      */
     472         164 :     if (checkProc->lockGroupLeader != NULL)
     473          40 :         checkProc = checkProc->lockGroupLeader;
     474             : 
     475             :     /*
     476             :      * Have we already seen this proc?
     477             :      */
     478         404 :     for (i = 0; i < nVisitedProcs; i++)
     479             :     {
     480         262 :         if (visitedProcs[i] == checkProc)
     481             :         {
     482             :             /* If we return to starting point, we have a deadlock cycle */
     483          22 :             if (i == 0)
     484             :             {
     485             :                 /*
     486             :                  * record total length of cycle --- outer levels will now fill
     487             :                  * deadlockDetails[]
     488             :                  */
     489             :                 Assert(depth <= MaxBackends);
     490          10 :                 nDeadlockDetails = depth;
     491             : 
     492          10 :                 return true;
     493             :             }
     494             : 
     495             :             /*
     496             :              * Otherwise, we have a cycle but it does not include the start
     497             :              * point, so say "no deadlock".
     498             :              */
     499          12 :             return false;
     500             :         }
     501             :     }
     502             :     /* Mark proc as seen */
     503             :     Assert(nVisitedProcs < MaxBackends);
     504         142 :     visitedProcs[nVisitedProcs++] = checkProc;
     505             : 
     506             :     /*
     507             :      * If the process is waiting, there is an outgoing waits-for edge to each
     508             :      * process that blocks it.
     509             :      */
     510         234 :     if (checkProc->links.next != NULL && checkProc->waitLock != NULL &&
     511          92 :         FindLockCycleRecurseMember(checkProc, checkProc, depth, softEdges,
     512             :                                    nSoftEdges))
     513          46 :         return true;
     514             : 
     515             :     /*
     516             :      * If the process is not waiting, there could still be outgoing waits-for
     517             :      * edges if it is part of a lock group, because other members of the lock
     518             :      * group might be waiting even though this process is not.  (Given lock
     519             :      * groups {A1, A2} and {B1, B2}, if A1 waits for B1 and B2 waits for A2,
      520             :      * that is a deadlock even if neither B1 nor A2 is waiting for anything.)
     521             :      */
     522         166 :     dlist_foreach(iter, &checkProc->lockGroupMembers)
     523             :     {
     524             :         PGPROC     *memberProc;
     525             : 
     526          78 :         memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
     527             : 
     528          78 :         if (memberProc->links.next != NULL && memberProc->waitLock != NULL &&
     529          42 :             memberProc != checkProc &&
     530          42 :             FindLockCycleRecurseMember(memberProc, checkProc, depth, softEdges,
     531             :                                        nSoftEdges))
     532           8 :             return true;
     533             :     }
     534             : 
     535          88 :     return false;
     536             : }
     537             : 
                      : /*
                      :  * FindLockCycleRecurseMember -- examine the wait edges of one lock-group
                      :  * member while searching for a deadlock cycle.
                      :  *
                      :  * checkProc is the member whose awaited lock is inspected; checkProcLeader
                      :  * is the leader of its lock group (the member itself when group locking is
                      :  * not in use).  depth is the recursion depth, used to index
                      :  * deadlockDetails[] when a cycle is found.  Soft edges discovered on the
                      :  * cycle are appended to the softEdges[]/nSoftEdges output arguments.
                      :  * Returns true iff a cycle back to the original waiter was detected.
                      :  */
      538             : static bool
      539         134 : FindLockCycleRecurseMember(PGPROC *checkProc,
      540             :                            PGPROC *checkProcLeader,
      541             :                            int depth,
      542             :                            EDGE *softEdges, /* output argument */
      543             :                            int *nSoftEdges) /* output argument */
      544             : {
      545             :     PGPROC     *proc;
      546         134 :     LOCK       *lock = checkProc->waitLock;
      547             :     PGXACT     *pgxact;
      548             :     PROCLOCK   *proclock;
      549             :     SHM_QUEUE  *procLocks;
      550             :     LockMethod  lockMethodTable;
      551             :     PROC_QUEUE *waitQueue;
      552             :     int         queue_size;
      553             :     int         conflictMask;
      554             :     int         i;
      555             :     int         numLockModes,
      556             :                 lm;
      557             : 
      558             :     /*
      559             :      * The relation extension or page lock can never participate in actual
      560             :      * deadlock cycle.  See Asserts in LockAcquireExtended.  So, there is no
      561             :      * advantage in checking wait edges from them.
      562             :      */
      563         134 :     if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND ||
      564         134 :         (LOCK_LOCKTAG(*lock) == LOCKTAG_PAGE))
      565           0 :         return false;
      566             : 
      567         134 :     lockMethodTable = GetLocksMethodTable(lock);
      568         134 :     numLockModes = lockMethodTable->numLockModes;
      569         134 :     conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
      570             : 
      571             :     /*
      572             :      * Scan for procs that already hold conflicting locks.  These are "hard"
      573             :      * edges in the waits-for graph.
      574             :      */
      575         134 :     procLocks = &(lock->procLocks);
      576             : 
      577         134 :     proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
      578             :                                          offsetof(PROCLOCK, lockLink));
      579             : 
      580         442 :     while (proclock)
      581             :     {
      582             :         PGPROC     *leader;
      583             : 
      584         352 :         proc = proclock->tag.myProc;
      585         352 :         pgxact = &ProcGlobal->allPgXact[proc->pgprocno];
      586         352 :         leader = proc->lockGroupLeader == NULL ? proc : proc->lockGroupLeader;
      587             : 
      588             :         /* A proc never blocks itself or any other lock group member */
      589         352 :         if (leader != checkProcLeader)
      590             :         {
      591        1662 :             for (lm = 1; lm <= numLockModes; lm++)
      592             :             {
      593        1526 :                 if ((proclock->holdMask & LOCKBIT_ON(lm)) &&
      594             :                     (conflictMask & LOCKBIT_ON(lm)))
      595             :                 {
      596             :                     /* This proc hard-blocks checkProc */
      597          82 :                     if (FindLockCycleRecurse(proc, depth + 1,
      598             :                                              softEdges, nSoftEdges))
      599             :                     {
      600             :                         /* fill deadlockDetails[] */
      601          44 :                         DEADLOCK_INFO *info = &deadlockDetails[depth];
      602             : 
      603          44 :                         info->locktag = lock->tag;
      604          44 :                         info->lockmode = checkProc->waitLockMode;
      605          44 :                         info->pid = checkProc->pid;
      606             : 
      607          44 :                         return true;
      608             :                     }
      609             : 
      610             :                     /*
      611             :                      * No deadlock here, but see if this proc is an autovacuum
      612             :                      * that is directly hard-blocking our own proc.  If so,
      613             :                      * report it so that the caller can send a cancel signal
      614             :                      * to it, if appropriate.  If there's more than one such
      615             :                      * proc, it's indeterminate which one will be reported.
      616             :                      *
      617             :                      * We don't touch autovacuums that are indirectly blocking
      618             :                      * us; it's up to the direct blockee to take action.  This
      619             :                      * rule simplifies understanding the behavior and ensures
      620             :                      * that an autovacuum won't be canceled with less than
      621             :                      * deadlock_timeout grace period.
      622             :                      *
      623             :                      * Note we read vacuumFlags without any locking.  This is
      624             :                      * OK only for checking the PROC_IS_AUTOVACUUM flag,
      625             :                      * because that flag is set at process start and never
      626             :                      * reset.  There is logic elsewhere to avoid canceling an
      627             :                      * autovacuum that is working to prevent XID wraparound
      628             :                      * problems (which needs to read a different vacuumFlag
      629             :                      * bit), but we don't do that here to avoid grabbing
      630             :                      * ProcArrayLock.
      631             :                      */
      632          38 :                     if (checkProc == MyProc &&
      633          20 :                         pgxact->vacuumFlags & PROC_IS_AUTOVACUUM)
      634           0 :                         blocking_autovacuum_proc = proc;
      635             : 
      636             :                     /* We're done looking at this proclock */
      637          38 :                     break;
      638             :                 }
      639             :             }
      640             :         }
      641             : 
      642         308 :         proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
      643             :                                              offsetof(PROCLOCK, lockLink));
      644             :     }
      645             : 
      646             :     /*
      647             :      * Scan for procs that are ahead of this one in the lock's wait queue.
      648             :      * Those that have conflicting requests soft-block this one.  This must be
      649             :      * done after the hard-block search, since if another proc both hard- and
      650             :      * soft-blocks this one, we want to call it a hard edge.
      651             :      *
      652             :      * If there is a proposed re-ordering of the lock's wait order, use that
      653             :      * rather than the current wait order.
      654             :      */
      655         108 :     for (i = 0; i < nWaitOrders; i++)
      656             :     {
      657          54 :         if (waitOrders[i].lock == lock)
      658          36 :             break;
      659             :     }
      660             : 
      661          90 :     if (i < nWaitOrders)
      662             :     {
      663             :         /* Use the given hypothetical wait queue order */
      664          36 :         PGPROC    **procs = waitOrders[i].procs;
      665             : 
      666          36 :         queue_size = waitOrders[i].nProcs;
      667             : 
      668          46 :         for (i = 0; i < queue_size; i++)
      669             :         {
      670             :             PGPROC     *leader;
      671             : 
      672          46 :             proc = procs[i];
      673          46 :             leader = proc->lockGroupLeader == NULL ? proc :
      674             :                 proc->lockGroupLeader;
      675             : 
      676             :             /*
      677             :              * TopoSort will always return an ordering with group members
      678             :              * adjacent to each other in the wait queue (see comments
      679             :              * therein). So, as soon as we reach a process in the same lock
      680             :              * group as checkProc, we know we've found all the conflicts that
      681             :              * precede any member of the lock group led by checkProcLeader.
      682             :              */
      683          46 :             if (leader == checkProcLeader)
      684          36 :                 break;
      685             : 
      686             :             /* Is there a conflict with this guy's request? */
      687          10 :             if ((LOCKBIT_ON(proc->waitLockMode) & conflictMask) != 0)
      688             :             {
      689             :                 /* This proc soft-blocks checkProc */
      690          10 :                 if (FindLockCycleRecurse(proc, depth + 1,
      691             :                                          softEdges, nSoftEdges))
      692             :                 {
      693             :                     /* fill deadlockDetails[] */
      694           0 :                     DEADLOCK_INFO *info = &deadlockDetails[depth];
      695             : 
      696           0 :                     info->locktag = lock->tag;
      697           0 :                     info->lockmode = checkProc->waitLockMode;
      698           0 :                     info->pid = checkProc->pid;
      699             : 
      700             :                     /*
      701             :                      * Add this edge to the list of soft edges in the cycle
      702             :                      */
      703             :                     Assert(*nSoftEdges < MaxBackends);
      704           0 :                     softEdges[*nSoftEdges].waiter = checkProcLeader;
      705           0 :                     softEdges[*nSoftEdges].blocker = leader;
      706           0 :                     softEdges[*nSoftEdges].lock = lock;
      707           0 :                     (*nSoftEdges)++;
      708           0 :                     return true;
      709             :                 }
      710             :             }
      711             :         }
      712             :     }
      713             :     else
      714             :     {
      715          54 :         PGPROC     *lastGroupMember = NULL;
      716             : 
      717             :         /* Use the true lock wait queue order */
      718          54 :         waitQueue = &(lock->waitProcs);
      719             : 
      720             :         /*
      721             :          * Find the last member of the lock group that is present in the wait
      722             :          * queue.  Anything after this is not a soft lock conflict. If group
      723             :          * locking is not in use, then we know immediately which process we're
      724             :         * looking for, but otherwise we've got to search the wait queue to
      725             :          * find the last process actually present.
      726             :          */
      727          54 :         if (checkProc->lockGroupLeader == NULL)
      728          36 :             lastGroupMember = checkProc;
      729             :         else
      730             :         {
      731          18 :             proc = (PGPROC *) waitQueue->links.next;
      732          18 :             queue_size = waitQueue->size;
      733          64 :             while (queue_size-- > 0)
      734             :             {
      735          46 :                 if (proc->lockGroupLeader == checkProcLeader)
      736          26 :                     lastGroupMember = proc;
      737          46 :                 proc = (PGPROC *) proc->links.next;
      738             :             }
      739             :             Assert(lastGroupMember != NULL);
      740             :         }
      741             : 
      742             :         /*
      743             :          * OK, now rescan (or scan) the queue to identify the soft conflicts.
      744             :          */
      745          54 :         queue_size = waitQueue->size;
      746          54 :         proc = (PGPROC *) waitQueue->links.next;
      747          70 :         while (queue_size-- > 0)
      748             :         {
      749             :             PGPROC     *leader;
      750             : 
      751          70 :             leader = proc->lockGroupLeader == NULL ? proc :
      752             :                 proc->lockGroupLeader;
      753             : 
      754             :             /* Done when we reach the target proc */
      755          70 :             if (proc == lastGroupMember)
      756          44 :                 break;
      757             : 
      758             :             /* Is there a conflict with this guy's request? */
      759          26 :             if ((LOCKBIT_ON(proc->waitLockMode) & conflictMask) != 0 &&
      760             :                 leader != checkProcLeader)
      761             :             {
      762             :                 /* This proc soft-blocks checkProc */
      763          22 :                 if (FindLockCycleRecurse(proc, depth + 1,
      764             :                                          softEdges, nSoftEdges))
      765             :                 {
      766             :                     /* fill deadlockDetails[] */
      767          10 :                     DEADLOCK_INFO *info = &deadlockDetails[depth];
      768             : 
      769          10 :                     info->locktag = lock->tag;
      770          10 :                     info->lockmode = checkProc->waitLockMode;
      771          10 :                     info->pid = checkProc->pid;
      772             : 
      773             :                     /*
      774             :                      * Add this edge to the list of soft edges in the cycle
      775             :                      */
      776             :                     Assert(*nSoftEdges < MaxBackends);
      777          10 :                     softEdges[*nSoftEdges].waiter = checkProcLeader;
      778          10 :                     softEdges[*nSoftEdges].blocker = leader;
      779          10 :                     softEdges[*nSoftEdges].lock = lock;
      780          10 :                     (*nSoftEdges)++;
      781          10 :                     return true;
      782             :                 }
      783             :             }
      784             : 
      785          16 :             proc = (PGPROC *) proc->links.next;
      786             :         }
      787             :     }
      788             : 
      789             :     /*
      790             :      * No conflict detected here.
      791             :      */
      792          80 :     return false;
      793             : }
     794             : 
     795             : 
     796             : /*
     797             :  * ExpandConstraints -- expand a list of constraints into a set of
     798             :  *      specific new orderings for affected wait queues
     799             :  *
     800             :  * Input is a list of soft edges to be reversed.  The output is a list
     801             :  * of nWaitOrders WAIT_ORDER structs in waitOrders[], with PGPROC array
     802             :  * workspace in waitOrderProcs[].
     803             :  *
     804             :  * Returns true if able to build an ordering that satisfies all the
     805             :  * constraints, false if not (there are contradictory constraints).
     806             :  */
      807             : static bool
      808          36 : ExpandConstraints(EDGE *constraints,
      809             :                   int nConstraints)
      810             : {
      811          36 :     int         nWaitOrderProcs = 0;
      812             :     int         i,
      813             :                 j;
      814             : 
                      :     /* Results go into the module-level waitOrders[] / nWaitOrders */
      815          36 :     nWaitOrders = 0;
      816             : 
      817             :     /*
      818             :      * Scan constraint list backwards.  This is because the last-added
      819             :      * constraint is the only one that could fail, and so we want to test it
      820             :      * for inconsistency first.
      821             :      */
      822          42 :     for (i = nConstraints; --i >= 0;)
      823             :     {
      824           6 :         LOCK       *lock = constraints[i].lock;
      825             : 
      826             :         /* Did we already make a list for this lock? */
      827           6 :         for (j = nWaitOrders; --j >= 0;)
      828             :         {
      829           0 :             if (waitOrders[j].lock == lock)
      830           0 :                 break;
      831             :         }
      832           6 :         if (j >= 0)
      833           0 :             continue;
      834             :         /* No, so allocate a new list */
      835           6 :         waitOrders[nWaitOrders].lock = lock;
                      :         /* carve this lock's PGPROC workspace out of waitOrderProcs[] */
      836           6 :         waitOrders[nWaitOrders].procs = waitOrderProcs + nWaitOrderProcs;
      837           6 :         waitOrders[nWaitOrders].nProcs = lock->waitProcs.size;
      838           6 :         nWaitOrderProcs += lock->waitProcs.size;
      839             :         Assert(nWaitOrderProcs <= MaxBackends);
      840             : 
      841             :         /*
      842             :          * Do the topo sort.  TopoSort need not examine constraints after this
      843             :          * one, since they must be for different locks.
      844             :          */
      845           6 :         if (!TopoSort(lock, constraints, i + 1,
      846           6 :                       waitOrders[nWaitOrders].procs))
      847           0 :             return false;
      848           6 :         nWaitOrders++;
      849             :     }
      850          36 :     return true;
      851             : }
     852             : 
     853             : 
     854             : /*
     855             :  * TopoSort -- topological sort of a wait queue
     856             :  *
     857             :  * Generate a re-ordering of a lock's wait queue that satisfies given
     858             :  * constraints about certain procs preceding others.  (Each such constraint
     859             :  * is a fact of a partial ordering.)  Minimize rearrangement of the queue
     860             :  * not needed to achieve the partial ordering.
     861             :  *
     862             :  * This is a lot simpler and slower than, for example, the topological sort
     863             :  * algorithm shown in Knuth's Volume 1.  However, Knuth's method doesn't
     864             :  * try to minimize the damage to the existing order.  In practice we are
     865             :  * not likely to be working with more than a few constraints, so the apparent
     866             :  * slowness of the algorithm won't really matter.
     867             :  *
     868             :  * The initial queue ordering is taken directly from the lock's wait queue.
     869             :  * The output is an array of PGPROC pointers, of length equal to the lock's
     870             :  * wait queue length (the caller is responsible for providing this space).
     871             :  * The partial order is specified by an array of EDGE structs.  Each EDGE
     872             :  * is one that we need to reverse, therefore the "waiter" must appear before
     873             :  * the "blocker" in the output array.  The EDGE array may well contain
     874             :  * edges associated with other locks; these should be ignored.
     875             :  *
     876             :  * Returns true if able to build an ordering that satisfies all the
     877             :  * constraints, false if not (there are contradictory constraints).
     878             :  */
      879             : static bool
      880           6 : TopoSort(LOCK *lock,
      881             :          EDGE *constraints,
      882             :          int nConstraints,
      883             :          PGPROC **ordering)     /* output argument */
      884             : {
      885           6 :     PROC_QUEUE *waitQueue = &(lock->waitProcs);
      886           6 :     int         queue_size = waitQueue->size;
      887             :     PGPROC     *proc;
      888             :     int         i,
      889             :                 j,
      890             :                 jj,
      891             :                 k,
      892             :                 kk,
      893             :                 last;
      894             : 
      895             :     /* First, fill topoProcs[] array with the procs in their current order */
      896           6 :     proc = (PGPROC *) waitQueue->links.next;
      897          24 :     for (i = 0; i < queue_size; i++)
      898             :     {
      899          18 :         topoProcs[i] = proc;
      900          18 :         proc = (PGPROC *) proc->links.next;
      901             :     }
      902             : 
      903             :     /*
      904             :      * Scan the constraints, and for each proc in the array, generate a count
      905             :      * of the number of constraints that say it must be before something else,
      906             :      * plus a list of the constraints that say it must be after something
      907             :      * else. The count for the j'th proc is stored in beforeConstraints[j],
      908             :      * and the head of its list in afterConstraints[j].  Each constraint
      909             :      * stores its list link in constraints[i].link (note any constraint will
      910             :      * be in just one list). The array index for the before-proc of the i'th
      911             :      * constraint is remembered in constraints[i].pred.
      912             :      *
      913             :      * Note that it's not necessarily the case that every constraint affects
      914             :      * this particular wait queue.  Prior to group locking, a process could be
      915             :      * waiting for at most one lock.  But a lock group can be waiting for
      916             :      * zero, one, or multiple locks.  Since topoProcs[] is an array of the
      917             :      * processes actually waiting, while constraints[] is an array of group
      918             :      * leaders, we've got to scan through topoProcs[] for each constraint,
      919             :      * checking whether both a waiter and a blocker for that group are
      920             :      * present.  If so, the constraint is relevant to this wait queue; if not,
      921             :      * it isn't.
      922             :      */
      923          12 :     MemSet(beforeConstraints, 0, queue_size * sizeof(int));
      924          12 :     MemSet(afterConstraints, 0, queue_size * sizeof(int));
      925          12 :     for (i = 0; i < nConstraints; i++)
      926             :     {
      927             :         /*
      928             :          * Find a representative process that is on the lock queue and part of
      929             :          * the waiting lock group.  This may or may not be the leader, which
      930             :          * may or may not be waiting at all.  If there are any other processes
      931             :          * in the same lock group on the queue, set their number of
      932             :          * beforeConstraints to -1 to indicate that they should be emitted
      933             :          * with their groupmates rather than considered separately.
      934             :          *
      935             :          * In this loop and the similar one just below, it's critical that we
      936             :          * consistently select the same representative member of any one lock
      937             :          * group, so that all the constraints are associated with the same
      938             :          * proc, and the -1's are only associated with not-representative
      939             :          * members.  We select the last one in the topoProcs array.
      940             :          */
      941           6 :         proc = constraints[i].waiter;
      942             :         Assert(proc != NULL);
      943           6 :         jj = -1;
      944          24 :         for (j = queue_size; --j >= 0;)
      945             :         {
      946          18 :             PGPROC     *waiter = topoProcs[j];
      947             : 
      948          18 :             if (waiter == proc || waiter->lockGroupLeader == proc)
      949             :             {
      950             :                 Assert(waiter->waitLock == lock);
      951          10 :                 if (jj == -1)
      952           6 :                     jj = j;
      953             :                 else
      954             :                 {
      955             :                     Assert(beforeConstraints[j] <= 0);
      956           4 :                     beforeConstraints[j] = -1;
      957             :                 }
      958             :             }
      959             :         }
      960             : 
      961             :         /* If no matching waiter, constraint is not relevant to this lock. */
      962           6 :         if (jj < 0)
      963           0 :             continue;
      964             : 
      965             :         /*
      966             :          * Similarly, find a representative process that is on the lock queue
      967             :          * and waiting for the blocking lock group.  Again, this could be the
      968             :          * leader but does not need to be.
      969             :          */
      970           6 :         proc = constraints[i].blocker;
      971             :         Assert(proc != NULL);
      972           6 :         kk = -1;
      973          24 :         for (k = queue_size; --k >= 0;)
      974             :         {
      975          18 :             PGPROC     *blocker = topoProcs[k];
      976             : 
      977          18 :             if (blocker == proc || blocker->lockGroupLeader == proc)
      978             :             {
      979             :                 Assert(blocker->waitLock == lock);
      980           6 :                 if (kk == -1)
      981           6 :                     kk = k;
      982             :                 else
      983             :                 {
      984             :                     Assert(beforeConstraints[k] <= 0);
      985           0 :                     beforeConstraints[k] = -1;
      986             :                 }
      987             :             }
      988             :         }
      989             : 
      990             :         /* If no matching blocker, constraint is not relevant to this lock. */
      991           6 :         if (kk < 0)
      992           0 :             continue;
      993             : 
      994             :         Assert(beforeConstraints[jj] >= 0);
      995           6 :         beforeConstraints[jj]++;    /* waiter must come before */
      996             :         /* add this constraint to list of after-constraints for blocker */
      997           6 :         constraints[i].pred = jj;
      998           6 :         constraints[i].link = afterConstraints[kk];
                      :         /* list links are stored 1-based so that zero means "end of list" */
      999           6 :         afterConstraints[kk] = i + 1;
     1000             :     }
     1001             : 
     1002             :     /*--------------------
     1003             :      * Now scan the topoProcs array backwards.  At each step, output the
     1004             :      * last proc that has no remaining before-constraints plus any other
     1005             :      * members of the same lock group; then decrease the beforeConstraints
     1006             :      * count of each of the procs it was constrained against.
     1007             :      * i = index of ordering[] entry we want to output this time
     1008             :      * j = search index for topoProcs[]
     1009             :      * k = temp for scanning constraint list for proc j
     1010             :      * last = last non-null index in topoProcs (avoid redundant searches)
     1011             :      *--------------------
     1012             :      */
     1013           6 :     last = queue_size - 1;
     1014          20 :     for (i = queue_size - 1; i >= 0;)
     1015             :     {
     1016             :         int         c;
     1017          14 :         int         nmatches = 0;
     1018             : 
     1019             :         /* Find next candidate to output */
     1020          14 :         while (topoProcs[last] == NULL)
     1021           0 :             last--;
     1022          28 :         for (j = last; j >= 0; j--)
     1023             :         {
     1024          28 :             if (topoProcs[j] != NULL && beforeConstraints[j] == 0)
     1025          14 :                 break;
     1026             :         }
     1027             : 
     1028             :         /* If no available candidate, topological sort fails */
     1029          14 :         if (j < 0)
     1030           0 :             return false;
     1031             : 
     1032             :         /*
     1033             :          * Output everything in the lock group.  There's no point in
     1034             :          * outputting an ordering where members of the same lock group are not
     1035             :          * consecutive on the wait queue: if some other waiter is between two
     1036             :          * requests that belong to the same group, then either it conflicts
     1037             :          * with both of them and is certainly not a solution; or it conflicts
     1038             :          * with at most one of them and is thus isomorphic to an ordering
     1039             :          * where the group members are consecutive.
     1040             :          */
     1041          14 :         proc = topoProcs[j];
     1042          14 :         if (proc->lockGroupLeader != NULL)
     1043           4 :             proc = proc->lockGroupLeader;
     1044             :         Assert(proc != NULL);
     1045          56 :         for (c = 0; c <= last; ++c)
     1046             :         {
     1047          42 :             if (topoProcs[c] == proc || (topoProcs[c] != NULL &&
     1048          22 :                                          topoProcs[c]->lockGroupLeader == proc))
     1049             :             {
     1050          18 :                 ordering[i - nmatches] = topoProcs[c];
     1051          18 :                 topoProcs[c] = NULL;
     1052          18 :                 ++nmatches;
     1053             :             }
     1054             :         }
     1055             :         Assert(nmatches > 0);
     1056          14 :         i -= nmatches;
     1057             : 
     1058             :         /* Update beforeConstraints counts of its predecessors */
     1059          20 :         for (k = afterConstraints[j]; k > 0; k = constraints[k - 1].link)
     1060           6 :             beforeConstraints[constraints[k - 1].pred]--;
     1061             :     }
     1062             : 
     1063             :     /* Done */
     1064           6 :     return true;
     1065             : }
    1066             : 
     1067             : #ifdef DEBUG_DEADLOCK
                      : /*
                      :  * PrintLockQueue -- debugging aid: print the PIDs of the procs in a lock's
                      :  * wait queue, in queue order, to stdout (flushed immediately).  Compiled
                      :  * only when DEBUG_DEADLOCK is defined.
                      :  */
     1068             : static void
     1069             : PrintLockQueue(LOCK *lock, const char *info)
     1070             : {
     1071             :     PROC_QUEUE *waitQueue = &(lock->waitProcs);
     1072             :     int         queue_size = waitQueue->size;
     1073             :     PGPROC     *proc;
     1074             :     int         i;
     1075             : 
     1076             :     printf("%s lock %p queue ", info, lock);
     1077             :     proc = (PGPROC *) waitQueue->links.next;
     1078             :     for (i = 0; i < queue_size; i++)
     1079             :     {
     1080             :         printf(" %d", proc->pid);
     1081             :         proc = (PGPROC *) proc->links.next;
     1082             :     }
     1083             :     printf("\n");
     1084             :     fflush(stdout);
     1085             : }
     1086             : #endif
    1087             : 
    1088             : /*
    1089             :  * Report a detected deadlock, with available details.
    1090             :  */
    1091             : void
    1092           4 : DeadLockReport(void)
    1093             : {
    1094             :     StringInfoData clientbuf;   /* errdetail for client */
    1095             :     StringInfoData logbuf;      /* errdetail for server log */
    1096             :     StringInfoData locktagbuf;
    1097             :     int         i;
    1098             : 
    1099           4 :     initStringInfo(&clientbuf);
    1100           4 :     initStringInfo(&logbuf);
    1101           4 :     initStringInfo(&locktagbuf);
    1102             : 
    1103             :     /* Generate the "waits for" lines sent to the client */
    1104          24 :     for (i = 0; i < nDeadlockDetails; i++)
    1105             :     {
    1106          20 :         DEADLOCK_INFO *info = &deadlockDetails[i];
    1107             :         int         nextpid;
    1108             : 
    1109             :         /* The last proc waits for the first one... */
    1110          20 :         if (i < nDeadlockDetails - 1)
    1111          16 :             nextpid = info[1].pid;
    1112             :         else
    1113           4 :             nextpid = deadlockDetails[0].pid;
    1114             : 
    1115             :         /* reset locktagbuf to hold next object description */
    1116          20 :         resetStringInfo(&locktagbuf);
    1117             : 
    1118          20 :         DescribeLockTag(&locktagbuf, &info->locktag);
    1119             : 
    1120          20 :         if (i > 0)
    1121          16 :             appendStringInfoChar(&clientbuf, '\n');
    1122             : 
    1123          80 :         appendStringInfo(&clientbuf,
    1124          20 :                          _("Process %d waits for %s on %s; blocked by process %d."),
    1125             :                          info->pid,
    1126          20 :                          GetLockmodeName(info->locktag.locktag_lockmethodid,
    1127             :                                          info->lockmode),
    1128             :                          locktagbuf.data,
    1129             :                          nextpid);
    1130             :     }
    1131             : 
    1132             :     /* Duplicate all the above for the server ... */
    1133           4 :     appendBinaryStringInfo(&logbuf, clientbuf.data, clientbuf.len);
    1134             : 
    1135             :     /* ... and add info about query strings */
    1136          24 :     for (i = 0; i < nDeadlockDetails; i++)
    1137             :     {
    1138          20 :         DEADLOCK_INFO *info = &deadlockDetails[i];
    1139             : 
    1140          20 :         appendStringInfoChar(&logbuf, '\n');
    1141             : 
    1142          20 :         appendStringInfo(&logbuf,
    1143          20 :                          _("Process %d: %s"),
    1144             :                          info->pid,
    1145             :                          pgstat_get_backend_current_activity(info->pid, false));
    1146             :     }
    1147             : 
    1148           4 :     pgstat_report_deadlock();
    1149             : 
    1150           4 :     ereport(ERROR,
    1151             :             (errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
    1152             :              errmsg("deadlock detected"),
    1153             :              errdetail_internal("%s", clientbuf.data),
    1154             :              errdetail_log("%s", logbuf.data),
    1155             :              errhint("See server log for query details.")));
    1156             : }
    1157             : 
    1158             : /*
    1159             :  * RememberSimpleDeadLock: set up info for DeadLockReport when ProcSleep
    1160             :  * detects a trivial (two-way) deadlock.  proc1 wants to block for lockmode
    1161             :  * on lock, but proc2 is already waiting and would be blocked by proc1.
    1162             :  */
    1163             : void
    1164           2 : RememberSimpleDeadLock(PGPROC *proc1,
    1165             :                        LOCKMODE lockmode,
    1166             :                        LOCK *lock,
    1167             :                        PGPROC *proc2)
    1168             : {
    1169           2 :     DEADLOCK_INFO *info = &deadlockDetails[0];
    1170             : 
    1171           2 :     info->locktag = lock->tag;
    1172           2 :     info->lockmode = lockmode;
    1173           2 :     info->pid = proc1->pid;
    1174           2 :     info++;
    1175           2 :     info->locktag = proc2->waitLock->tag;
    1176           2 :     info->lockmode = proc2->waitLockMode;
    1177           2 :     info->pid = proc2->pid;
    1178           2 :     nDeadlockDetails = 2;
    1179           2 : }

Generated by: LCOV version 1.13