LCOV - code coverage report
Current view: top level - src/backend/utils/cache - inval.c (source / functions)
Test: PostgreSQL 16beta1            Lines:     337 hit / 345 total  (97.7 %)
Date: 2023-05-31 00:12:04           Functions:  36 hit /  36 total (100.0 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * inval.c
       4             :  *    POSTGRES cache invalidation dispatcher code.
       5             :  *
       6             :  *  This is subtle stuff, so pay attention:
       7             :  *
       8             :  *  When a tuple is updated or deleted, our standard visibility rules
       9             :  *  consider that it is *still valid* so long as we are in the same command,
      10             :  *  ie, until the next CommandCounterIncrement() or transaction commit.
      11             :  *  (See access/heap/heapam_visibility.c, and note that system catalogs are
      12             :  *  generally scanned under the most current snapshot available, rather than
      13             :  *  the transaction snapshot.)  At the command boundary, the old tuple stops
      14             :  *  being valid and the new version, if any, becomes valid.  Therefore,
      15             :  *  we cannot simply flush a tuple from the system caches during heap_update()
      16             :  *  or heap_delete().  The tuple is still good at that point; what's more,
      17             :  *  even if we did flush it, it might be reloaded into the caches by a later
      18             :  *  request in the same command.  So the correct behavior is to keep a list
      19             :  *  of outdated (updated/deleted) tuples and then do the required cache
      20             :  *  flushes at the next command boundary.  We must also keep track of
      21             :  *  inserted tuples so that we can flush "negative" cache entries that match
      22             :  *  the new tuples; again, that mustn't happen until end of command.
      23             :  *
      24             :  *  Once we have finished the command, we still need to remember inserted
      25             :  *  tuples (including new versions of updated tuples), so that we can flush
      26             :  *  them from the caches if we abort the transaction.  Similarly, we'd better
      27             :  *  be able to flush "negative" cache entries that may have been loaded in
      28             :  *  place of deleted tuples, so we still need the deleted ones too.
      29             :  *
      30             :  *  If we successfully complete the transaction, we have to broadcast all
      31             :  *  these invalidation events to other backends (via the SI message queue)
      32             :  *  so that they can flush obsolete entries from their caches.  Note we have
      33             :  *  to record the transaction commit before sending SI messages, otherwise
      34             :  *  the other backends won't see our updated tuples as good.
      35             :  *
      36             :  *  When a subtransaction aborts, we can process and discard any events
      37             :  *  it has queued.  When a subtransaction commits, we just add its events
      38             :  *  to the pending lists of the parent transaction.
      39             :  *
      40             :  *  In short, we need to remember until xact end every insert or delete
      41             :  *  of a tuple that might be in the system caches.  Updates are treated as
      42             :  *  two events, delete + insert, for simplicity.  (If the update doesn't
      43             :  *  change the tuple hash value, catcache.c optimizes this into one event.)
      44             :  *
      45             :  *  We do not need to register EVERY tuple operation in this way, just those
      46             :  *  on tuples in relations that have associated catcaches.  We do, however,
      47             :  *  have to register every operation on every tuple that *could* be in a
      48             :  *  catcache, whether or not it currently is in our cache.  Also, if the
      49             :  *  tuple is in a relation that has multiple catcaches, we need to register
      50             :  *  an invalidation message for each such catcache.  catcache.c's
      51             :  *  PrepareToInvalidateCacheTuple() routine provides the knowledge of which
      52             :  *  catcaches may need invalidation for a given tuple.
      53             :  *
      54             :  *  Also, whenever we see an operation on a pg_class, pg_attribute, or
      55             :  *  pg_index tuple, we register a relcache flush operation for the relation
      56             :  *  described by that tuple (as specified in CacheInvalidateHeapTuple()).
      57             :  *  Likewise for pg_constraint tuples for foreign keys on relations.
      58             :  *
      59             :  *  We keep the relcache flush requests in lists separate from the catcache
      60             :  *  tuple flush requests.  This allows us to issue all the pending catcache
      61             :  *  flushes before we issue relcache flushes, which saves us from loading
      62             :  *  a catcache tuple during relcache load only to flush it again right away.
      63             :  *  Also, we avoid queuing multiple relcache flush requests for the same
      64             :  *  relation, since a relcache flush is relatively expensive to do.
      65             :  *  (XXX is it worth testing likewise for duplicate catcache flush entries?
      66             :  *  Probably not.)
      67             :  *
      68             :  *  Many subsystems own higher-level caches that depend on relcache and/or
      69             :  *  catcache, and they register callbacks here to invalidate their caches.
      70             :  *  While building a higher-level cache entry, a backend may receive a
      71             :  *  callback for the being-built entry or one of its dependencies.  This
      72             :  *  implies the new higher-level entry would be born stale, and it might
      73             :  *  remain stale for the life of the backend.  Many caches do not prevent
       74             :  *  that.  They rely on the fact that DDL making can't-miss catalog changes
       75             :  *  takes AccessExclusiveLock on suitable objects.  (For a change made with less
      76             :  *  locking, backends might never read the change.)  The relation cache,
      77             :  *  however, needs to reflect changes from CREATE INDEX CONCURRENTLY no later
      78             :  *  than the beginning of the next transaction.  Hence, when a relevant
      79             :  *  invalidation callback arrives during a build, relcache.c reattempts that
      80             :  *  build.  Caches with similar needs could do likewise.
      81             :  *
      82             :  *  If a relcache flush is issued for a system relation that we preload
      83             :  *  from the relcache init file, we must also delete the init file so that
      84             :  *  it will be rebuilt during the next backend restart.  The actual work of
      85             :  *  manipulating the init file is in relcache.c, but we keep track of the
      86             :  *  need for it here.
      87             :  *
      88             :  *  Currently, inval messages are sent without regard for the possibility
      89             :  *  that the object described by the catalog tuple might be a session-local
      90             :  *  object such as a temporary table.  This is because (1) this code has
      91             :  *  no practical way to tell the difference, and (2) it is not certain that
      92             :  *  other backends don't have catalog cache or even relcache entries for
      93             :  *  such tables, anyway; there is nothing that prevents that.  It might be
      94             :  *  worth trying to avoid sending such inval traffic in the future, if those
      95             :  *  problems can be overcome cheaply.
      96             :  *
       97             :  *  When wal_level=logical, invalidations are written into WAL at each command
       98             :  *  end to support decoding of in-progress transactions.  See
      99             :  *  CommandEndInvalidationMessages.
     100             :  *
     101             :  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
     102             :  * Portions Copyright (c) 1994, Regents of the University of California
     103             :  *
     104             :  * IDENTIFICATION
     105             :  *    src/backend/utils/cache/inval.c
     106             :  *
     107             :  *-------------------------------------------------------------------------
     108             :  */
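
The callback mechanism described in the header comment above is driven through
CacheRegisterSyscacheCallback() and CacheRegisterRelcacheCallback(), declared in
utils/inval.h and defined later in this file.  A minimal sketch of how a
hypothetical backend-local cache might hook into invalidation follows; the
my_cache_* names, the my_cache_valid flag, and the choice of the PROCOID
syscache are illustrative assumptions, not part of inval.c:

    #include "postgres.h"

    #include "utils/inval.h"
    #include "utils/syscache.h"

    static bool my_cache_valid = false; /* hypothetical cache state */

    static void
    my_cache_syscache_cb(Datum arg, int cacheid, uint32 hashvalue)
    {
        /* hashvalue == 0 conventionally means "flush everything" */
        my_cache_valid = false;
    }

    static void
    my_cache_relcache_cb(Datum arg, Oid relid)
    {
        /* relid == InvalidOid means all relations were invalidated */
        my_cache_valid = false;
    }

    void
    my_cache_init(void)
    {
        /* register once per backend, e.g. on first use of the cache */
        CacheRegisterSyscacheCallback(PROCOID, my_cache_syscache_cb, (Datum) 0);
        CacheRegisterRelcacheCallback(my_cache_relcache_cb, (Datum) 0);
    }
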
     109             : #include "postgres.h"
     110             : 
     111             : #include <limits.h>
     112             : 
     113             : #include "access/htup_details.h"
     114             : #include "access/xact.h"
     115             : #include "access/xloginsert.h"
     116             : #include "catalog/catalog.h"
     117             : #include "catalog/pg_constraint.h"
     118             : #include "miscadmin.h"
     119             : #include "storage/sinval.h"
     120             : #include "storage/smgr.h"
     121             : #include "utils/catcache.h"
     122             : #include "utils/guc.h"
     123             : #include "utils/inval.h"
     124             : #include "utils/memdebug.h"
     125             : #include "utils/memutils.h"
     126             : #include "utils/rel.h"
     127             : #include "utils/relmapper.h"
     128             : #include "utils/snapmgr.h"
     129             : #include "utils/syscache.h"
     130             : 
     131             : 
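For orientation while reading the rest of this file: the messages being queued
are SharedInvalidationMessage values, a union declared in storage/sinval.h
(included above).  A paraphrase of its shape, with field lists abridged to the
members this file actually touches (see sinval.h for the authoritative
definition):

    /* paraphrase of storage/sinval.h, abridged */
    typedef union
    {
        int8        id;         /* discriminator: catcache id if >= 0,
                                 * else a negative SHAREDINVAL*_ID code */
        SharedInvalCatcacheMsg cc;      /* .id, .dbId, .hashValue */
        SharedInvalCatalogMsg cat;      /* .dbId, .catId */
        SharedInvalRelcacheMsg rc;      /* .dbId, .relId */
        SharedInvalSmgrMsg sm;          /* .rlocator, .backend_hi, .backend_lo */
        SharedInvalRelmapMsg rm;        /* .dbId */
        SharedInvalSnapshotMsg sn;      /* .dbId, .relId */
    } SharedInvalidationMessage;
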
     132             : /*
     133             :  * Pending requests are stored as ready-to-send SharedInvalidationMessages.
     134             :  * We keep the messages themselves in arrays in TopTransactionContext
     135             :  * (there are separate arrays for catcache and relcache messages).  Control
     136             :  * information is kept in a chain of TransInvalidationInfo structs, also
     137             :  * allocated in TopTransactionContext.  (We could keep a subtransaction's
     138             :  * TransInvalidationInfo in its CurTransactionContext; but that's more
      139             :  * wasteful, not less so, since in very many scenarios it'd be the only
     140             :  * allocation in the subtransaction's CurTransactionContext.)
     141             :  *
     142             :  * We can store the message arrays densely, and yet avoid moving data around
     143             :  * within an array, because within any one subtransaction we need only
     144             :  * distinguish between messages emitted by prior commands and those emitted
     145             :  * by the current command.  Once a command completes and we've done local
     146             :  * processing on its messages, we can fold those into the prior-commands
     147             :  * messages just by changing array indexes in the TransInvalidationInfo
      148             :  * struct.  Similarly, we need to distinguish messages of prior subtransactions
     149             :  * from those of the current subtransaction only until the subtransaction
     150             :  * completes, after which we adjust the array indexes in the parent's
     151             :  * TransInvalidationInfo to include the subtransaction's messages.
     152             :  *
     153             :  * The ordering of the individual messages within a command's or
     154             :  * subtransaction's output is not considered significant, although this
     155             :  * implementation happens to preserve the order in which they were queued.
     156             :  * (Previous versions of this code did not preserve it.)
     157             :  *
     158             :  * For notational convenience, control information is kept in two-element
     159             :  * arrays, the first for catcache messages and the second for relcache
     160             :  * messages.
     161             :  */
     162             : #define CatCacheMsgs 0
     163             : #define RelCacheMsgs 1
     164             : 
     165             : /* Pointers to main arrays in TopTransactionContext */
     166             : typedef struct InvalMessageArray
     167             : {
     168             :     SharedInvalidationMessage *msgs;    /* palloc'd array (can be expanded) */
     169             :     int         maxmsgs;        /* current allocated size of array */
     170             : } InvalMessageArray;
     171             : 
     172             : static InvalMessageArray InvalMessageArrays[2];
     173             : 
     174             : /* Control information for one logical group of messages */
     175             : typedef struct InvalidationMsgsGroup
     176             : {
     177             :     int         firstmsg[2];    /* first index in relevant array */
     178             :     int         nextmsg[2];     /* last+1 index */
     179             : } InvalidationMsgsGroup;
     180             : 
     181             : /* Macros to help preserve InvalidationMsgsGroup abstraction */
     182             : #define SetSubGroupToFollow(targetgroup, priorgroup, subgroup) \
     183             :     do { \
     184             :         (targetgroup)->firstmsg[subgroup] = \
     185             :             (targetgroup)->nextmsg[subgroup] = \
     186             :             (priorgroup)->nextmsg[subgroup]; \
     187             :     } while (0)
     188             : 
     189             : #define SetGroupToFollow(targetgroup, priorgroup) \
     190             :     do { \
     191             :         SetSubGroupToFollow(targetgroup, priorgroup, CatCacheMsgs); \
     192             :         SetSubGroupToFollow(targetgroup, priorgroup, RelCacheMsgs); \
     193             :     } while (0)
     194             : 
     195             : #define NumMessagesInSubGroup(group, subgroup) \
     196             :     ((group)->nextmsg[subgroup] - (group)->firstmsg[subgroup])
     197             : 
     198             : #define NumMessagesInGroup(group) \
     199             :     (NumMessagesInSubGroup(group, CatCacheMsgs) + \
     200             :      NumMessagesInSubGroup(group, RelCacheMsgs))
     201             : 
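
A small hypothetical trace may make the index bookkeeping concrete.  The
message counts are invented; the index movements follow the SetGroupToFollow
machinery above and AppendInvalidationMessages() below, as driven by
CommandEndInvalidationMessages() (defined later in this file):

    /*
     * Catcache subgroup of a top-level transaction, hypothetical counts:
     *
     * Command 1 queues 5 messages into array slots 0..4:
     *     PriorCmdInvalidMsgs:   firstmsg = 0, nextmsg = 0   (empty)
     *     CurrentCmdInvalidMsgs: firstmsg = 0, nextmsg = 5
     *
     * CommandEndInvalidationMessages() processes those 5 locally, then folds
     * them into the prior-commands group by index arithmetic alone:
     *     PriorCmdInvalidMsgs:   firstmsg = 0, nextmsg = 5
     *     CurrentCmdInvalidMsgs: firstmsg = 5, nextmsg = 5   (set to follow)
     *
     * Command 2 then queues 3 messages into slots 5..7:
     *     CurrentCmdInvalidMsgs: firstmsg = 5, nextmsg = 8
     *
     * No message data is copied or moved at any point.
     */
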
     202             : 
     203             : /*----------------
     204             :  * Invalidation messages are divided into two groups:
     205             :  *  1) events so far in current command, not yet reflected to caches.
     206             :  *  2) events in previous commands of current transaction; these have
     207             :  *     been reflected to local caches, and must be either broadcast to
     208             :  *     other backends or rolled back from local cache when we commit
     209             :  *     or abort the transaction.
     210             :  * Actually, we need such groups for each level of nested transaction,
     211             :  * so that we can discard events from an aborted subtransaction.  When
     212             :  * a subtransaction commits, we append its events to the parent's groups.
     213             :  *
     214             :  * The relcache-file-invalidated flag can just be a simple boolean,
     215             :  * since we only act on it at transaction commit; we don't care which
     216             :  * command of the transaction set it.
     217             :  *----------------
     218             :  */
     219             : 
     220             : typedef struct TransInvalidationInfo
     221             : {
     222             :     /* Back link to parent transaction's info */
     223             :     struct TransInvalidationInfo *parent;
     224             : 
     225             :     /* Subtransaction nesting depth */
     226             :     int         my_level;
     227             : 
     228             :     /* Events emitted by current command */
     229             :     InvalidationMsgsGroup CurrentCmdInvalidMsgs;
     230             : 
     231             :     /* Events emitted by previous commands of this (sub)transaction */
     232             :     InvalidationMsgsGroup PriorCmdInvalidMsgs;
     233             : 
     234             :     /* init file must be invalidated? */
     235             :     bool        RelcacheInitFileInval;
     236             : } TransInvalidationInfo;
     237             : 
     238             : static TransInvalidationInfo *transInvalInfo = NULL;
     239             : 
     240             : /* GUC storage */
     241             : int         debug_discard_caches = 0;
     242             : 
     243             : /*
     244             :  * Dynamically-registered callback functions.  Current implementation
     245             :  * assumes there won't be enough of these to justify a dynamically resizable
     246             :  * array; it'd be easy to improve that if needed.
     247             :  *
     248             :  * To avoid searching in CallSyscacheCallbacks, all callbacks for a given
     249             :  * syscache are linked into a list pointed to by syscache_callback_links[id].
     250             :  * The link values are syscache_callback_list[] index plus 1, or 0 for none.
     251             :  */
     252             : 
     253             : #define MAX_SYSCACHE_CALLBACKS 64
     254             : #define MAX_RELCACHE_CALLBACKS 10
     255             : 
     256             : static struct SYSCACHECALLBACK
     257             : {
     258             :     int16       id;             /* cache number */
     259             :     int16       link;           /* next callback index+1 for same cache */
     260             :     SyscacheCallbackFunction function;
     261             :     Datum       arg;
     262             : }           syscache_callback_list[MAX_SYSCACHE_CALLBACKS];
     263             : 
     264             : static int16 syscache_callback_links[SysCacheSize];
     265             : 
     266             : static int  syscache_callback_count = 0;
     267             : 
     268             : static struct RELCACHECALLBACK
     269             : {
     270             :     RelcacheCallbackFunction function;
     271             :     Datum       arg;
     272             : }           relcache_callback_list[MAX_RELCACHE_CALLBACKS];
     273             : 
     274             : static int  relcache_callback_count = 0;
     275             : 
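The index-plus-one encoding of syscache_callback_links[] is what lets the
dispatcher visit only the callbacks registered for a particular cache id.  A
sketch of that walk, written against the declarations above (the real
CallSyscacheCallbacks() appears later in this file and may differ in detail):

    static void
    call_syscache_callbacks_sketch(int cacheid, uint32 hashvalue)
    {
        /* list head: syscache_callback_list index + 1, or 0 for "none" */
        int         i = syscache_callback_links[cacheid] - 1;

        while (i >= 0)
        {
            struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;

            Assert(ccitem->id == cacheid);
            ccitem->function(ccitem->arg, cacheid, hashvalue);

            /* follow the chain: link is next index + 1, 0 terminates */
            i = ccitem->link - 1;
        }
    }
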
     276             : /* ----------------------------------------------------------------
     277             :  *              Invalidation subgroup support functions
     278             :  * ----------------------------------------------------------------
     279             :  */
     280             : 
     281             : /*
     282             :  * AddInvalidationMessage
     283             :  *      Add an invalidation message to a (sub)group.
     284             :  *
     285             :  * The group must be the last active one, since we assume we can add to the
     286             :  * end of the relevant InvalMessageArray.
     287             :  *
     288             :  * subgroup must be CatCacheMsgs or RelCacheMsgs.
     289             :  */
     290             : static void
     291    10873110 : AddInvalidationMessage(InvalidationMsgsGroup *group, int subgroup,
     292             :                        const SharedInvalidationMessage *msg)
     293             : {
     294    10873110 :     InvalMessageArray *ima = &InvalMessageArrays[subgroup];
     295    10873110 :     int         nextindex = group->nextmsg[subgroup];
     296             : 
     297    10873110 :     if (nextindex >= ima->maxmsgs)
     298             :     {
     299     1122618 :         if (ima->msgs == NULL)
     300             :         {
     301             :             /* Create new storage array in TopTransactionContext */
     302     1040130 :             int         reqsize = 32;   /* arbitrary */
     303             : 
     304     1040130 :             ima->msgs = (SharedInvalidationMessage *)
     305     1040130 :                 MemoryContextAlloc(TopTransactionContext,
     306             :                                    reqsize * sizeof(SharedInvalidationMessage));
     307     1040130 :             ima->maxmsgs = reqsize;
     308             :             Assert(nextindex == 0);
     309             :         }
     310             :         else
     311             :         {
     312             :             /* Enlarge storage array */
     313       82488 :             int         reqsize = 2 * ima->maxmsgs;
     314             : 
     315       82488 :             ima->msgs = (SharedInvalidationMessage *)
     316       82488 :                 repalloc(ima->msgs,
     317             :                          reqsize * sizeof(SharedInvalidationMessage));
     318       82488 :             ima->maxmsgs = reqsize;
     319             :         }
     320             :     }
     321             :     /* Okay, add message to current group */
     322    10873110 :     ima->msgs[nextindex] = *msg;
     323    10873110 :     group->nextmsg[subgroup]++;
     324    10873110 : }
     325             : 
     326             : /*
     327             :  * Append one subgroup of invalidation messages to another, resetting
     328             :  * the source subgroup to empty.
     329             :  */
     330             : static void
     331     4471504 : AppendInvalidationMessageSubGroup(InvalidationMsgsGroup *dest,
     332             :                                   InvalidationMsgsGroup *src,
     333             :                                   int subgroup)
     334             : {
     335             :     /* Messages must be adjacent in main array */
     336             :     Assert(dest->nextmsg[subgroup] == src->firstmsg[subgroup]);
     337             : 
     338             :     /* ... which makes this easy: */
     339     4471504 :     dest->nextmsg[subgroup] = src->nextmsg[subgroup];
     340             : 
     341             :     /*
     342             :      * This is handy for some callers and irrelevant for others.  But we do it
     343             :      * always, reasoning that it's bad to leave different groups pointing at
     344             :      * the same fragment of the message array.
     345             :      */
     346     4471504 :     SetSubGroupToFollow(src, dest, subgroup);
     347     4471504 : }
     348             : 
     349             : /*
     350             :  * Process a subgroup of invalidation messages.
     351             :  *
     352             :  * This is a macro that executes the given code fragment for each message in
     353             :  * a message subgroup.  The fragment should refer to the message as *msg.
     354             :  */
     355             : #define ProcessMessageSubGroup(group, subgroup, codeFragment) \
     356             :     do { \
     357             :         int     _msgindex = (group)->firstmsg[subgroup]; \
     358             :         int     _endmsg = (group)->nextmsg[subgroup]; \
     359             :         for (; _msgindex < _endmsg; _msgindex++) \
     360             :         { \
     361             :             SharedInvalidationMessage *msg = \
     362             :                 &InvalMessageArrays[subgroup].msgs[_msgindex]; \
     363             :             codeFragment; \
     364             :         } \
     365             :     } while (0)
     366             : 
     367             : /*
     368             :  * Process a subgroup of invalidation messages as an array.
     369             :  *
     370             :  * As above, but the code fragment can handle an array of messages.
     371             :  * The fragment should refer to the messages as msgs[], with n entries.
     372             :  */
     373             : #define ProcessMessageSubGroupMulti(group, subgroup, codeFragment) \
     374             :     do { \
     375             :         int     n = NumMessagesInSubGroup(group, subgroup); \
     376             :         if (n > 0) { \
     377             :             SharedInvalidationMessage *msgs = \
     378             :                 &InvalMessageArrays[subgroup].msgs[(group)->firstmsg[subgroup]]; \
     379             :             codeFragment; \
     380             :         } \
     381             :     } while (0)
     382             : 
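As a usage illustration of ProcessMessageSubGroup (hypothetical helper; the
real call sites appear further down in this file, e.g. in
AddRelcacheInvalidationMessage and ProcessInvalidationMessages), the
codeFragment simply refers to the current message as *msg:

    /* hypothetical: count queued catcache messages for one cache id */
    static int
    count_catcache_msgs_sketch(InvalidationMsgsGroup *group, int cacheid)
    {
        int         count = 0;

        ProcessMessageSubGroup(group, CatCacheMsgs,
                               if (msg->id == cacheid)
                                   count++);
        return count;
    }
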
     383             : 
     384             : /* ----------------------------------------------------------------
     385             :  *              Invalidation group support functions
     386             :  *
     387             :  * These routines understand about the division of a logical invalidation
     388             :  * group into separate physical arrays for catcache and relcache entries.
     389             :  * ----------------------------------------------------------------
     390             :  */
     391             : 
     392             : /*
     393             :  * Add a catcache inval entry
     394             :  */
     395             : static void
     396     8751084 : AddCatcacheInvalidationMessage(InvalidationMsgsGroup *group,
     397             :                                int id, uint32 hashValue, Oid dbId)
     398             : {
     399             :     SharedInvalidationMessage msg;
     400             : 
     401             :     Assert(id < CHAR_MAX);
     402     8751084 :     msg.cc.id = (int8) id;
     403     8751084 :     msg.cc.dbId = dbId;
     404     8751084 :     msg.cc.hashValue = hashValue;
     405             : 
     406             :     /*
      407             :      * Mark the padding bytes in SharedInvalidationMessage structs as
      408             :      * defined.  Otherwise the sinvaladt.c ringbuffer, which is accessed by
      409             :      * multiple processes, will cause spurious valgrind warnings about
      410             :      * undefined memory being used.  That's because valgrind remembers the
      411             :      * undefined bytes from the last local process's store, not realizing that
      412             :      * another process has written since, filling the previously uninitialized
      413             :      * bytes.
     414             :      */
     415             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     416             : 
     417     8751084 :     AddInvalidationMessage(group, CatCacheMsgs, &msg);
     418     8751084 : }
     419             : 
     420             : /*
     421             :  * Add a whole-catalog inval entry
     422             :  */
     423             : static void
     424         200 : AddCatalogInvalidationMessage(InvalidationMsgsGroup *group,
     425             :                               Oid dbId, Oid catId)
     426             : {
     427             :     SharedInvalidationMessage msg;
     428             : 
     429         200 :     msg.cat.id = SHAREDINVALCATALOG_ID;
     430         200 :     msg.cat.dbId = dbId;
     431         200 :     msg.cat.catId = catId;
     432             :     /* check AddCatcacheInvalidationMessage() for an explanation */
     433             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     434             : 
     435         200 :     AddInvalidationMessage(group, CatCacheMsgs, &msg);
     436         200 : }
     437             : 
     438             : /*
     439             :  * Add a relcache inval entry
     440             :  */
     441             : static void
     442     2849226 : AddRelcacheInvalidationMessage(InvalidationMsgsGroup *group,
     443             :                                Oid dbId, Oid relId)
     444             : {
     445             :     SharedInvalidationMessage msg;
     446             : 
     447             :     /*
     448             :      * Don't add a duplicate item. We assume dbId need not be checked because
     449             :      * it will never change. InvalidOid for relId means all relations so we
     450             :      * don't need to add individual ones when it is present.
     451             :      */
     452    11211338 :     ProcessMessageSubGroup(group, RelCacheMsgs,
     453             :                            if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
     454             :                                (msg->rc.relId == relId ||
     455             :                                 msg->rc.relId == InvalidOid))
     456             :                            return);
     457             : 
     458             :     /* OK, add the item */
     459     1048276 :     msg.rc.id = SHAREDINVALRELCACHE_ID;
     460     1048276 :     msg.rc.dbId = dbId;
     461     1048276 :     msg.rc.relId = relId;
     462             :     /* check AddCatcacheInvalidationMessage() for an explanation */
     463             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     464             : 
     465     1048276 :     AddInvalidationMessage(group, RelCacheMsgs, &msg);
     466             : }
     467             : 
     468             : /*
     469             :  * Add a snapshot inval entry
     470             :  *
     471             :  * We put these into the relcache subgroup for simplicity.
     472             :  */
     473             : static void
     474     2605802 : AddSnapshotInvalidationMessage(InvalidationMsgsGroup *group,
     475             :                                Oid dbId, Oid relId)
     476             : {
     477             :     SharedInvalidationMessage msg;
     478             : 
     479             :     /* Don't add a duplicate item */
     480             :     /* We assume dbId need not be checked because it will never change */
     481     3011986 :     ProcessMessageSubGroup(group, RelCacheMsgs,
     482             :                            if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
     483             :                                msg->sn.relId == relId)
     484             :                            return);
     485             : 
     486             :     /* OK, add the item */
     487     1073550 :     msg.sn.id = SHAREDINVALSNAPSHOT_ID;
     488     1073550 :     msg.sn.dbId = dbId;
     489     1073550 :     msg.sn.relId = relId;
     490             :     /* check AddCatcacheInvalidationMessage() for an explanation */
     491             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     492             : 
     493     1073550 :     AddInvalidationMessage(group, RelCacheMsgs, &msg);
     494             : }
     495             : 
     496             : /*
     497             :  * Append one group of invalidation messages to another, resetting
     498             :  * the source group to empty.
     499             :  */
     500             : static void
     501     2235752 : AppendInvalidationMessages(InvalidationMsgsGroup *dest,
     502             :                            InvalidationMsgsGroup *src)
     503             : {
     504     2235752 :     AppendInvalidationMessageSubGroup(dest, src, CatCacheMsgs);
     505     2235752 :     AppendInvalidationMessageSubGroup(dest, src, RelCacheMsgs);
     506     2235752 : }
     507             : 
     508             : /*
     509             :  * Execute the given function for all the messages in an invalidation group.
     510             :  * The group is not altered.
     511             :  *
     512             :  * catcache entries are processed first, for reasons mentioned above.
     513             :  */
     514             : static void
     515     1612222 : ProcessInvalidationMessages(InvalidationMsgsGroup *group,
     516             :                             void (*func) (SharedInvalidationMessage *msg))
     517             : {
     518     9763702 :     ProcessMessageSubGroup(group, CatCacheMsgs, func(msg));
     519     3541034 :     ProcessMessageSubGroup(group, RelCacheMsgs, func(msg));
     520     1612216 : }
     521             : 
     522             : /*
     523             :  * As above, but the function is able to process an array of messages
     524             :  * rather than just one at a time.
     525             :  */
     526             : static void
     527      627226 : ProcessInvalidationMessagesMulti(InvalidationMsgsGroup *group,
     528             :                                  void (*func) (const SharedInvalidationMessage *msgs, int n))
     529             : {
     530      627226 :     ProcessMessageSubGroupMulti(group, CatCacheMsgs, func(msgs, n));
     531      627226 :     ProcessMessageSubGroupMulti(group, RelCacheMsgs, func(msgs, n));
     532      627226 : }
     533             : 
     534             : /* ----------------------------------------------------------------
     535             :  *                    private support functions
     536             :  * ----------------------------------------------------------------
     537             :  */
     538             : 
     539             : /*
     540             :  * RegisterCatcacheInvalidation
     541             :  *
     542             :  * Register an invalidation event for a catcache tuple entry.
     543             :  */
     544             : static void
     545     8751084 : RegisterCatcacheInvalidation(int cacheId,
     546             :                              uint32 hashValue,
     547             :                              Oid dbId)
     548             : {
     549     8751084 :     AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
     550             :                                    cacheId, hashValue, dbId);
     551     8751084 : }
     552             : 
     553             : /*
     554             :  * RegisterCatalogInvalidation
     555             :  *
     556             :  * Register an invalidation event for all catcache entries from a catalog.
     557             :  */
     558             : static void
     559         200 : RegisterCatalogInvalidation(Oid dbId, Oid catId)
     560             : {
     561         200 :     AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
     562             :                                   dbId, catId);
     563         200 : }
     564             : 
     565             : /*
     566             :  * RegisterRelcacheInvalidation
     567             :  *
     568             :  * As above, but register a relcache invalidation event.
     569             :  */
     570             : static void
     571     2849226 : RegisterRelcacheInvalidation(Oid dbId, Oid relId)
     572             : {
     573     2849226 :     AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
     574             :                                    dbId, relId);
     575             : 
     576             :     /*
     577             :      * Most of the time, relcache invalidation is associated with system
     578             :      * catalog updates, but there are a few cases where it isn't.  Quick hack
     579             :      * to ensure that the next CommandCounterIncrement() will think that we
     580             :      * need to do CommandEndInvalidationMessages().
     581             :      */
     582     2849226 :     (void) GetCurrentCommandId(true);
     583             : 
     584             :     /*
     585             :      * If the relation being invalidated is one of those cached in a relcache
     586             :      * init file, mark that we need to zap that file at commit. For simplicity
     587             :      * invalidations for a specific database always invalidate the shared file
     588             :      * as well.  Also zap when we are invalidating whole relcache.
     589             :      */
     590     2849226 :     if (relId == InvalidOid || RelationIdIsInInitFile(relId))
     591      262872 :         transInvalInfo->RelcacheInitFileInval = true;
     592     2849226 : }
     593             : 
     594             : /*
     595             :  * RegisterSnapshotInvalidation
     596             :  *
     597             :  * Register an invalidation event for MVCC scans against a given catalog.
     598             :  * Only needed for catalogs that don't have catcaches.
     599             :  */
     600             : static void
     601     2605802 : RegisterSnapshotInvalidation(Oid dbId, Oid relId)
     602             : {
     603     2605802 :     AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
     604             :                                    dbId, relId);
     605     2605802 : }
     606             : 
     607             : /*
     608             :  * LocalExecuteInvalidationMessage
     609             :  *
     610             :  * Process a single invalidation message (which could be of any type).
     611             :  * Only the local caches are flushed; this does not transmit the message
     612             :  * to other backends.
     613             :  */
     614             : void
     615    38613754 : LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
     616             : {
     617    38613754 :     if (msg->id >= 0)
     618             :     {
     619    31343866 :         if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
     620             :         {
     621    24629578 :             InvalidateCatalogSnapshot();
     622             : 
     623    24629578 :             SysCacheInvalidate(msg->cc.id, msg->cc.hashValue);
     624             : 
     625    24629578 :             CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
     626             :         }
     627             :     }
     628     7269888 :     else if (msg->id == SHAREDINVALCATALOG_ID)
     629             :     {
     630         766 :         if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
     631             :         {
     632         656 :             InvalidateCatalogSnapshot();
     633             : 
     634         656 :             CatalogCacheFlushCatalog(msg->cat.catId);
     635             : 
     636             :             /* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
     637             :         }
     638             :     }
     639     7269122 :     else if (msg->id == SHAREDINVALRELCACHE_ID)
     640             :     {
     641     3422500 :         if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
     642             :         {
     643             :             int         i;
     644             : 
     645     2699714 :             if (msg->rc.relId == InvalidOid)
     646         218 :                 RelationCacheInvalidate(false);
     647             :             else
     648     2699496 :                 RelationCacheInvalidateEntry(msg->rc.relId);
     649             : 
     650     7378048 :             for (i = 0; i < relcache_callback_count; i++)
     651             :             {
     652     4678340 :                 struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
     653             : 
     654     4678340 :                 ccitem->function(ccitem->arg, msg->rc.relId);
     655             :             }
     656             :         }
     657             :     }
     658     3846622 :     else if (msg->id == SHAREDINVALSMGR_ID)
     659             :     {
     660             :         /*
     661             :          * We could have smgr entries for relations of other databases, so no
     662             :          * short-circuit test is possible here.
     663             :          */
     664             :         RelFileLocatorBackend rlocator;
     665             : 
     666      376902 :         rlocator.locator = msg->sm.rlocator;
     667      376902 :         rlocator.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
     668      376902 :         smgrcloserellocator(rlocator);
     669             :     }
     670     3469720 :     else if (msg->id == SHAREDINVALRELMAP_ID)
     671             :     {
     672             :         /* We only care about our own database and shared catalogs */
     673         330 :         if (msg->rm.dbId == InvalidOid)
     674         162 :             RelationMapInvalidate(true);
     675         168 :         else if (msg->rm.dbId == MyDatabaseId)
     676         102 :             RelationMapInvalidate(false);
     677             :     }
     678     3469390 :     else if (msg->id == SHAREDINVALSNAPSHOT_ID)
     679             :     {
     680             :         /* We only care about our own database and shared catalogs */
     681     3469390 :         if (msg->sn.dbId == InvalidOid)
     682       78012 :             InvalidateCatalogSnapshot();
     683     3391378 :         else if (msg->sn.dbId == MyDatabaseId)
     684     2848446 :             InvalidateCatalogSnapshot();
     685             :     }
     686             :     else
     687           0 :         elog(FATAL, "unrecognized SI message ID: %d", msg->id);
     688    38613748 : }
     689             : 
     690             : /*
     691             :  *      InvalidateSystemCaches
     692             :  *
     693             :  *      This blows away all tuples in the system catalog caches and
     694             :  *      all the cached relation descriptors and smgr cache entries.
     695             :  *      Relation descriptors that have positive refcounts are then rebuilt.
     696             :  *
     697             :  *      We call this when we see a shared-inval-queue overflow signal,
     698             :  *      since that tells us we've lost some shared-inval messages and hence
     699             :  *      don't know what needs to be invalidated.
     700             :  */
     701             : void
     702        3854 : InvalidateSystemCaches(void)
     703             : {
     704        3854 :     InvalidateSystemCachesExtended(false);
     705        3854 : }
     706             : 
     707             : void
     708        3854 : InvalidateSystemCachesExtended(bool debug_discard)
     709             : {
     710             :     int         i;
     711             : 
     712        3854 :     InvalidateCatalogSnapshot();
     713        3854 :     ResetCatalogCaches();
     714        3854 :     RelationCacheInvalidate(debug_discard); /* gets smgr and relmap too */
     715             : 
     716       54236 :     for (i = 0; i < syscache_callback_count; i++)
     717             :     {
     718       50382 :         struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
     719             : 
     720       50382 :         ccitem->function(ccitem->arg, ccitem->id, 0);
     721             :     }
     722             : 
     723        8772 :     for (i = 0; i < relcache_callback_count; i++)
     724             :     {
     725        4918 :         struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
     726             : 
     727        4918 :         ccitem->function(ccitem->arg, InvalidOid);
     728             :     }
     729        3854 : }
     730             : 
     731             : 
     732             : /* ----------------------------------------------------------------
     733             :  *                    public functions
     734             :  * ----------------------------------------------------------------
     735             :  */
     736             : 
     737             : /*
     738             :  * AcceptInvalidationMessages
     739             :  *      Read and process invalidation messages from the shared invalidation
     740             :  *      message queue.
     741             :  *
     742             :  * Note:
     743             :  *      This should be called as the first step in processing a transaction.
     744             :  */
     745             : void
     746    48898816 : AcceptInvalidationMessages(void)
     747             : {
     748    48898816 :     ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
     749             :                                  InvalidateSystemCaches);
     750             : 
     751             :     /*----------
     752             :      * Test code to force cache flushes anytime a flush could happen.
     753             :      *
     754             :      * This helps detect intermittent faults caused by code that reads a cache
     755             :      * entry and then performs an action that could invalidate the entry, but
     756             :      * rarely actually does so.  This can spot issues that would otherwise
     757             :      * only arise with badly timed concurrent DDL, for example.
     758             :      *
     759             :      * The default debug_discard_caches = 0 does no forced cache flushes.
     760             :      *
     761             :      * If used with CLOBBER_FREED_MEMORY,
     762             :      * debug_discard_caches = 1 (formerly known as CLOBBER_CACHE_ALWAYS)
     763             :      * provides a fairly thorough test that the system contains no cache-flush
     764             :      * hazards.  However, it also makes the system unbelievably slow --- the
     765             :      * regression tests take about 100 times longer than normal.
     766             :      *
     767             :      * If you're a glutton for punishment, try
     768             :      * debug_discard_caches = 3 (formerly known as CLOBBER_CACHE_RECURSIVELY).
     769             :      * This slows things by at least a factor of 10000, so I wouldn't suggest
     770             :      * trying to run the entire regression tests that way.  It's useful to try
     771             :      * a few simple tests, to make sure that cache reload isn't subject to
     772             :      * internal cache-flush hazards, but after you've done a few thousand
     773             :      * recursive reloads it's unlikely you'll learn more.
     774             :      *----------
     775             :      */
     776             : #ifdef DISCARD_CACHES_ENABLED
     777             :     {
     778             :         static int  recursion_depth = 0;
     779             : 
     780             :         if (recursion_depth < debug_discard_caches)
     781             :         {
     782             :             recursion_depth++;
     783             :             InvalidateSystemCachesExtended(true);
     784             :             recursion_depth--;
     785             :         }
     786             :     }
     787             : #endif
     788    48898816 : }
     789             : 
     790             : /*
     791             :  * PrepareInvalidationState
     792             :  *      Initialize inval data for the current (sub)transaction.
     793             :  */
     794             : static void
     795     7972626 : PrepareInvalidationState(void)
     796             : {
     797             :     TransInvalidationInfo *myInfo;
     798             : 
     799    15314430 :     if (transInvalInfo != NULL &&
     800     7341804 :         transInvalInfo->my_level == GetCurrentTransactionNestLevel())
     801     7341662 :         return;
     802             : 
     803             :     myInfo = (TransInvalidationInfo *)
     804      630964 :         MemoryContextAllocZero(TopTransactionContext,
     805             :                                sizeof(TransInvalidationInfo));
     806      630964 :     myInfo->parent = transInvalInfo;
     807      630964 :     myInfo->my_level = GetCurrentTransactionNestLevel();
     808             : 
     809             :     /* Now, do we have a previous stack entry? */
     810      630964 :     if (transInvalInfo != NULL)
     811             :     {
     812             :         /* Yes; this one should be for a deeper nesting level. */
     813             :         Assert(myInfo->my_level > transInvalInfo->my_level);
     814             : 
     815             :         /*
     816             :          * The parent (sub)transaction must not have any current (i.e.,
     817             :          * not-yet-locally-processed) messages.  If it did, we'd have a
     818             :          * semantic problem: the new subtransaction presumably ought not be
     819             :          * able to see those events yet, but since the CommandCounter is
     820             :          * linear, that can't work once the subtransaction advances the
     821             :          * counter.  This is a convenient place to check for that, as well as
     822             :          * being important to keep management of the message arrays simple.
     823             :          */
     824         142 :         if (NumMessagesInGroup(&transInvalInfo->CurrentCmdInvalidMsgs) != 0)
     825           0 :             elog(ERROR, "cannot start a subtransaction when there are unprocessed inval messages");
     826             : 
     827             :         /*
     828             :          * MemoryContextAllocZero set firstmsg = nextmsg = 0 in each group,
     829             :          * which is fine for the first (sub)transaction, but otherwise we need
     830             :          * to update them to follow whatever is already in the arrays.
     831             :          */
     832         142 :         SetGroupToFollow(&myInfo->PriorCmdInvalidMsgs,
     833             :                          &transInvalInfo->CurrentCmdInvalidMsgs);
     834         142 :         SetGroupToFollow(&myInfo->CurrentCmdInvalidMsgs,
     835             :                          &myInfo->PriorCmdInvalidMsgs);
     836             :     }
     837             :     else
     838             :     {
     839             :         /*
     840             :          * Here, we need only clear any array pointers left over from a prior
     841             :          * transaction.
     842             :          */
     843      630822 :         InvalMessageArrays[CatCacheMsgs].msgs = NULL;
     844      630822 :         InvalMessageArrays[CatCacheMsgs].maxmsgs = 0;
     845      630822 :         InvalMessageArrays[RelCacheMsgs].msgs = NULL;
     846      630822 :         InvalMessageArrays[RelCacheMsgs].maxmsgs = 0;
     847             :     }
     848             : 
     849      630964 :     transInvalInfo = myInfo;
     850             : }
     851             : 
     852             : /*
     853             :  * PostPrepare_Inval
     854             :  *      Clean up after successful PREPARE.
     855             :  *
     856             :  * Here, we want to act as though the transaction aborted, so that we will
     857             :  * undo any syscache changes it made, thereby bringing us into sync with the
     858             :  * outside world, which doesn't believe the transaction committed yet.
     859             :  *
     860             :  * If the prepared transaction is later aborted, there is nothing more to
     861             :  * do; if it commits, we will receive the consequent inval messages just
     862             :  * like everyone else.
     863             :  */
     864             : void
     865         736 : PostPrepare_Inval(void)
     866             : {
     867         736 :     AtEOXact_Inval(false);
     868         736 : }
     869             : 
     870             : /*
     871             :  * xactGetCommittedInvalidationMessages() is called by
     872             :  * RecordTransactionCommit() to collect invalidation messages to add to the
     873             :  * commit record. This applies only to commit message types, never to
     874             :  * abort records. Must always run before AtEOXact_Inval(), since that
     875             :  * removes the data we need to see.
     876             :  *
     877             :  * Remember that this runs before we have officially committed, so we
     878             :  * must not do anything here to change what might occur *if* we should
     879             :  * fail between here and the actual commit.
     880             :  *
     881             :  * see also xact_redo_commit() and xact_desc_commit()
     882             :  */
     883             : int
     884      797744 : xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
     885             :                                      bool *RelcacheInitFileInval)
     886             : {
     887             :     SharedInvalidationMessage *msgarray;
     888             :     int         nummsgs;
     889             :     int         nmsgs;
     890             : 
     891             :     /* Quick exit if we haven't done anything with invalidation messages. */
     892      797744 :     if (transInvalInfo == NULL)
     893             :     {
     894      207990 :         *RelcacheInitFileInval = false;
     895      207990 :         *msgs = NULL;
     896      207990 :         return 0;
     897             :     }
     898             : 
     899             :     /* Must be at top of stack */
     900             :     Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
     901             : 
     902             :     /*
     903             :      * Relcache init file invalidation requires processing both before and
     904             :      * after we send the SI messages.  However, we need not do anything unless
     905             :      * we committed.
     906             :      */
     907      589754 :     *RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
     908             : 
     909             :     /*
     910             :      * Collect all the pending messages into a single contiguous array of
     911             :      * invalidation messages, to simplify what needs to happen while building
     912             :      * the commit WAL message.  Maintain the order that they would be
     913             :      * processed in by AtEOXact_Inval(), to ensure emulated behaviour in redo
      914             :      * is as similar as possible to the original.  We want the same bugs, if any,
     915             :      * not new ones.
     916             :      */
     917      589754 :     nummsgs = NumMessagesInGroup(&transInvalInfo->PriorCmdInvalidMsgs) +
     918      589754 :         NumMessagesInGroup(&transInvalInfo->CurrentCmdInvalidMsgs);
     919             : 
     920      589754 :     *msgs = msgarray = (SharedInvalidationMessage *)
     921      589754 :         MemoryContextAlloc(CurTransactionContext,
     922             :                            nummsgs * sizeof(SharedInvalidationMessage));
     923             : 
     924      589754 :     nmsgs = 0;
     925      589754 :     ProcessMessageSubGroupMulti(&transInvalInfo->PriorCmdInvalidMsgs,
     926             :                                 CatCacheMsgs,
     927             :                                 (memcpy(msgarray + nmsgs,
     928             :                                         msgs,
     929             :                                         n * sizeof(SharedInvalidationMessage)),
     930             :                                  nmsgs += n));
     931      589754 :     ProcessMessageSubGroupMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
     932             :                                 CatCacheMsgs,
     933             :                                 (memcpy(msgarray + nmsgs,
     934             :                                         msgs,
     935             :                                         n * sizeof(SharedInvalidationMessage)),
     936             :                                  nmsgs += n));
     937      589754 :     ProcessMessageSubGroupMulti(&transInvalInfo->PriorCmdInvalidMsgs,
     938             :                                 RelCacheMsgs,
     939             :                                 (memcpy(msgarray + nmsgs,
     940             :                                         msgs,
     941             :                                         n * sizeof(SharedInvalidationMessage)),
     942             :                                  nmsgs += n));
     943      589754 :     ProcessMessageSubGroupMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
     944             :                                 RelCacheMsgs,
     945             :                                 (memcpy(msgarray + nmsgs,
     946             :                                         msgs,
     947             :                                         n * sizeof(SharedInvalidationMessage)),
     948             :                                  nmsgs += n));
     949             :     Assert(nmsgs == nummsgs);
     950             : 
     951      589754 :     return nmsgs;
     952             : }
     953             : 
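The routine ending above flattens the transaction's pending invalidations into one palloc'd array precisely so that the commit record can carry them.  A minimal sketch of a commit-time caller, in the spirit of RecordTransactionCommit(); the attach_invals_to_commit_record() helper is hypothetical and stands in for building the real commit WAL record:

#include "postgres.h"

#include "storage/sinval.h"

/* Hypothetical helper; stands in for assembling the real commit WAL record. */
extern void attach_invals_to_commit_record(SharedInvalidationMessage *msgs,
                                           int nmsgs,
                                           bool relcacheInitFileInval);

static void
collect_commit_invalidations(void)
{
    SharedInvalidationMessage *invalMessages = NULL;
    bool        relcacheInitFileInval = false;
    int         nmsgs;

    /* Flatten PriorCmd + CurrentCmd messages into one contiguous array. */
    nmsgs = xactGetCommittedInvalidationMessages(&invalMessages,
                                                 &relcacheInitFileInval);

    if (nmsgs > 0)
        attach_invals_to_commit_record(invalMessages, nmsgs,
                                       relcacheInitFileInval);
}
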
     954             : /*
     955             :  * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
     956             :  * standby_redo() to process invalidation messages. Currently that happens
     957             :  * only at end-of-xact.
     958             :  *
     959             :  * Relcache init file invalidation requires processing both
     960             :  * before and after we send the SI messages.  See AtEOXact_Inval().
     961             :  */
     962             : void
     963       34356 : ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
     964             :                                      int nmsgs, bool RelcacheInitFileInval,
     965             :                                      Oid dbid, Oid tsid)
     966             : {
     967       34356 :     if (nmsgs <= 0)
     968        8822 :         return;
     969             : 
     970       25534 :     elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
     971             :          (RelcacheInitFileInval ? " and relcache file invalidation" : ""));
     972             : 
     973       25534 :     if (RelcacheInitFileInval)
     974             :     {
     975         240 :         elog(trace_recovery(DEBUG4), "removing relcache init files for database %u",
     976             :              dbid);
     977             : 
     978             :         /*
     979             :          * RelationCacheInitFilePreInvalidate, when the invalidation message
     980             :          * is for a specific database, requires DatabasePath to be set, but we
     981             :          * should not use SetDatabasePath during recovery, since it is
     982             :          * intended to be used only once by normal backends.  Hence, a quick
     983             :          * hack: set DatabasePath directly then unset after use.
     984             :          */
     985         240 :         if (OidIsValid(dbid))
     986         240 :             DatabasePath = GetDatabasePath(dbid, tsid);
     987             : 
     988         240 :         RelationCacheInitFilePreInvalidate();
     989             : 
     990         240 :         if (OidIsValid(dbid))
     991             :         {
     992         240 :             pfree(DatabasePath);
     993         240 :             DatabasePath = NULL;
     994             :         }
     995             :     }
     996             : 
     997       25534 :     SendSharedInvalidMessages(msgs, nmsgs);
     998             : 
     999       25534 :     if (RelcacheInitFileInval)
    1000         240 :         RelationCacheInitFilePostInvalidate();
    1001             : }
    1002             : 
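On the replay side the same flat array comes back out of the commit record.  A sketch assuming the usual parsed-commit layout (dbId, tsId, nmsgs, msgs, xinfo) and the XactCompletionRelcacheInitFileInval() macro from access/xact.h; treat those names as illustrative rather than a quotation of the real redo code:

#include "postgres.h"

#include "access/xact.h"
#include "storage/sinval.h"

static void
redo_commit_invalidations(xl_xact_parsed_commit *parsed)
{
    /*
     * Replay the invalidations recorded at commit: remove the relcache init
     * files if the committing transaction invalidated them, and broadcast
     * the messages so hot-standby backends drop their stale cache entries.
     */
    ProcessCommittedInvalidationMessages(parsed->msgs,
                                         parsed->nmsgs,
                                         XactCompletionRelcacheInitFileInval(parsed->xinfo),
                                         parsed->dbId,
                                         parsed->tsId);
}
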
    1003             : /*
    1004             :  * AtEOXact_Inval
    1005             :  *      Process queued-up invalidation messages at end of main transaction.
    1006             :  *
    1007             :  * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
    1008             :  * to the shared invalidation message queue.  Note that these will be read
    1009             :  * not only by other backends, but also by our own backend at the next
    1010             :  * transaction start (via AcceptInvalidationMessages).  This means that
    1011             :  * we can skip immediate local processing of anything that's still in
    1012             :  * CurrentCmdInvalidMsgs, and just send that list out too.
    1013             :  *
    1014             :  * If not isCommit, we are aborting, and must locally process the messages
    1015             :  * in PriorCmdInvalidMsgs.  No messages need be sent to other backends,
    1016             :  * since they'll not have seen our changed tuples anyway.  We can forget
    1017             :  * about CurrentCmdInvalidMsgs too, since those changes haven't touched
    1018             :  * the caches yet.
    1019             :  *
    1020             :  * In any case, reset our state to empty.  We need not physically
    1021             :  * free memory here, since TopTransactionContext is about to be emptied
    1022             :  * anyway.
    1023             :  *
    1024             :  * Note:
    1025             :  *      This should be called as the last step in processing a transaction.
    1026             :  */
    1027             : void
    1028      976494 : AtEOXact_Inval(bool isCommit)
    1029             : {
    1030             :     /* Quick exit if no messages */
    1031      976494 :     if (transInvalInfo == NULL)
    1032      345736 :         return;
    1033             : 
    1034             :     /* Must be at top of stack */
    1035             :     Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
    1036             : 
    1037      630758 :     if (isCommit)
    1038             :     {
    1039             :         /*
    1040             :          * Relcache init file invalidation requires processing both before and
    1041             :          * after we send the SI messages.  However, we need not do anything
    1042             :          * unless we committed.
    1043             :          */
    1044      627226 :         if (transInvalInfo->RelcacheInitFileInval)
    1045       88500 :             RelationCacheInitFilePreInvalidate();
    1046             : 
    1047      627226 :         AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
    1048      627226 :                                    &transInvalInfo->CurrentCmdInvalidMsgs);
    1049             : 
    1050      627226 :         ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
    1051             :                                          SendSharedInvalidMessages);
    1052             : 
    1053      627226 :         if (transInvalInfo->RelcacheInitFileInval)
    1054       88500 :             RelationCacheInitFilePostInvalidate();
    1055             :     }
    1056             :     else
    1057             :     {
    1058        3532 :         ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
    1059             :                                     LocalExecuteInvalidationMessage);
    1060             :     }
    1061             : 
    1062             :     /* Need not free anything explicitly */
    1063      630758 :     transInvalInfo = NULL;
    1064             : }
    1065             : 
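A schematic view of the calling pattern the comment above implies; the enclosing function merely stands in for the real commit/abort paths in xact.c:

#include "postgres.h"

#include "utils/inval.h"

static void
end_of_transaction_sketch(bool committed)
{
    /*
     * On commit: the commit must already be durably recorded before the SI
     * messages go out, or other backends could flush and then reload the old
     * tuple versions.  On abort: only local processing happens, to get rid
     * of whatever this backend cached from its own aborted changes.
     */
    AtEOXact_Inval(committed);

    /* transInvalInfo is now reset; the next transaction starts clean. */
}
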
    1066             : /*
    1067             :  * AtEOSubXact_Inval
    1068             :  *      Process queued-up invalidation messages at end of subtransaction.
    1069             :  *
    1070             :  * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
    1071             :  * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
    1072             :  * parent's PriorCmdInvalidMsgs list.
    1073             :  *
    1074             :  * If not isCommit, we are aborting, and must locally process the messages
    1075             :  * in PriorCmdInvalidMsgs.  No messages need be sent to other backends.
    1076             :  * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
    1077             :  * touched the caches yet.
    1078             :  *
    1079             :  * In any case, pop the transaction stack.  We need not physically free memory
    1080             :  * here, since CurTransactionContext is about to be emptied anyway
    1081             :  * (if aborting).  Beware of the possibility of aborting the same nesting
    1082             :  * level twice, though.
    1083             :  */
    1084             : void
    1085       17602 : AtEOSubXact_Inval(bool isCommit)
    1086             : {
    1087             :     int         my_level;
    1088       17602 :     TransInvalidationInfo *myInfo = transInvalInfo;
    1089             : 
    1090             :     /* Quick exit if no messages. */
    1091       17602 :     if (myInfo == NULL)
    1092       16072 :         return;
    1093             : 
    1094             :     /* Also bail out quickly if messages are not for this level. */
    1095        1530 :     my_level = GetCurrentTransactionNestLevel();
    1096        1530 :     if (myInfo->my_level != my_level)
    1097             :     {
    1098             :         Assert(myInfo->my_level < my_level);
    1099        1256 :         return;
    1100             :     }
    1101             : 
    1102         274 :     if (isCommit)
    1103             :     {
    1104             :         /* If CurrentCmdInvalidMsgs still has anything, fix it */
    1105          92 :         CommandEndInvalidationMessages();
    1106             : 
    1107             :         /*
    1108             :          * We create invalidation stack entries lazily, so the parent might
    1109             :          * not have one.  Instead of creating one, moving all the data over,
    1110             :          * and then freeing our own, we can just adjust the level of our own
    1111             :          * entry.
    1112             :          */
    1113          92 :         if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
    1114             :         {
    1115          68 :             myInfo->my_level--;
    1116          68 :             return;
    1117             :         }
    1118             : 
    1119             :         /*
    1120             :          * Pass up my inval messages to parent.  Notice that we stick them in
    1121             :          * PriorCmdInvalidMsgs, not CurrentCmdInvalidMsgs, since they've
    1122             :          * already been locally processed.  (This would trigger the Assert in
    1123             :          * AppendInvalidationMessageSubGroup if the parent's
    1124             :          * CurrentCmdInvalidMsgs isn't empty; but we already checked that in
    1125             :          * PrepareInvalidationState.)
    1126             :          */
    1127          24 :         AppendInvalidationMessages(&myInfo->parent->PriorCmdInvalidMsgs,
    1128             :                                    &myInfo->PriorCmdInvalidMsgs);
    1129             : 
    1130             :         /* Must readjust parent's CurrentCmdInvalidMsgs indexes now */
    1131          24 :         SetGroupToFollow(&myInfo->parent->CurrentCmdInvalidMsgs,
    1132             :                          &myInfo->parent->PriorCmdInvalidMsgs);
    1133             : 
    1134             :         /* Pending relcache inval becomes parent's problem too */
    1135          24 :         if (myInfo->RelcacheInitFileInval)
    1136           0 :             myInfo->parent->RelcacheInitFileInval = true;
    1137             : 
    1138             :         /* Pop the transaction state stack */
    1139          24 :         transInvalInfo = myInfo->parent;
    1140             : 
    1141             :         /* Need not free anything else explicitly */
    1142          24 :         pfree(myInfo);
    1143             :     }
    1144             :     else
    1145             :     {
    1146         182 :         ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs,
    1147             :                                     LocalExecuteInvalidationMessage);
    1148             : 
    1149             :         /* Pop the transaction state stack */
    1150         182 :         transInvalInfo = myInfo->parent;
    1151             : 
    1152             :         /* Need not free anything else explicitly */
    1153         182 :         pfree(myInfo);
    1154             :     }
    1155             : }
    1156             : 
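The subtransaction counterpart follows the same shape; again the wrapper is schematic:

#include "postgres.h"

#include "utils/inval.h"

static void
end_of_subtransaction_sketch(bool committed)
{
    /*
     * Subcommit: messages migrate to the parent's PriorCmdInvalidMsgs and
     * are only broadcast if the top-level transaction eventually commits.
     * Subabort: they are applied locally and then discarded.
     */
    AtEOSubXact_Inval(committed);
}
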
    1157             : /*
    1158             :  * CommandEndInvalidationMessages
    1159             :  *      Process queued-up invalidation messages at end of one command
    1160             :  *      in a transaction.
    1161             :  *
    1162             :  * Here, we send no messages to the shared queue, since we don't know yet if
    1163             :  * we will commit.  We do need to locally process the CurrentCmdInvalidMsgs
    1164             :  * list, so as to flush our caches of any entries we have outdated in the
    1165             :  * current command.  We then move the current-cmd list over to become part
    1166             :  * of the prior-cmds list.
    1167             :  *
    1168             :  * Note:
    1169             :  *      This should be called during CommandCounterIncrement(),
    1170             :  *      after we have advanced the command ID.
    1171             :  */
    1172             : void
    1173     1990722 : CommandEndInvalidationMessages(void)
    1174             : {
    1175             :     /*
    1176             :      * You might think this shouldn't be called outside any transaction, but
    1177             :      * bootstrap does it, and so does an ABORT issued when not in a
    1178             :      * transaction.  So just quietly return if there is no state to work on.
    1179             :      */
    1180     1990722 :     if (transInvalInfo == NULL)
    1181      382214 :         return;
    1182             : 
    1183     1608508 :     ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
    1184             :                                 LocalExecuteInvalidationMessage);
    1185             : 
    1186             :     /* WAL Log per-command invalidation messages for wal_level=logical */
    1187     1608502 :     if (XLogLogicalInfoActive())
    1188       10514 :         LogLogicalInvalidations();
    1189             : 
    1190     1608502 :     AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
    1191     1608502 :                                &transInvalInfo->CurrentCmdInvalidMsgs);
    1192             : }
    1193             : 
    1194             : 
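The note above fixes the ordering inside command-counter increment; schematically (the enclosing function stands in for the real CommandCounterIncrement()):

#include "postgres.h"

#include "utils/inval.h"

static void
command_counter_increment_sketch(void)
{
    /* 1. advance the command ID, so this command's effects become visible */

    /*
     * 2. flush our own caches of everything the finished command outdated,
     *    and fold CurrentCmdInvalidMsgs into PriorCmdInvalidMsgs so it is
     *    rebroadcast (or rolled back) at end of transaction.
     */
    CommandEndInvalidationMessages();
}
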
    1195             : /*
    1196             :  * CacheInvalidateHeapTuple
    1197             :  *      Register the given tuple for invalidation at end of command
    1198             :  *      (ie, current command is creating or outdating this tuple).
    1199             :  *      Also, detect whether a relcache invalidation is implied.
    1200             :  *
    1201             :  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
    1202             :  * For an update, we are called just once, with tuple being the old tuple
    1203             :  * version and newtuple the new version.  This avoids duplicate effort
    1204             :  * during an update.
    1205             :  */
    1206             : void
    1207    31510862 : CacheInvalidateHeapTuple(Relation relation,
    1208             :                          HeapTuple tuple,
    1209             :                          HeapTuple newtuple)
    1210             : {
    1211             :     Oid         tupleRelId;
    1212             :     Oid         databaseId;
    1213             :     Oid         relationId;
    1214             : 
    1215             :     /* Do nothing during bootstrap */
    1216    31510862 :     if (IsBootstrapProcessingMode())
    1217     7574394 :         return;
    1218             : 
    1219             :     /*
    1220             :      * We only need to worry about invalidation for tuples that are in system
    1221             :      * catalogs; user-relation tuples are never in catcaches and can't affect
    1222             :      * the relcache either.
    1223             :      */
    1224    23936468 :     if (!IsCatalogRelation(relation))
    1225    16113800 :         return;
    1226             : 
    1227             :     /*
    1228             :      * IsCatalogRelation() will return true for TOAST tables of system
    1229             :      * catalogs, but we don't care about those, either.
    1230             :      */
    1231     7822668 :     if (IsToastRelation(relation))
    1232      185482 :         return;
    1233             : 
    1234             :     /*
    1235             :      * If we're not prepared to queue invalidation messages for this
    1236             :      * subtransaction level, get ready now.
    1237             :      */
    1238     7637186 :     PrepareInvalidationState();
    1239             : 
    1240             :     /*
    1241             :      * First let the catcache do its thing
    1242             :      */
    1243     7637186 :     tupleRelId = RelationGetRelid(relation);
    1244     7637186 :     if (RelationInvalidatesSnapshotsOnly(tupleRelId))
    1245             :     {
    1246     2605802 :         databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
    1247     2605802 :         RegisterSnapshotInvalidation(databaseId, tupleRelId);
    1248             :     }
    1249             :     else
    1250     5031384 :         PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
    1251             :                                       RegisterCatcacheInvalidation);
    1252             : 
    1253             :     /*
    1254             :      * Now, is this tuple one of the primary definers of a relcache entry? See
    1255             :      * comments in file header for deeper explanation.
    1256             :      *
    1257             :      * Note we ignore newtuple here; we assume an update cannot move a tuple
    1258             :      * from being part of one relcache entry to being part of another.
    1259             :      */
    1260     7637186 :     if (tupleRelId == RelationRelationId)
    1261             :     {
    1262      690850 :         Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
    1263             : 
    1264      690850 :         relationId = classtup->oid;
    1265      690850 :         if (classtup->relisshared)
    1266       27722 :             databaseId = InvalidOid;
    1267             :         else
    1268      663128 :             databaseId = MyDatabaseId;
    1269             :     }
    1270     6946336 :     else if (tupleRelId == AttributeRelationId)
    1271             :     {
    1272     1752736 :         Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
    1273             : 
    1274     1752736 :         relationId = atttup->attrelid;
    1275             : 
    1276             :         /*
    1277             :          * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
    1278             :          * even if the rel in question is shared (which we can't easily tell).
    1279             :          * This essentially means that only backends in this same database
    1280             :          * will react to the relcache flush request.  This is in fact
    1281             :          * appropriate, since only those backends could see our pg_attribute
    1282             :          * change anyway.  It looks a bit ugly though.  (In practice, shared
    1283             :          * relations can't have schema changes after bootstrap, so we should
    1284             :          * never come here for a shared rel anyway.)
    1285             :          */
    1286     1752736 :         databaseId = MyDatabaseId;
    1287             :     }
    1288     5193600 :     else if (tupleRelId == IndexRelationId)
    1289             :     {
    1290       90028 :         Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
    1291             : 
    1292             :         /*
    1293             :          * When a pg_index row is updated, we should send out a relcache inval
    1294             :          * for the index relation.  As above, we don't know the shared status
    1295             :          * of the index, but in practice it doesn't matter since indexes of
    1296             :          * shared catalogs can't have such updates.
    1297             :          */
    1298       90028 :         relationId = indextup->indexrelid;
    1299       90028 :         databaseId = MyDatabaseId;
    1300             :     }
    1301     5103572 :     else if (tupleRelId == ConstraintRelationId)
    1302             :     {
    1303       94092 :         Form_pg_constraint constrtup = (Form_pg_constraint) GETSTRUCT(tuple);
    1304             : 
    1305             :         /*
    1306             :          * Foreign keys are part of relcache entries, too, so send out an
    1307             :          * inval for the table that the FK applies to.
    1308             :          */
    1309       94092 :         if (constrtup->contype == CONSTRAINT_FOREIGN &&
    1310        6220 :             OidIsValid(constrtup->conrelid))
    1311             :         {
    1312        6220 :             relationId = constrtup->conrelid;
    1313        6220 :             databaseId = MyDatabaseId;
    1314             :         }
    1315             :         else
    1316       87872 :             return;
    1317             :     }
    1318             :     else
    1319     5009480 :         return;
    1320             : 
    1321             :     /*
    1322             :      * Yes.  We need to register a relcache invalidation event.
    1323             :      */
    1324     2539834 :     RegisterRelcacheInvalidation(databaseId, relationId);
    1325             : }
    1326             : 
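The calling convention from the header comment, spelled out.  Only CacheInvalidateHeapTuple() itself is real; the two wrappers are schematic stand-ins for heap-modification code:

#include "postgres.h"

#include "access/htup.h"
#include "utils/inval.h"
#include "utils/rel.h"

/* insert or delete: one affected tuple, no new version */
static void
note_catalog_insert_or_delete(Relation catalog, HeapTuple tup)
{
    CacheInvalidateHeapTuple(catalog, tup, NULL);
}

/* update: a single call covering old and new versions, not one call each */
static void
note_catalog_update(Relation catalog, HeapTuple oldtup, HeapTuple newtup)
{
    CacheInvalidateHeapTuple(catalog, oldtup, newtup);
}
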
    1327             : /*
    1328             :  * CacheInvalidateCatalog
    1329             :  *      Register invalidation of the whole content of a system catalog.
    1330             :  *
    1331             :  * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
    1332             :  * changed any tuples as moved them around.  Some uses of catcache entries
    1333             :  * expect their TIDs to be correct, so we have to blow away the entries.
    1334             :  *
    1335             :  * Note: we expect caller to verify that the rel actually is a system
    1336             :  * catalog.  If it isn't, no great harm is done, just a wasted sinval message.
    1337             :  */
    1338             : void
    1339         200 : CacheInvalidateCatalog(Oid catalogId)
    1340             : {
    1341             :     Oid         databaseId;
    1342             : 
    1343         200 :     PrepareInvalidationState();
    1344             : 
    1345         200 :     if (IsSharedRelation(catalogId))
    1346          34 :         databaseId = InvalidOid;
    1347             :     else
    1348         166 :         databaseId = MyDatabaseId;
    1349             : 
    1350         200 :     RegisterCatalogInvalidation(databaseId, catalogId);
    1351         200 : }
    1352             : 
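As an illustration, a catalog-rewrite path in the spirit of VACUUM FULL or CLUSTER would register the flush once the new heap is in place; the enclosing function is hypothetical:

#include "postgres.h"

#include "utils/inval.h"
#include "utils/rel.h"

static void
finish_catalog_rewrite(Relation rewrittenCatalog)
{
    /*
     * Every catcache entry for this catalog carries a TID into the old heap,
     * so blow them all away rather than trying to track individual tuples.
     */
    CacheInvalidateCatalog(RelationGetRelid(rewrittenCatalog));
}
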
    1353             : /*
    1354             :  * CacheInvalidateRelcache
    1355             :  *      Register invalidation of the specified relation's relcache entry
    1356             :  *      at end of command.
    1357             :  *
    1358             :  * This is used in places that need to force relcache rebuild but aren't
    1359             :  * changing any of the tuples recognized as contributors to the relcache
    1360             :  * entry by CacheInvalidateHeapTuple.  (An example is dropping an index.)
    1361             :  */
    1362             : void
    1363      218338 : CacheInvalidateRelcache(Relation relation)
    1364             : {
    1365             :     Oid         databaseId;
    1366             :     Oid         relationId;
    1367             : 
    1368      218338 :     PrepareInvalidationState();
    1369             : 
    1370      218338 :     relationId = RelationGetRelid(relation);
    1371      218338 :     if (relation->rd_rel->relisshared)
    1372       24448 :         databaseId = InvalidOid;
    1373             :     else
    1374      193890 :         databaseId = MyDatabaseId;
    1375             : 
    1376      218338 :     RegisterRelcacheInvalidation(databaseId, relationId);
    1377      218338 : }
    1378             : 
    1379             : /*
    1380             :  * CacheInvalidateRelcacheAll
    1381             :  *      Register invalidation of the whole relcache at the end of command.
    1382             :  *
    1383             :  * This is used by ALTER PUBLICATION, since changes in publications may
    1384             :  * affect a large number of tables.
    1385             :  */
    1386             : void
    1387          76 : CacheInvalidateRelcacheAll(void)
    1388             : {
    1389          76 :     PrepareInvalidationState();
    1390             : 
    1391          76 :     RegisterRelcacheInvalidation(InvalidOid, InvalidOid);
    1392          76 : }
    1393             : 
    1394             : /*
    1395             :  * CacheInvalidateRelcacheByTuple
    1396             :  *      As above, but relation is identified by passing its pg_class tuple.
    1397             :  */
    1398             : void
    1399       90978 : CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
    1400             : {
    1401       90978 :     Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
    1402             :     Oid         databaseId;
    1403             :     Oid         relationId;
    1404             : 
    1405       90978 :     PrepareInvalidationState();
    1406             : 
    1407       90978 :     relationId = classtup->oid;
    1408       90978 :     if (classtup->relisshared)
    1409        6494 :         databaseId = InvalidOid;
    1410             :     else
    1411       84484 :         databaseId = MyDatabaseId;
    1412       90978 :     RegisterRelcacheInvalidation(databaseId, relationId);
    1413       90978 : }
    1414             : 
    1415             : /*
    1416             :  * CacheInvalidateRelcacheByRelid
    1417             :  *      As above, but relation is identified by passing its OID.
    1418             :  *      This is the least efficient of the three options; use one of
    1419             :  *      the above routines if you have a Relation or pg_class tuple.
    1420             :  */
    1421             : void
    1422       25848 : CacheInvalidateRelcacheByRelid(Oid relid)
    1423             : {
    1424             :     HeapTuple   tup;
    1425             : 
    1426       25848 :     PrepareInvalidationState();
    1427             : 
    1428       25848 :     tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
    1429       25848 :     if (!HeapTupleIsValid(tup))
    1430           0 :         elog(ERROR, "cache lookup failed for relation %u", relid);
    1431       25848 :     CacheInvalidateRelcacheByTuple(tup);
    1432       25848 :     ReleaseSysCache(tup);
    1433       25848 : }
    1434             : 
    1435             : 
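Taken together, the three registration routines above differ only in what the caller already has in hand.  An illustrative helper choosing among them (the helper itself is hypothetical):

#include "postgres.h"

#include "access/htup.h"
#include "utils/inval.h"
#include "utils/rel.h"

static void
flag_relcache_rebuild(Relation rel, HeapTuple classTuple, Oid relid)
{
    if (rel != NULL)
        CacheInvalidateRelcache(rel);   /* cheapest: Relation in hand */
    else if (classTuple != NULL)
        CacheInvalidateRelcacheByTuple(classTuple); /* pg_class row in hand */
    else
        CacheInvalidateRelcacheByRelid(relid);      /* OID only: costs an
                                                     * extra syscache lookup */
}
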
    1436             : /*
    1437             :  * CacheInvalidateSmgr
    1438             :  *      Register invalidation of smgr references to a physical relation.
    1439             :  *
    1440             :  * Sending this type of invalidation msg forces other backends to close open
    1441             :  * smgr entries for the rel.  This should be done to flush dangling open-file
    1442             :  * references when the physical rel is being dropped or truncated.  Because
    1443             :  * these are nontransactional (i.e., not-rollback-able) operations, we just
    1444             :  * send the inval message immediately without any queuing.
    1445             :  *
    1446             :  * Note: in most cases there will have been a relcache flush issued against
    1447             :  * the rel at the logical level.  We need a separate smgr-level flush because
    1448             :  * it is possible for backends to have open smgr entries for rels they don't
    1449             :  * have a relcache entry for, e.g. because the only thing they ever did with
    1450             :  * the rel is write out dirty shared buffers.
    1451             :  *
    1452             :  * Note: because these messages are nontransactional, they won't be captured
    1453             :  * in commit/abort WAL entries.  Instead, calls to CacheInvalidateSmgr()
    1454             :  * should happen in low-level smgr.c routines, which are executed while
    1455             :  * replaying WAL as well as when creating it.
    1456             :  *
    1457             :  * Note: In order to avoid bloating SharedInvalidationMessage, we store only
    1458             :  * three bytes of the backend ID using what would otherwise be padding space.
    1459             :  * Thus, the maximum possible backend ID is 2^23-1.
    1460             :  */
    1461             : void
    1462      102574 : CacheInvalidateSmgr(RelFileLocatorBackend rlocator)
    1463             : {
    1464             :     SharedInvalidationMessage msg;
    1465             : 
    1466      102574 :     msg.sm.id = SHAREDINVALSMGR_ID;
    1467      102574 :     msg.sm.backend_hi = rlocator.backend >> 16;
    1468      102574 :     msg.sm.backend_lo = rlocator.backend & 0xffff;
    1469      102574 :     msg.sm.rlocator = rlocator.locator;
    1470             :     /* check AddCatcacheInvalidationMessage() for an explanation */
    1471             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
    1472             : 
    1473      102574 :     SendSharedInvalidMessages(&msg, 1);
    1474      102574 : }
    1475             : 
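The three-byte packing described in the note can be checked in isolation.  This standalone sketch mirrors the hi/lo split with a local struct (not the real SharedInvalidationMessage) and, like the backend, assumes two's-complement integers with an arithmetic right shift so that InvalidBackendId (-1) also survives the round trip:

#include <assert.h>
#include <stdint.h>

typedef struct
{
    int8_t      backend_hi;     /* high byte; signed so -1 can round-trip */
    uint16_t    backend_lo;     /* low two bytes */
} PackedBackendId;

static PackedBackendId
pack_backend_id(int backend)
{
    PackedBackendId p;

    p.backend_hi = backend >> 16;
    p.backend_lo = backend & 0xffff;
    return p;
}

static int
unpack_backend_id(PackedBackendId p)
{
    /* same reconstruction the receiving side performs */
    return ((int) p.backend_hi << 16) | (int) p.backend_lo;
}

int
main(void)
{
    assert(unpack_backend_id(pack_backend_id(0)) == 0);
    assert(unpack_backend_id(pack_backend_id(42)) == 42);
    assert(unpack_backend_id(pack_backend_id((1 << 23) - 1)) == (1 << 23) - 1);
    /* InvalidBackendId (-1) round-trips too, given arithmetic right shift. */
    return 0;
}
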
    1476             : /*
    1477             :  * CacheInvalidateRelmap
    1478             :  *      Register invalidation of the relation mapping for a database,
    1479             :  *      or for the shared catalogs if databaseId is zero.
    1480             :  *
    1481             :  * Sending this type of invalidation msg forces other backends to re-read
    1482             :  * the indicated relation mapping file.  It is also necessary to send a
    1483             :  * relcache inval for the specific relations whose mapping has been altered,
    1484             :  * else the relcache won't get updated with the new filenode data.
    1485             :  *
    1486             :  * Note: because these messages are nontransactional, they won't be captured
    1487             :  * in commit/abort WAL entries.  Instead, calls to CacheInvalidateRelmap()
    1488             :  * should happen in low-level relmapper.c routines, which are executed while
    1489             :  * replaying WAL as well as when creating it.
    1490             :  */
    1491             : void
    1492         206 : CacheInvalidateRelmap(Oid databaseId)
    1493             : {
    1494             :     SharedInvalidationMessage msg;
    1495             : 
    1496         206 :     msg.rm.id = SHAREDINVALRELMAP_ID;
    1497         206 :     msg.rm.dbId = databaseId;
    1498             :     /* check AddCatcacheInvalidationMessage() for an explanation */
    1499             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
    1500             : 
    1501         206 :     SendSharedInvalidMessages(&msg, 1);
    1502         206 : }
    1503             : 
    1504             : 
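A sketch of the division of labour the two notes above imply.  Only CacheInvalidateRelmap() is called here; the relcache side is left as a comment because it belongs to the higher-level, transactional code that changed the mapping.  The enclosing function is hypothetical:

#include "postgres.h"

#include "utils/inval.h"

static void
publish_new_relation_map(Oid databaseId)
{
    /*
     * Nontransactional: sent immediately, and issued again by the low-level
     * relmapper code during WAL replay.
     */
    CacheInvalidateRelmap(databaseId);

    /*
     * The relcache inval for each remapped relation is transactional and is
     * registered separately by the code that actually changed the mapping.
     */
}
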
    1505             : /*
    1506             :  * CacheRegisterSyscacheCallback
    1507             :  *      Register the specified function to be called for all future
    1508             :  *      invalidation events in the specified cache.  The cache ID and the
    1509             :  *      hash value of the tuple being invalidated will be passed to the
    1510             :  *      function.
    1511             :  *
    1512             :  * NOTE: Hash value zero will be passed if a cache reset request is received.
    1513             :  * In this case the called routines should flush all cached state.
    1514             :  * Yes, there's a possibility of a false match to zero, but it doesn't seem
    1515             :  * worth troubling over, especially since most of the current callees just
    1516             :  * flush all cached state anyway.
    1517             :  */
    1518             : void
    1519      329016 : CacheRegisterSyscacheCallback(int cacheid,
    1520             :                               SyscacheCallbackFunction func,
    1521             :                               Datum arg)
    1522             : {
    1523      329016 :     if (cacheid < 0 || cacheid >= SysCacheSize)
    1524           0 :         elog(FATAL, "invalid cache ID: %d", cacheid);
    1525      329016 :     if (syscache_callback_count >= MAX_SYSCACHE_CALLBACKS)
    1526           0 :         elog(FATAL, "out of syscache_callback_list slots");
    1527             : 
    1528      329016 :     if (syscache_callback_links[cacheid] == 0)
    1529             :     {
    1530             :         /* first callback for this cache */
    1531      279192 :         syscache_callback_links[cacheid] = syscache_callback_count + 1;
    1532             :     }
    1533             :     else
    1534             :     {
    1535             :         /* add to end of chain, so that older callbacks are called first */
    1536       49824 :         int         i = syscache_callback_links[cacheid] - 1;
    1537             : 
    1538       50394 :         while (syscache_callback_list[i].link > 0)
    1539         570 :             i = syscache_callback_list[i].link - 1;
    1540       49824 :         syscache_callback_list[i].link = syscache_callback_count + 1;
    1541             :     }
    1542             : 
    1543      329016 :     syscache_callback_list[syscache_callback_count].id = cacheid;
    1544      329016 :     syscache_callback_list[syscache_callback_count].link = 0;
    1545      329016 :     syscache_callback_list[syscache_callback_count].function = func;
    1546      329016 :     syscache_callback_list[syscache_callback_count].arg = arg;
    1547             : 
    1548      329016 :     ++syscache_callback_count;
    1549      329016 : }
    1550             : 
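An illustrative registration from the point of view of an extension that keeps a private cache keyed by pg_proc entries.  The reset function is hypothetical; the callback signature, PROCOID, and CacheRegisterSyscacheCallback() are the real interface:

#include "postgres.h"

#include "fmgr.h"
#include "utils/inval.h"
#include "utils/syscache.h"

PG_MODULE_MAGIC;

/* Hypothetical: drops everything from the extension's private cache. */
extern void reset_my_proc_cache(void);

static void
my_proc_cache_callback(Datum arg, int cacheid, uint32 hashvalue)
{
    /*
     * hashvalue == 0 means a full reset was requested; a nonzero value
     * identifies (by catcache hash) which entries may be stale.  Like most
     * callers, just flush everything either way.
     */
    reset_my_proc_cache();
}

void
_PG_init(void)
{
    CacheRegisterSyscacheCallback(PROCOID, my_proc_cache_callback, (Datum) 0);
}
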
    1551             : /*
    1552             :  * CacheRegisterRelcacheCallback
    1553             :  *      Register the specified function to be called for all future
    1554             :  *      relcache invalidation events.  The OID of the relation being
    1555             :  *      invalidated will be passed to the function.
    1556             :  *
    1557             :  * NOTE: InvalidOid will be passed if a cache reset request is received.
    1558             :  * In this case the called routines should flush all cached state.
    1559             :  */
    1560             : void
    1561       31570 : CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
    1562             :                               Datum arg)
    1563             : {
    1564       31570 :     if (relcache_callback_count >= MAX_RELCACHE_CALLBACKS)
    1565           0 :         elog(FATAL, "out of relcache_callback_list slots");
    1566             : 
    1567       31570 :     relcache_callback_list[relcache_callback_count].function = func;
    1568       31570 :     relcache_callback_list[relcache_callback_count].arg = arg;
    1569             : 
    1570       31570 :     ++relcache_callback_count;
    1571       31570 : }
    1572             : 
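The relcache counterpart, with the same caveat that the per-entry bookkeeping functions are hypothetical:

#include "postgres.h"

#include "utils/inval.h"

/* Hypothetical bookkeeping for a cache keyed by relation OID. */
extern void forget_my_rel_entry(Oid relid);
extern void reset_my_rel_entries(void);

static void
my_rel_cache_callback(Datum arg, Oid relid)
{
    if (OidIsValid(relid))
        forget_my_rel_entry(relid);     /* one relation was invalidated */
    else
        reset_my_rel_entries();         /* InvalidOid: flush everything */
}

static void
register_my_rel_callback(void)
{
    CacheRegisterRelcacheCallback(my_rel_cache_callback, (Datum) 0);
}
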
    1573             : /*
    1574             :  * CallSyscacheCallbacks
    1575             :  *
    1576             :  * This is exported so that CatalogCacheFlushCatalog can call it, saving
    1577             :  * this module from knowing which catcache IDs correspond to which catalogs.
    1578             :  */
    1579             : void
    1580    24630408 : CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
    1581             : {
    1582             :     int         i;
    1583             : 
    1584    24630408 :     if (cacheid < 0 || cacheid >= SysCacheSize)
    1585           0 :         elog(ERROR, "invalid cache ID: %d", cacheid);
    1586             : 
    1587    24630408 :     i = syscache_callback_links[cacheid] - 1;
    1588    27596154 :     while (i >= 0)
    1589             :     {
    1590     2965746 :         struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
    1591             : 
    1592             :         Assert(ccitem->id == cacheid);
    1593     2965746 :         ccitem->function(ccitem->arg, cacheid, hashvalue);
    1594     2965746 :         i = ccitem->link - 1;
    1595             :     }
    1596    24630408 : }
    1597             : 
    1598             : /*
    1599             :  * LogLogicalInvalidations
    1600             :  *
    1601             :  * Emit WAL for invalidations caused by the current command.
    1602             :  *
    1603             :  * This is currently only used for logging invalidations at the command end
    1604             :  * or at commit time if any invalidations are pending.
    1605             :  */
    1606             : void
    1607       36760 : LogLogicalInvalidations(void)
    1608             : {
    1609             :     xl_xact_invals xlrec;
    1610             :     InvalidationMsgsGroup *group;
    1611             :     int         nmsgs;
    1612             : 
    1613             :     /* Quick exit if we haven't done anything with invalidation messages. */
    1614       36760 :     if (transInvalInfo == NULL)
    1615       22248 :         return;
    1616             : 
    1617       14512 :     group = &transInvalInfo->CurrentCmdInvalidMsgs;
    1618       14512 :     nmsgs = NumMessagesInGroup(group);
    1619             : 
    1620       14512 :     if (nmsgs > 0)
    1621             :     {
    1622             :         /* prepare record */
    1623       11278 :         memset(&xlrec, 0, MinSizeOfXactInvals);
    1624       11278 :         xlrec.nmsgs = nmsgs;
    1625             : 
    1626             :         /* perform insertion */
    1627       11278 :         XLogBeginInsert();
    1628       11278 :         XLogRegisterData((char *) (&xlrec), MinSizeOfXactInvals);
    1629       11278 :         ProcessMessageSubGroupMulti(group, CatCacheMsgs,
    1630             :                                     XLogRegisterData((char *) msgs,
    1631             :                                                      n * sizeof(SharedInvalidationMessage)));
    1632       11278 :         ProcessMessageSubGroupMulti(group, RelCacheMsgs,
    1633             :                                     XLogRegisterData((char *) msgs,
    1634             :                                                      n * sizeof(SharedInvalidationMessage)));
    1635       11278 :         XLogInsert(RM_XACT_ID, XLOG_XACT_INVALIDATIONS);
    1636             :     }
    1637             : }

Generated by: LCOV version 1.14