LCOV - code coverage report
Current view: top level - src/backend/utils/cache - inval.c (source / functions)
Test:     PostgreSQL 19devel
Date:     2026-01-02 19:17:20
Coverage: Lines: 416 of 425 hit (97.9 %)    Functions: 49 of 49 hit (100.0 %)
Legend:   Lines: hit | not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * inval.c
       4             :  *    POSTGRES cache invalidation dispatcher code.
       5             :  *
       6             :  *  This is subtle stuff, so pay attention:
       7             :  *
       8             :  *  When a tuple is updated or deleted, our standard visibility rules
       9             :  *  consider that it is *still valid* so long as we are in the same command,
      10             :  *  ie, until the next CommandCounterIncrement() or transaction commit.
      11             :  *  (See access/heap/heapam_visibility.c, and note that system catalogs are
      12             :  *  generally scanned under the most current snapshot available, rather than
      13             :  *  the transaction snapshot.)  At the command boundary, the old tuple stops
      14             :  *  being valid and the new version, if any, becomes valid.  Therefore,
      15             :  *  we cannot simply flush a tuple from the system caches during heap_update()
      16             :  *  or heap_delete().  The tuple is still good at that point; what's more,
      17             :  *  even if we did flush it, it might be reloaded into the caches by a later
      18             :  *  request in the same command.  So the correct behavior is to keep a list
      19             :  *  of outdated (updated/deleted) tuples and then do the required cache
      20             :  *  flushes at the next command boundary.  We must also keep track of
      21             :  *  inserted tuples so that we can flush "negative" cache entries that match
      22             :  *  the new tuples; again, that mustn't happen until end of command.
      23             :  *
      24             :  *  Once we have finished the command, we still need to remember inserted
      25             :  *  tuples (including new versions of updated tuples), so that we can flush
      26             :  *  them from the caches if we abort the transaction.  Similarly, we'd better
      27             :  *  be able to flush "negative" cache entries that may have been loaded in
      28             :  *  place of deleted tuples, so we still need the deleted ones too.
      29             :  *
      30             :  *  If we successfully complete the transaction, we have to broadcast all
      31             :  *  these invalidation events to other backends (via the SI message queue)
      32             :  *  so that they can flush obsolete entries from their caches.  Note we have
      33             :  *  to record the transaction commit before sending SI messages, otherwise
      34             :  *  the other backends won't see our updated tuples as good.
      35             :  *
      36             :  *  When a subtransaction aborts, we can process and discard any events
      37             :  *  it has queued.  When a subtransaction commits, we just add its events
      38             :  *  to the pending lists of the parent transaction.
      39             :  *
      40             :  *  In short, we need to remember until xact end every insert or delete
      41             :  *  of a tuple that might be in the system caches.  Updates are treated as
      42             :  *  two events, delete + insert, for simplicity.  (If the update doesn't
      43             :  *  change the tuple hash value, catcache.c optimizes this into one event.)
      44             :  *
      45             :  *  We do not need to register EVERY tuple operation in this way, just those
      46             :  *  on tuples in relations that have associated catcaches.  We do, however,
      47             :  *  have to register every operation on every tuple that *could* be in a
      48             :  *  catcache, whether or not it currently is in our cache.  Also, if the
      49             :  *  tuple is in a relation that has multiple catcaches, we need to register
      50             :  *  an invalidation message for each such catcache.  catcache.c's
      51             :  *  PrepareToInvalidateCacheTuple() routine provides the knowledge of which
      52             :  *  catcaches may need invalidation for a given tuple.
      53             :  *
      54             :  *  Also, whenever we see an operation on a pg_class, pg_attribute, or
      55             :  *  pg_index tuple, we register a relcache flush operation for the relation
      56             :  *  described by that tuple (as specified in CacheInvalidateHeapTuple()).
      57             :  *  Likewise for pg_constraint tuples for foreign keys on relations.
      58             :  *
      59             :  *  We keep the relcache flush requests in lists separate from the catcache
      60             :  *  tuple flush requests.  This allows us to issue all the pending catcache
      61             :  *  flushes before we issue relcache flushes, which saves us from loading
      62             :  *  a catcache tuple during relcache load only to flush it again right away.
      63             :  *  Also, we avoid queuing multiple relcache flush requests for the same
      64             :  *  relation, since a relcache flush is relatively expensive to do.
      65             :  *  (XXX is it worth testing likewise for duplicate catcache flush entries?
      66             :  *  Probably not.)
      67             :  *
      68             :  *  Many subsystems own higher-level caches that depend on relcache and/or
      69             :  *  catcache, and they register callbacks here to invalidate their caches.
      70             :  *  While building a higher-level cache entry, a backend may receive a
      71             :  *  callback for the being-built entry or one of its dependencies.  This
      72             :  *  implies the new higher-level entry would be born stale, and it might
      73             :  *  remain stale for the life of the backend.  Many caches do not prevent
       74             :  *  that.  They rely on the fact that DDL making can't-miss catalog changes
       75             :  *  takes AccessExclusiveLock on suitable objects.  (For a change made with
       76             :  *  less locking, backends might never read the change.)  The relation cache,
      77             :  *  however, needs to reflect changes from CREATE INDEX CONCURRENTLY no later
      78             :  *  than the beginning of the next transaction.  Hence, when a relevant
      79             :  *  invalidation callback arrives during a build, relcache.c reattempts that
      80             :  *  build.  Caches with similar needs could do likewise.
      81             :  *
      82             :  *  If a relcache flush is issued for a system relation that we preload
      83             :  *  from the relcache init file, we must also delete the init file so that
      84             :  *  it will be rebuilt during the next backend restart.  The actual work of
      85             :  *  manipulating the init file is in relcache.c, but we keep track of the
      86             :  *  need for it here.
      87             :  *
      88             :  *  Currently, inval messages are sent without regard for the possibility
      89             :  *  that the object described by the catalog tuple might be a session-local
      90             :  *  object such as a temporary table.  This is because (1) this code has
      91             :  *  no practical way to tell the difference, and (2) it is not certain that
      92             :  *  other backends don't have catalog cache or even relcache entries for
      93             :  *  such tables, anyway; there is nothing that prevents that.  It might be
      94             :  *  worth trying to avoid sending such inval traffic in the future, if those
      95             :  *  problems can be overcome cheaply.
      96             :  *
      97             :  *  When making a nontransactional change to a cacheable object, we must
      98             :  *  likewise send the invalidation immediately, before ending the change's
      99             :  *  critical section.  This includes inplace heap updates, relmap, and smgr.
     100             :  *
     101             :  *  When effective_wal_level is 'logical', write invalidations into WAL at
      102             :  *  each command end to support the decoding of in-progress transactions.
     103             :  *  See CommandEndInvalidationMessages.
     104             :  *
     105             :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
     106             :  * Portions Copyright (c) 1994, Regents of the University of California
     107             :  *
     108             :  * IDENTIFICATION
     109             :  *    src/backend/utils/cache/inval.c
     110             :  *
     111             :  *-------------------------------------------------------------------------
     112             :  */
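/*
 * Illustrative summary (a sketch, not an exhaustive list of callers): the
 * flow described above, expressed in terms of the entry points involved.
 *
 *   heap_insert/heap_update/heap_delete
 *       -> CacheInvalidateHeapTuple(): queue catcache/relcache events
 *   CommandCounterIncrement()
 *       -> CommandEndInvalidationMessages(): apply the current command's
 *          events locally and fold them into the prior-commands group
 *   subtransaction end
 *       -> AtEOSubXact_Inval(isCommit): push events up to the parent on
 *          commit, or replay and discard them on abort
 *   transaction commit
 *       -> AtEOXact_Inval(true): broadcast the accumulated events to other
 *          backends via the SI message queue
 *   transaction abort
 *       -> AtEOXact_Inval(false): replay events locally so stale cache
 *          entries loaded during the transaction are flushed
 */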
     113             : #include "postgres.h"
     114             : 
     115             : #include <limits.h>
     116             : 
     117             : #include "access/htup_details.h"
     118             : #include "access/xact.h"
     119             : #include "access/xloginsert.h"
     120             : #include "catalog/catalog.h"
     121             : #include "catalog/pg_constraint.h"
     122             : #include "miscadmin.h"
     123             : #include "storage/procnumber.h"
     124             : #include "storage/sinval.h"
     125             : #include "storage/smgr.h"
     126             : #include "utils/catcache.h"
     127             : #include "utils/injection_point.h"
     128             : #include "utils/inval.h"
     129             : #include "utils/memdebug.h"
     130             : #include "utils/memutils.h"
     131             : #include "utils/rel.h"
     132             : #include "utils/relmapper.h"
     133             : #include "utils/snapmgr.h"
     134             : #include "utils/syscache.h"
     135             : 
     136             : 
     137             : /*
     138             :  * Pending requests are stored as ready-to-send SharedInvalidationMessages.
     139             :  * We keep the messages themselves in arrays in TopTransactionContext (there
     140             :  * are separate arrays for catcache and relcache messages).  For transactional
     141             :  * messages, control information is kept in a chain of TransInvalidationInfo
     142             :  * structs, also allocated in TopTransactionContext.  (We could keep a
     143             :  * subtransaction's TransInvalidationInfo in its CurTransactionContext; but
     144             :  * that's more wasteful not less so, since in very many scenarios it'd be the
     145             :  * only allocation in the subtransaction's CurTransactionContext.)  For
     146             :  * inplace update messages, control information appears in an
     147             :  * InvalidationInfo, allocated in CurrentMemoryContext.
     148             :  *
     149             :  * We can store the message arrays densely, and yet avoid moving data around
     150             :  * within an array, because within any one subtransaction we need only
     151             :  * distinguish between messages emitted by prior commands and those emitted
     152             :  * by the current command.  Once a command completes and we've done local
     153             :  * processing on its messages, we can fold those into the prior-commands
     154             :  * messages just by changing array indexes in the TransInvalidationInfo
      155             :  * struct.  Similarly, we need to distinguish messages of prior subtransactions
     156             :  * from those of the current subtransaction only until the subtransaction
     157             :  * completes, after which we adjust the array indexes in the parent's
     158             :  * TransInvalidationInfo to include the subtransaction's messages.  Inplace
     159             :  * invalidations don't need a concept of command or subtransaction boundaries,
     160             :  * since we send them during the WAL insertion critical section.
     161             :  *
     162             :  * The ordering of the individual messages within a command's or
     163             :  * subtransaction's output is not considered significant, although this
     164             :  * implementation happens to preserve the order in which they were queued.
     165             :  * (Previous versions of this code did not preserve it.)
     166             :  *
     167             :  * For notational convenience, control information is kept in two-element
     168             :  * arrays, the first for catcache messages and the second for relcache
     169             :  * messages.
     170             :  */
     171             : #define CatCacheMsgs 0
     172             : #define RelCacheMsgs 1
     173             : 
     174             : /* Pointers to main arrays in TopTransactionContext */
     175             : typedef struct InvalMessageArray
     176             : {
     177             :     SharedInvalidationMessage *msgs;    /* palloc'd array (can be expanded) */
     178             :     int         maxmsgs;        /* current allocated size of array */
     179             : } InvalMessageArray;
     180             : 
     181             : static InvalMessageArray InvalMessageArrays[2];
     182             : 
     183             : /* Control information for one logical group of messages */
     184             : typedef struct InvalidationMsgsGroup
     185             : {
     186             :     int         firstmsg[2];    /* first index in relevant array */
     187             :     int         nextmsg[2];     /* last+1 index */
     188             : } InvalidationMsgsGroup;
     189             : 
     190             : /* Macros to help preserve InvalidationMsgsGroup abstraction */
     191             : #define SetSubGroupToFollow(targetgroup, priorgroup, subgroup) \
     192             :     do { \
     193             :         (targetgroup)->firstmsg[subgroup] = \
     194             :             (targetgroup)->nextmsg[subgroup] = \
     195             :             (priorgroup)->nextmsg[subgroup]; \
     196             :     } while (0)
     197             : 
     198             : #define SetGroupToFollow(targetgroup, priorgroup) \
     199             :     do { \
     200             :         SetSubGroupToFollow(targetgroup, priorgroup, CatCacheMsgs); \
     201             :         SetSubGroupToFollow(targetgroup, priorgroup, RelCacheMsgs); \
     202             :     } while (0)
     203             : 
     204             : #define NumMessagesInSubGroup(group, subgroup) \
     205             :     ((group)->nextmsg[subgroup] - (group)->firstmsg[subgroup])
     206             : 
     207             : #define NumMessagesInGroup(group) \
     208             :     (NumMessagesInSubGroup(group, CatCacheMsgs) + \
     209             :      NumMessagesInSubGroup(group, RelCacheMsgs))
     210             : 
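/*
 * Illustrative sketch only: how the index bookkeeping above lets one group
 * absorb another without copying any messages.  This essentially restates
 * what AppendInvalidationMessages() does further down; the function below
 * is not used anywhere and exists purely as an example.
 */
static inline void
example_fold_group(InvalidationMsgsGroup *prior, InvalidationMsgsGroup *cur)
{
    /* the two groups must be adjacent in the shared message arrays */
    Assert(prior->nextmsg[CatCacheMsgs] == cur->firstmsg[CatCacheMsgs]);
    Assert(prior->nextmsg[RelCacheMsgs] == cur->firstmsg[RelCacheMsgs]);

    /* absorb cur's messages into prior by advancing prior's end indexes */
    prior->nextmsg[CatCacheMsgs] = cur->nextmsg[CatCacheMsgs];
    prior->nextmsg[RelCacheMsgs] = cur->nextmsg[RelCacheMsgs];

    /* reset cur to an empty group positioned immediately after prior */
    SetGroupToFollow(cur, prior);
}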
     211             : 
     212             : /*----------------
     213             :  * Transactional invalidation messages are divided into two groups:
     214             :  *  1) events so far in current command, not yet reflected to caches.
     215             :  *  2) events in previous commands of current transaction; these have
     216             :  *     been reflected to local caches, and must be either broadcast to
     217             :  *     other backends or rolled back from local cache when we commit
     218             :  *     or abort the transaction.
     219             :  * Actually, we need such groups for each level of nested transaction,
     220             :  * so that we can discard events from an aborted subtransaction.  When
     221             :  * a subtransaction commits, we append its events to the parent's groups.
     222             :  *
     223             :  * The relcache-file-invalidated flag can just be a simple boolean,
     224             :  * since we only act on it at transaction commit; we don't care which
     225             :  * command of the transaction set it.
     226             :  *----------------
     227             :  */
     228             : 
     229             : /* fields common to both transactional and inplace invalidation */
     230             : typedef struct InvalidationInfo
     231             : {
     232             :     /* Events emitted by current command */
     233             :     InvalidationMsgsGroup CurrentCmdInvalidMsgs;
     234             : 
     235             :     /* init file must be invalidated? */
     236             :     bool        RelcacheInitFileInval;
     237             : } InvalidationInfo;
     238             : 
     239             : /* subclass adding fields specific to transactional invalidation */
     240             : typedef struct TransInvalidationInfo
     241             : {
     242             :     /* Base class */
     243             :     struct InvalidationInfo ii;
     244             : 
     245             :     /* Events emitted by previous commands of this (sub)transaction */
     246             :     InvalidationMsgsGroup PriorCmdInvalidMsgs;
     247             : 
     248             :     /* Back link to parent transaction's info */
     249             :     struct TransInvalidationInfo *parent;
     250             : 
     251             :     /* Subtransaction nesting depth */
     252             :     int         my_level;
     253             : } TransInvalidationInfo;
     254             : 
     255             : static TransInvalidationInfo *transInvalInfo = NULL;
     256             : 
     257             : static InvalidationInfo *inplaceInvalInfo = NULL;
     258             : 
     259             : /* GUC storage */
     260             : int         debug_discard_caches = 0;
     261             : 
     262             : /*
     263             :  * Dynamically-registered callback functions.  Current implementation
     264             :  * assumes there won't be enough of these to justify a dynamically resizable
     265             :  * array; it'd be easy to improve that if needed.
     266             :  *
     267             :  * To avoid searching in CallSyscacheCallbacks, all callbacks for a given
     268             :  * syscache are linked into a list pointed to by syscache_callback_links[id].
     269             :  * The link values are syscache_callback_list[] index plus 1, or 0 for none.
     270             :  */
     271             : 
     272             : #define MAX_SYSCACHE_CALLBACKS 64
     273             : #define MAX_RELCACHE_CALLBACKS 10
     274             : #define MAX_RELSYNC_CALLBACKS 10
     275             : 
     276             : static struct SYSCACHECALLBACK
     277             : {
     278             :     int16       id;             /* cache number */
     279             :     int16       link;           /* next callback index+1 for same cache */
     280             :     SyscacheCallbackFunction function;
     281             :     Datum       arg;
     282             : }           syscache_callback_list[MAX_SYSCACHE_CALLBACKS];
     283             : 
     284             : static int16 syscache_callback_links[SysCacheSize];
     285             : 
     286             : static int  syscache_callback_count = 0;
     287             : 
     288             : static struct RELCACHECALLBACK
     289             : {
     290             :     RelcacheCallbackFunction function;
     291             :     Datum       arg;
     292             : }           relcache_callback_list[MAX_RELCACHE_CALLBACKS];
     293             : 
     294             : static int  relcache_callback_count = 0;
     295             : 
     296             : static struct RELSYNCCALLBACK
     297             : {
     298             :     RelSyncCallbackFunction function;
     299             :     Datum       arg;
     300             : }           relsync_callback_list[MAX_RELSYNC_CALLBACKS];
     301             : 
     302             : static int  relsync_callback_count = 0;
     303             : 
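/*
 * Illustrative sketch only (not used anywhere in this file): how a subsystem
 * typically gets itself into the arrays above.  Registration happens once
 * per backend, e.g. from the subsystem's cache-initialization routine, via
 * the registration functions declared in utils/inval.h.  All names prefixed
 * with "example_" are hypothetical.
 */
static bool example_cache_valid = false;

static void
example_syscache_callback(Datum arg, int cacheid, uint32 hashvalue)
{
    /* hashvalue == 0 is the "flush everything for this cache" convention */
    example_cache_valid = false;
}

static void
example_register_callbacks(void)
{
    /* call back whenever a pg_proc syscache entry is invalidated */
    CacheRegisterSyscacheCallback(PROCOID, example_syscache_callback,
                                  (Datum) 0);
}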
     304             : 
     305             : /* ----------------------------------------------------------------
     306             :  *              Invalidation subgroup support functions
     307             :  * ----------------------------------------------------------------
     308             :  */
     309             : 
     310             : /*
     311             :  * AddInvalidationMessage
     312             :  *      Add an invalidation message to a (sub)group.
     313             :  *
     314             :  * The group must be the last active one, since we assume we can add to the
     315             :  * end of the relevant InvalMessageArray.
     316             :  *
     317             :  * subgroup must be CatCacheMsgs or RelCacheMsgs.
     318             :  */
     319             : static void
     320     7700076 : AddInvalidationMessage(InvalidationMsgsGroup *group, int subgroup,
     321             :                        const SharedInvalidationMessage *msg)
     322             : {
     323     7700076 :     InvalMessageArray *ima = &InvalMessageArrays[subgroup];
     324     7700076 :     int         nextindex = group->nextmsg[subgroup];
     325             : 
     326     7700076 :     if (nextindex >= ima->maxmsgs)
     327             :     {
     328      949326 :         if (ima->msgs == NULL)
     329             :         {
     330             :             /* Create new storage array in TopTransactionContext */
     331      887148 :             int         reqsize = 32;   /* arbitrary */
     332             : 
     333      887148 :             ima->msgs = (SharedInvalidationMessage *)
     334      887148 :                 MemoryContextAlloc(TopTransactionContext,
     335             :                                    reqsize * sizeof(SharedInvalidationMessage));
     336      887148 :             ima->maxmsgs = reqsize;
     337             :             Assert(nextindex == 0);
     338             :         }
     339             :         else
     340             :         {
     341             :             /* Enlarge storage array */
     342       62178 :             int         reqsize = 2 * ima->maxmsgs;
     343             : 
     344       62178 :             ima->msgs = (SharedInvalidationMessage *)
     345       62178 :                 repalloc(ima->msgs,
     346             :                          reqsize * sizeof(SharedInvalidationMessage));
     347       62178 :             ima->maxmsgs = reqsize;
     348             :         }
     349             :     }
     350             :     /* Okay, add message to current group */
     351     7700076 :     ima->msgs[nextindex] = *msg;
     352     7700076 :     group->nextmsg[subgroup]++;
     353     7700076 : }
     354             : 
     355             : /*
     356             :  * Append one subgroup of invalidation messages to another, resetting
     357             :  * the source subgroup to empty.
     358             :  */
     359             : static void
     360     2163796 : AppendInvalidationMessageSubGroup(InvalidationMsgsGroup *dest,
     361             :                                   InvalidationMsgsGroup *src,
     362             :                                   int subgroup)
     363             : {
     364             :     /* Messages must be adjacent in main array */
     365             :     Assert(dest->nextmsg[subgroup] == src->firstmsg[subgroup]);
     366             : 
     367             :     /* ... which makes this easy: */
     368     2163796 :     dest->nextmsg[subgroup] = src->nextmsg[subgroup];
     369             : 
     370             :     /*
     371             :      * This is handy for some callers and irrelevant for others.  But we do it
     372             :      * always, reasoning that it's bad to leave different groups pointing at
     373             :      * the same fragment of the message array.
     374             :      */
     375     2163796 :     SetSubGroupToFollow(src, dest, subgroup);
     376     2163796 : }
     377             : 
     378             : /*
     379             :  * Process a subgroup of invalidation messages.
     380             :  *
     381             :  * This is a macro that executes the given code fragment for each message in
     382             :  * a message subgroup.  The fragment should refer to the message as *msg.
     383             :  */
     384             : #define ProcessMessageSubGroup(group, subgroup, codeFragment) \
     385             :     do { \
     386             :         int     _msgindex = (group)->firstmsg[subgroup]; \
     387             :         int     _endmsg = (group)->nextmsg[subgroup]; \
     388             :         for (; _msgindex < _endmsg; _msgindex++) \
     389             :         { \
     390             :             SharedInvalidationMessage *msg = \
     391             :                 &InvalMessageArrays[subgroup].msgs[_msgindex]; \
     392             :             codeFragment; \
     393             :         } \
     394             :     } while (0)
     395             : 
     396             : /*
     397             :  * Process a subgroup of invalidation messages as an array.
     398             :  *
     399             :  * As above, but the code fragment can handle an array of messages.
     400             :  * The fragment should refer to the messages as msgs[], with n entries.
     401             :  */
     402             : #define ProcessMessageSubGroupMulti(group, subgroup, codeFragment) \
     403             :     do { \
     404             :         int     n = NumMessagesInSubGroup(group, subgroup); \
     405             :         if (n > 0) { \
     406             :             SharedInvalidationMessage *msgs = \
     407             :                 &InvalMessageArrays[subgroup].msgs[(group)->firstmsg[subgroup]]; \
     408             :             codeFragment; \
     409             :         } \
     410             :     } while (0)
     411             : 
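/*
 * Illustrative usage of the two macros above (a sketch; the real call sites
 * appear later in this file):
 *
 *      ProcessMessageSubGroup(group, CatCacheMsgs,
 *                             LocalExecuteInvalidationMessage(msg));
 *
 *      ProcessMessageSubGroupMulti(group, CatCacheMsgs,
 *                                  SendSharedInvalidMessages(msgs, n));
 *
 * The first expands to a per-message loop in which the code fragment sees
 * each message as *msg; the second hands the fragment a contiguous msgs[]
 * array of n entries, which is what batch operations such as sending at
 * transaction commit need.
 */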
     412             : 
     413             : /* ----------------------------------------------------------------
     414             :  *              Invalidation group support functions
     415             :  *
      416             :  * These routines understand the division of a logical invalidation
     417             :  * group into separate physical arrays for catcache and relcache entries.
     418             :  * ----------------------------------------------------------------
     419             :  */
     420             : 
     421             : /*
     422             :  * Add a catcache inval entry
     423             :  */
     424             : static void
     425     6104956 : AddCatcacheInvalidationMessage(InvalidationMsgsGroup *group,
     426             :                                int id, uint32 hashValue, Oid dbId)
     427             : {
     428             :     SharedInvalidationMessage msg;
     429             : 
     430             :     Assert(id < CHAR_MAX);
     431     6104956 :     msg.cc.id = (int8) id;
     432     6104956 :     msg.cc.dbId = dbId;
     433     6104956 :     msg.cc.hashValue = hashValue;
     434             : 
     435             :     /*
      436             :      * Mark the padding bytes in SharedInvalidationMessage structs as
      437             :      * defined. Otherwise the sinvaladt.c ringbuffer, which is accessed by
     438             :      * multiple processes, will cause spurious valgrind warnings about
     439             :      * undefined memory being used. That's because valgrind remembers the
     440             :      * undefined bytes from the last local process's store, not realizing that
     441             :      * another process has written since, filling the previously uninitialized
      442             :      * bytes.
     443             :      */
     444             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     445             : 
     446     6104956 :     AddInvalidationMessage(group, CatCacheMsgs, &msg);
     447     6104956 : }
     448             : 
     449             : /*
     450             :  * Add a whole-catalog inval entry
     451             :  */
     452             : static void
     453         222 : AddCatalogInvalidationMessage(InvalidationMsgsGroup *group,
     454             :                               Oid dbId, Oid catId)
     455             : {
     456             :     SharedInvalidationMessage msg;
     457             : 
     458         222 :     msg.cat.id = SHAREDINVALCATALOG_ID;
     459         222 :     msg.cat.dbId = dbId;
     460         222 :     msg.cat.catId = catId;
     461             :     /* check AddCatcacheInvalidationMessage() for an explanation */
     462             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     463             : 
     464         222 :     AddInvalidationMessage(group, CatCacheMsgs, &msg);
     465         222 : }
     466             : 
     467             : /*
     468             :  * Add a relcache inval entry
     469             :  */
     470             : static void
     471     2295332 : AddRelcacheInvalidationMessage(InvalidationMsgsGroup *group,
     472             :                                Oid dbId, Oid relId)
     473             : {
     474             :     SharedInvalidationMessage msg;
     475             : 
     476             :     /*
     477             :      * Don't add a duplicate item. We assume dbId need not be checked because
     478             :      * it will never change. InvalidOid for relId means all relations so we
     479             :      * don't need to add individual ones when it is present.
     480             :      */
     481     6826912 :     ProcessMessageSubGroup(group, RelCacheMsgs,
     482             :                            if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
     483             :                                (msg->rc.relId == relId ||
     484             :                                 msg->rc.relId == InvalidOid))
     485             :                            return);
     486             : 
     487             :     /* OK, add the item */
     488     1026266 :     msg.rc.id = SHAREDINVALRELCACHE_ID;
     489     1026266 :     msg.rc.dbId = dbId;
     490     1026266 :     msg.rc.relId = relId;
     491             :     /* check AddCatcacheInvalidationMessage() for an explanation */
     492             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     493             : 
     494     1026266 :     AddInvalidationMessage(group, RelCacheMsgs, &msg);
     495             : }
     496             : 
     497             : /*
     498             :  * Add a relsync inval entry
     499             :  *
     500             :  * We put these into the relcache subgroup for simplicity. This message is the
      501             :  * same as AddRelcacheInvalidationMessage() except that it is for the
      502             :  * RelationSyncCache maintained by the logical decoding output plugin pgoutput.
     503             :  */
     504             : static void
     505          12 : AddRelsyncInvalidationMessage(InvalidationMsgsGroup *group,
     506             :                               Oid dbId, Oid relId)
     507             : {
     508             :     SharedInvalidationMessage msg;
     509             : 
     510             :     /* Don't add a duplicate item. */
     511          12 :     ProcessMessageSubGroup(group, RelCacheMsgs,
     512             :                            if (msg->rc.id == SHAREDINVALRELSYNC_ID &&
     513             :                                (msg->rc.relId == relId ||
     514             :                                 msg->rc.relId == InvalidOid))
     515             :                            return);
     516             : 
     517             :     /* OK, add the item */
     518          12 :     msg.rc.id = SHAREDINVALRELSYNC_ID;
     519          12 :     msg.rc.dbId = dbId;
     520          12 :     msg.rc.relId = relId;
     521             :     /* check AddCatcacheInvalidationMessage() for an explanation */
     522             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     523             : 
     524          12 :     AddInvalidationMessage(group, RelCacheMsgs, &msg);
     525             : }
     526             : 
     527             : /*
     528             :  * Add a snapshot inval entry
     529             :  *
     530             :  * We put these into the relcache subgroup for simplicity.
     531             :  */
     532             : static void
     533     1127438 : AddSnapshotInvalidationMessage(InvalidationMsgsGroup *group,
     534             :                                Oid dbId, Oid relId)
     535             : {
     536             :     SharedInvalidationMessage msg;
     537             : 
     538             :     /* Don't add a duplicate item */
     539             :     /* We assume dbId need not be checked because it will never change */
     540     1642152 :     ProcessMessageSubGroup(group, RelCacheMsgs,
     541             :                            if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
     542             :                                msg->sn.relId == relId)
     543             :                            return);
     544             : 
     545             :     /* OK, add the item */
     546      568620 :     msg.sn.id = SHAREDINVALSNAPSHOT_ID;
     547      568620 :     msg.sn.dbId = dbId;
     548      568620 :     msg.sn.relId = relId;
     549             :     /* check AddCatcacheInvalidationMessage() for an explanation */
     550             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     551             : 
     552      568620 :     AddInvalidationMessage(group, RelCacheMsgs, &msg);
     553             : }
     554             : 
     555             : /*
     556             :  * Append one group of invalidation messages to another, resetting
     557             :  * the source group to empty.
     558             :  */
     559             : static void
     560     1081898 : AppendInvalidationMessages(InvalidationMsgsGroup *dest,
     561             :                            InvalidationMsgsGroup *src)
     562             : {
     563     1081898 :     AppendInvalidationMessageSubGroup(dest, src, CatCacheMsgs);
     564     1081898 :     AppendInvalidationMessageSubGroup(dest, src, RelCacheMsgs);
     565     1081898 : }
     566             : 
     567             : /*
     568             :  * Execute the given function for all the messages in an invalidation group.
     569             :  * The group is not altered.
     570             :  *
     571             :  * catcache entries are processed first, for reasons mentioned above.
     572             :  */
     573             : static void
     574      823224 : ProcessInvalidationMessages(InvalidationMsgsGroup *group,
     575             :                             void (*func) (SharedInvalidationMessage *msg))
     576             : {
     577     6239786 :     ProcessMessageSubGroup(group, CatCacheMsgs, func(msg));
     578     2046788 :     ProcessMessageSubGroup(group, RelCacheMsgs, func(msg));
     579      823218 : }
     580             : 
     581             : /*
     582             :  * As above, but the function is able to process an array of messages
     583             :  * rather than just one at a time.
     584             :  */
     585             : static void
     586      404746 : ProcessInvalidationMessagesMulti(InvalidationMsgsGroup *group,
     587             :                                  void (*func) (const SharedInvalidationMessage *msgs, int n))
     588             : {
     589      404746 :     ProcessMessageSubGroupMulti(group, CatCacheMsgs, func(msgs, n));
     590      404746 :     ProcessMessageSubGroupMulti(group, RelCacheMsgs, func(msgs, n));
     591      404746 : }
     592             : 
     593             : /* ----------------------------------------------------------------
     594             :  *                    private support functions
     595             :  * ----------------------------------------------------------------
     596             :  */
     597             : 
     598             : /*
     599             :  * RegisterCatcacheInvalidation
     600             :  *
     601             :  * Register an invalidation event for a catcache tuple entry.
     602             :  */
     603             : static void
     604     6104956 : RegisterCatcacheInvalidation(int cacheId,
     605             :                              uint32 hashValue,
     606             :                              Oid dbId,
     607             :                              void *context)
     608             : {
     609     6104956 :     InvalidationInfo *info = (InvalidationInfo *) context;
     610             : 
     611     6104956 :     AddCatcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs,
     612             :                                    cacheId, hashValue, dbId);
     613     6104956 : }
     614             : 
     615             : /*
     616             :  * RegisterCatalogInvalidation
     617             :  *
     618             :  * Register an invalidation event for all catcache entries from a catalog.
     619             :  */
     620             : static void
     621         222 : RegisterCatalogInvalidation(InvalidationInfo *info, Oid dbId, Oid catId)
     622             : {
     623         222 :     AddCatalogInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, catId);
     624         222 : }
     625             : 
     626             : /*
     627             :  * RegisterRelcacheInvalidation
     628             :  *
     629             :  * As above, but register a relcache invalidation event.
     630             :  */
     631             : static void
     632     2295332 : RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
     633             : {
     634     2295332 :     AddRelcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
     635             : 
     636             :     /*
     637             :      * Most of the time, relcache invalidation is associated with system
     638             :      * catalog updates, but there are a few cases where it isn't.  Quick hack
     639             :      * to ensure that the next CommandCounterIncrement() will think that we
     640             :      * need to do CommandEndInvalidationMessages().
     641             :      */
     642     2295332 :     (void) GetCurrentCommandId(true);
     643             : 
     644             :     /*
     645             :      * If the relation being invalidated is one of those cached in a relcache
     646             :      * init file, mark that we need to zap that file at commit. For simplicity
     647             :      * invalidations for a specific database always invalidate the shared file
     648             :      * as well.  Also zap when we are invalidating whole relcache.
     649             :      */
     650     2295332 :     if (relId == InvalidOid || RelationIdIsInInitFile(relId))
     651      208676 :         info->RelcacheInitFileInval = true;
     652     2295332 : }
     653             : 
     654             : /*
     655             :  * RegisterRelsyncInvalidation
     656             :  *
     657             :  * As above, but register a relsynccache invalidation event.
     658             :  */
     659             : static void
     660          12 : RegisterRelsyncInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
     661             : {
     662          12 :     AddRelsyncInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
     663          12 : }
     664             : 
     665             : /*
     666             :  * RegisterSnapshotInvalidation
     667             :  *
     668             :  * Register an invalidation event for MVCC scans against a given catalog.
     669             :  * Only needed for catalogs that don't have catcaches.
     670             :  */
     671             : static void
     672     1127438 : RegisterSnapshotInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
     673             : {
     674     1127438 :     AddSnapshotInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
     675     1127438 : }
     676             : 
     677             : /*
     678             :  * PrepareInvalidationState
     679             :  *      Initialize inval data for the current (sub)transaction.
     680             :  */
     681             : static InvalidationInfo *
     682     4400762 : PrepareInvalidationState(void)
     683             : {
     684             :     TransInvalidationInfo *myInfo;
     685             : 
     686             :     /* PrepareToInvalidateCacheTuple() needs relcache */
     687     4400762 :     AssertCouldGetRelation();
     688             :     /* Can't queue transactional message while collecting inplace messages. */
     689             :     Assert(inplaceInvalInfo == NULL);
     690             : 
     691     8532876 :     if (transInvalInfo != NULL &&
     692     4132114 :         transInvalInfo->my_level == GetCurrentTransactionNestLevel())
     693     4131962 :         return (InvalidationInfo *) transInvalInfo;
     694             : 
     695             :     myInfo = (TransInvalidationInfo *)
     696      268800 :         MemoryContextAllocZero(TopTransactionContext,
     697             :                                sizeof(TransInvalidationInfo));
     698      268800 :     myInfo->parent = transInvalInfo;
     699      268800 :     myInfo->my_level = GetCurrentTransactionNestLevel();
     700             : 
     701             :     /* Now, do we have a previous stack entry? */
     702      268800 :     if (transInvalInfo != NULL)
     703             :     {
     704             :         /* Yes; this one should be for a deeper nesting level. */
     705             :         Assert(myInfo->my_level > transInvalInfo->my_level);
     706             : 
     707             :         /*
     708             :          * The parent (sub)transaction must not have any current (i.e.,
     709             :          * not-yet-locally-processed) messages.  If it did, we'd have a
     710             :          * semantic problem: the new subtransaction presumably ought not be
     711             :          * able to see those events yet, but since the CommandCounter is
     712             :          * linear, that can't work once the subtransaction advances the
     713             :          * counter.  This is a convenient place to check for that, as well as
     714             :          * being important to keep management of the message arrays simple.
     715             :          */
     716         152 :         if (NumMessagesInGroup(&transInvalInfo->ii.CurrentCmdInvalidMsgs) != 0)
     717           0 :             elog(ERROR, "cannot start a subtransaction when there are unprocessed inval messages");
     718             : 
     719             :         /*
     720             :          * MemoryContextAllocZero set firstmsg = nextmsg = 0 in each group,
     721             :          * which is fine for the first (sub)transaction, but otherwise we need
     722             :          * to update them to follow whatever is already in the arrays.
     723             :          */
     724         152 :         SetGroupToFollow(&myInfo->PriorCmdInvalidMsgs,
     725             :                          &transInvalInfo->ii.CurrentCmdInvalidMsgs);
     726         152 :         SetGroupToFollow(&myInfo->ii.CurrentCmdInvalidMsgs,
     727             :                          &myInfo->PriorCmdInvalidMsgs);
     728             :     }
     729             :     else
     730             :     {
     731             :         /*
     732             :          * Here, we need only clear any array pointers left over from a prior
     733             :          * transaction.
     734             :          */
     735      268648 :         InvalMessageArrays[CatCacheMsgs].msgs = NULL;
     736      268648 :         InvalMessageArrays[CatCacheMsgs].maxmsgs = 0;
     737      268648 :         InvalMessageArrays[RelCacheMsgs].msgs = NULL;
     738      268648 :         InvalMessageArrays[RelCacheMsgs].maxmsgs = 0;
     739             :     }
     740             : 
     741      268800 :     transInvalInfo = myInfo;
     742      268800 :     return (InvalidationInfo *) myInfo;
     743             : }
     744             : 
     745             : /*
     746             :  * PrepareInplaceInvalidationState
     747             :  *      Initialize inval data for an inplace update.
     748             :  *
     749             :  * See previous function for more background.
     750             :  */
     751             : static InvalidationInfo *
     752      314244 : PrepareInplaceInvalidationState(void)
     753             : {
     754             :     InvalidationInfo *myInfo;
     755             : 
     756      314244 :     AssertCouldGetRelation();
     757             :     /* limit of one inplace update under assembly */
     758             :     Assert(inplaceInvalInfo == NULL);
     759             : 
     760             :     /* gone after WAL insertion CritSection ends, so use current context */
     761      314244 :     myInfo = palloc0_object(InvalidationInfo);
     762             : 
     763             :     /* Stash our messages past end of the transactional messages, if any. */
     764      314244 :     if (transInvalInfo != NULL)
     765      112178 :         SetGroupToFollow(&myInfo->CurrentCmdInvalidMsgs,
     766             :                          &transInvalInfo->ii.CurrentCmdInvalidMsgs);
     767             :     else
     768             :     {
     769      202066 :         InvalMessageArrays[CatCacheMsgs].msgs = NULL;
     770      202066 :         InvalMessageArrays[CatCacheMsgs].maxmsgs = 0;
     771      202066 :         InvalMessageArrays[RelCacheMsgs].msgs = NULL;
     772      202066 :         InvalMessageArrays[RelCacheMsgs].maxmsgs = 0;
     773             :     }
     774             : 
     775      314244 :     inplaceInvalInfo = myInfo;
     776      314244 :     return myInfo;
     777             : }
     778             : 
     779             : /* ----------------------------------------------------------------
     780             :  *                    public functions
     781             :  * ----------------------------------------------------------------
     782             :  */
     783             : 
     784             : void
     785        4426 : InvalidateSystemCachesExtended(bool debug_discard)
     786             : {
     787             :     int         i;
     788             : 
     789        4426 :     InvalidateCatalogSnapshot();
     790        4426 :     ResetCatalogCachesExt(debug_discard);
     791        4426 :     RelationCacheInvalidate(debug_discard); /* gets smgr and relmap too */
     792             : 
     793       76710 :     for (i = 0; i < syscache_callback_count; i++)
     794             :     {
     795       72284 :         struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
     796             : 
     797       72284 :         ccitem->function(ccitem->arg, ccitem->id, 0);
     798             :     }
     799             : 
     800       10162 :     for (i = 0; i < relcache_callback_count; i++)
     801             :     {
     802        5736 :         struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
     803             : 
     804        5736 :         ccitem->function(ccitem->arg, InvalidOid);
     805             :     }
     806             : 
     807        4466 :     for (i = 0; i < relsync_callback_count; i++)
     808             :     {
     809          40 :         struct RELSYNCCALLBACK *ccitem = relsync_callback_list + i;
     810             : 
     811          40 :         ccitem->function(ccitem->arg, InvalidOid);
     812             :     }
     813        4426 : }
     814             : 
     815             : /*
     816             :  * LocalExecuteInvalidationMessage
     817             :  *
     818             :  * Process a single invalidation message (which could be of any type).
     819             :  * Only the local caches are flushed; this does not transmit the message
     820             :  * to other backends.
     821             :  */
     822             : void
     823    41047396 : LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
     824             : {
     825    41047396 :     if (msg->id >= 0)
     826             :     {
     827    32708398 :         if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
     828             :         {
     829    24066122 :             InvalidateCatalogSnapshot();
     830             : 
     831    24066122 :             SysCacheInvalidate(msg->cc.id, msg->cc.hashValue);
     832             : 
     833    24066122 :             CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
     834             :         }
     835             :     }
     836     8338998 :     else if (msg->id == SHAREDINVALCATALOG_ID)
     837             :     {
     838         916 :         if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
     839             :         {
     840         768 :             InvalidateCatalogSnapshot();
     841             : 
     842         768 :             CatalogCacheFlushCatalog(msg->cat.catId);
     843             : 
     844             :             /* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
     845             :         }
     846             :     }
     847     8338082 :     else if (msg->id == SHAREDINVALRELCACHE_ID)
     848             :     {
     849     4549484 :         if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
     850             :         {
     851             :             int         i;
     852             : 
     853     3343294 :             if (msg->rc.relId == InvalidOid)
     854         602 :                 RelationCacheInvalidate(false);
     855             :             else
     856     3342692 :                 RelationCacheInvalidateEntry(msg->rc.relId);
     857             : 
     858     9124592 :             for (i = 0; i < relcache_callback_count; i++)
     859             :             {
     860     5781304 :                 struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
     861             : 
     862     5781304 :                 ccitem->function(ccitem->arg, msg->rc.relId);
     863             :             }
     864             :         }
     865             :     }
     866     3788598 :     else if (msg->id == SHAREDINVALSMGR_ID)
     867             :     {
     868             :         /*
     869             :          * We could have smgr entries for relations of other databases, so no
     870             :          * short-circuit test is possible here.
     871             :          */
     872             :         RelFileLocatorBackend rlocator;
     873             : 
     874      499566 :         rlocator.locator = msg->sm.rlocator;
     875      499566 :         rlocator.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
     876      499566 :         smgrreleaserellocator(rlocator);
     877             :     }
     878     3289032 :     else if (msg->id == SHAREDINVALRELMAP_ID)
     879             :     {
     880             :         /* We only care about our own database and shared catalogs */
     881         686 :         if (msg->rm.dbId == InvalidOid)
     882         274 :             RelationMapInvalidate(true);
     883         412 :         else if (msg->rm.dbId == MyDatabaseId)
     884         278 :             RelationMapInvalidate(false);
     885             :     }
     886     3288346 :     else if (msg->id == SHAREDINVALSNAPSHOT_ID)
     887             :     {
     888             :         /* We only care about our own database and shared catalogs */
     889     3288284 :         if (msg->sn.dbId == InvalidOid)
     890       98334 :             InvalidateCatalogSnapshot();
     891     3189950 :         else if (msg->sn.dbId == MyDatabaseId)
     892     2427130 :             InvalidateCatalogSnapshot();
     893             :     }
     894          62 :     else if (msg->id == SHAREDINVALRELSYNC_ID)
     895             :     {
     896             :         /* We only care about our own database */
     897          62 :         if (msg->rs.dbId == MyDatabaseId)
     898          62 :             CallRelSyncCallbacks(msg->rs.relid);
     899             :     }
     900             :     else
     901           0 :         elog(FATAL, "unrecognized SI message ID: %d", msg->id);
     902    41047390 : }
     903             : 
     904             : /*
     905             :  *      InvalidateSystemCaches
     906             :  *
     907             :  *      This blows away all tuples in the system catalog caches and
     908             :  *      all the cached relation descriptors and smgr cache entries.
     909             :  *      Relation descriptors that have positive refcounts are then rebuilt.
     910             :  *
     911             :  *      We call this when we see a shared-inval-queue overflow signal,
     912             :  *      since that tells us we've lost some shared-inval messages and hence
     913             :  *      don't know what needs to be invalidated.
     914             :  */
     915             : void
     916        4426 : InvalidateSystemCaches(void)
     917             : {
     918        4426 :     InvalidateSystemCachesExtended(false);
     919        4426 : }
     920             : 
     921             : /*
     922             :  * AcceptInvalidationMessages
     923             :  *      Read and process invalidation messages from the shared invalidation
     924             :  *      message queue.
     925             :  *
     926             :  * Note:
     927             :  *      This should be called as the first step in processing a transaction.
     928             :  */
     929             : void
     930    39769942 : AcceptInvalidationMessages(void)
     931             : {
     932             : #ifdef USE_ASSERT_CHECKING
     933             :     /* message handlers shall access catalogs only during transactions */
     934             :     if (IsTransactionState())
     935             :         AssertCouldGetRelation();
     936             : #endif
     937             : 
     938    39769942 :     ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
     939             :                                  InvalidateSystemCaches);
     940             : 
     941             :     /*----------
     942             :      * Test code to force cache flushes anytime a flush could happen.
     943             :      *
     944             :      * This helps detect intermittent faults caused by code that reads a cache
     945             :      * entry and then performs an action that could invalidate the entry, but
     946             :      * rarely actually does so.  This can spot issues that would otherwise
     947             :      * only arise with badly timed concurrent DDL, for example.
     948             :      *
     949             :      * The default debug_discard_caches = 0 does no forced cache flushes.
     950             :      *
     951             :      * If used with CLOBBER_FREED_MEMORY,
     952             :      * debug_discard_caches = 1 (formerly known as CLOBBER_CACHE_ALWAYS)
     953             :      * provides a fairly thorough test that the system contains no cache-flush
     954             :      * hazards.  However, it also makes the system unbelievably slow --- the
     955             :      * regression tests take about 100 times longer than normal.
     956             :      *
     957             :      * If you're a glutton for punishment, try
     958             :      * debug_discard_caches = 3 (formerly known as CLOBBER_CACHE_RECURSIVELY).
     959             :      * This slows things by at least a factor of 10000, so I wouldn't suggest
     960             :      * trying to run the entire regression test suite that way.  It's useful to try
     961             :      * a few simple tests, to make sure that cache reload isn't subject to
     962             :      * internal cache-flush hazards, but after you've done a few thousand
     963             :      * recursive reloads it's unlikely you'll learn more.
     964             :      *----------
     965             :      */
     966             : #ifdef DISCARD_CACHES_ENABLED
     967             :     {
     968             :         static int  recursion_depth = 0;
     969             : 
     970             :         if (recursion_depth < debug_discard_caches)
     971             :         {
     972             :             recursion_depth++;
     973             :             InvalidateSystemCachesExtended(true);
     974             :             recursion_depth--;
     975             :         }
     976             :     }
     977             : #endif
     978    39769942 : }
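
As a usage illustration (a hedged sketch, not code from this file): callers
rarely invoke AcceptInvalidationMessages() by hand; it is normally reached
from transaction start (AtStart_Cache() in xact.c) and from the
lock-acquisition paths in lmgr.c, so that DDL committed by a previous lock
holder is absorbed before the caller trusts its caches.  The sketch below only
illustrates that ordering; example_open_rel_for_reading() is a hypothetical
helper, and LockRelationOid() already accepts invalidation messages
internally, so the explicit call is shown purely for clarity.

#include "postgres.h"
#include "storage/lmgr.h"
#include "utils/inval.h"

/* Hedged sketch: lock first, then sync caches, then rely on them. */
static void
example_open_rel_for_reading(Oid relid)
{
    LockRelationOid(relid, AccessShareLock);    /* serializes against DDL */
    AcceptInvalidationMessages();               /* absorb any pending invals */

    /* ... syscache/relcache lookups for relid now reflect committed DDL ... */
}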
     979             : 
     980             : /*
     981             :  * PostPrepare_Inval
     982             :  *      Clean up after successful PREPARE.
     983             :  *
     984             :  * Here, we want to act as though the transaction aborted, so that we will
     985             :  * undo any syscache changes it made, thereby bringing us into sync with the
     986             :  * outside world, which doesn't believe the transaction committed yet.
     987             :  *
     988             :  * If the prepared transaction is later aborted, there is nothing more to
     989             :  * do; if it commits, we will receive the consequent inval messages just
     990             :  * like everyone else.
     991             :  */
     992             : void
     993         662 : PostPrepare_Inval(void)
     994             : {
     995         662 :     AtEOXact_Inval(false);
     996         662 : }
     997             : 
     998             : /*
     999             :  * xactGetCommittedInvalidationMessages() is called by
    1000             :  * RecordTransactionCommit() to collect invalidation messages to add to the
    1001             :  * commit record. This applies only to commit message types, never to
    1002             :  * abort records. Must always run before AtEOXact_Inval(), since that
    1003             :  * removes the data we need to see.
    1004             :  *
    1005             :  * Remember that this runs before we have officially committed, so we
    1006             :  * must not do anything here to change what might occur *if* we should
    1007             :  * fail between here and the actual commit.
    1008             :  *
    1009             :  * see also xact_redo_commit() and xact_desc_commit()
    1010             :  */
    1011             : int
    1012      515884 : xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
    1013             :                                      bool *RelcacheInitFileInval)
    1014             : {
    1015             :     SharedInvalidationMessage *msgarray;
    1016             :     int         nummsgs;
    1017             :     int         nmsgs;
    1018             : 
    1019             :     /* Quick exit if we haven't done anything with invalidation messages. */
    1020      515884 :     if (transInvalInfo == NULL)
    1021             :     {
    1022      313234 :         *RelcacheInitFileInval = false;
    1023      313234 :         *msgs = NULL;
    1024      313234 :         return 0;
    1025             :     }
    1026             : 
    1027             :     /* Must be at top of stack */
    1028             :     Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
    1029             : 
    1030             :     /*
    1031             :      * Relcache init file invalidation requires processing both before and
    1032             :      * after we send the SI messages.  However, we need not do anything unless
    1033             :      * we committed.
    1034             :      */
    1035      202650 :     *RelcacheInitFileInval = transInvalInfo->ii.RelcacheInitFileInval;
    1036             : 
    1037             :     /*
    1038             :      * Collect all the pending messages into a single contiguous array of
    1039             :      * invalidation messages, to simplify what needs to happen while building
    1040             :      * the commit WAL message.  Maintain the order that they would be
    1041             :      * processed in by AtEOXact_Inval(), to ensure emulated behaviour in redo
    1042             :      * is as similar as possible to original.  We want the same bugs, if any,
    1043             :      * not new ones.
    1044             :      */
    1045      202650 :     nummsgs = NumMessagesInGroup(&transInvalInfo->PriorCmdInvalidMsgs) +
    1046      202650 :         NumMessagesInGroup(&transInvalInfo->ii.CurrentCmdInvalidMsgs);
    1047             : 
    1048      202650 :     *msgs = msgarray = (SharedInvalidationMessage *)
    1049      202650 :         MemoryContextAlloc(CurTransactionContext,
    1050             :                            nummsgs * sizeof(SharedInvalidationMessage));
    1051             : 
    1052      202650 :     nmsgs = 0;
    1053      202650 :     ProcessMessageSubGroupMulti(&transInvalInfo->PriorCmdInvalidMsgs,
    1054             :                                 CatCacheMsgs,
    1055             :                                 (memcpy(msgarray + nmsgs,
    1056             :                                         msgs,
    1057             :                                         n * sizeof(SharedInvalidationMessage)),
    1058             :                                  nmsgs += n));
    1059      202650 :     ProcessMessageSubGroupMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
    1060             :                                 CatCacheMsgs,
    1061             :                                 (memcpy(msgarray + nmsgs,
    1062             :                                         msgs,
    1063             :                                         n * sizeof(SharedInvalidationMessage)),
    1064             :                                  nmsgs += n));
    1065      202650 :     ProcessMessageSubGroupMulti(&transInvalInfo->PriorCmdInvalidMsgs,
    1066             :                                 RelCacheMsgs,
    1067             :                                 (memcpy(msgarray + nmsgs,
    1068             :                                         msgs,
    1069             :                                         n * sizeof(SharedInvalidationMessage)),
    1070             :                                  nmsgs += n));
    1071      202650 :     ProcessMessageSubGroupMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
    1072             :                                 RelCacheMsgs,
    1073             :                                 (memcpy(msgarray + nmsgs,
    1074             :                                         msgs,
    1075             :                                         n * sizeof(SharedInvalidationMessage)),
    1076             :                                  nmsgs += n));
    1077             :     Assert(nmsgs == nummsgs);
    1078             : 
    1079      202650 :     return nmsgs;
    1080             : }
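
As context (a hedged sketch, not code from this file): the caller side lives
in RecordTransactionCommit() in access/transam/xact.c, which collects the
messages before building the commit record; the variables below are
simplified and the record-building step is elided.

    SharedInvalidationMessage *invalMessages = NULL;
    bool        RelcacheInitFileInval = false;
    int         nmsgs;

    /*
     * Gather pending invalidations so they can ride along in the commit
     * record; redo later hands them to ProcessCommittedInvalidationMessages().
     */
    nmsgs = xactGetCommittedInvalidationMessages(&invalMessages,
                                                 &RelcacheInitFileInval);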
    1081             : 
    1082             : /*
    1083             :  * inplaceGetInvalidationMessages() is called by the inplace update code to
    1084             :  * collect invalidation messages to add to its WAL record.  Like the previous
    1085             :  * function, this runs before the update is applied, so we might still fail.
    1086             :  */
    1087             : int
    1088      116826 : inplaceGetInvalidationMessages(SharedInvalidationMessage **msgs,
    1089             :                                bool *RelcacheInitFileInval)
    1090             : {
    1091             :     SharedInvalidationMessage *msgarray;
    1092             :     int         nummsgs;
    1093             :     int         nmsgs;
    1094             : 
    1095             :     /* Quick exit if we haven't done anything with invalidation messages. */
    1096      116826 :     if (inplaceInvalInfo == NULL)
    1097             :     {
    1098       29886 :         *RelcacheInitFileInval = false;
    1099       29886 :         *msgs = NULL;
    1100       29886 :         return 0;
    1101             :     }
    1102             : 
    1103       86940 :     *RelcacheInitFileInval = inplaceInvalInfo->RelcacheInitFileInval;
    1104       86940 :     nummsgs = NumMessagesInGroup(&inplaceInvalInfo->CurrentCmdInvalidMsgs);
    1105       86940 :     *msgs = msgarray = (SharedInvalidationMessage *)
    1106       86940 :         palloc(nummsgs * sizeof(SharedInvalidationMessage));
    1107             : 
    1108       86940 :     nmsgs = 0;
    1109       86940 :     ProcessMessageSubGroupMulti(&inplaceInvalInfo->CurrentCmdInvalidMsgs,
    1110             :                                 CatCacheMsgs,
    1111             :                                 (memcpy(msgarray + nmsgs,
    1112             :                                         msgs,
    1113             :                                         n * sizeof(SharedInvalidationMessage)),
    1114             :                                  nmsgs += n));
    1115       86940 :     ProcessMessageSubGroupMulti(&inplaceInvalInfo->CurrentCmdInvalidMsgs,
    1116             :                                 RelCacheMsgs,
    1117             :                                 (memcpy(msgarray + nmsgs,
    1118             :                                         msgs,
    1119             :                                         n * sizeof(SharedInvalidationMessage)),
    1120             :                                  nmsgs += n));
    1121             :     Assert(nmsgs == nummsgs);
    1122             : 
    1123       86940 :     return nmsgs;
    1124             : }
    1125             : 
    1126             : /*
    1127             :  * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
    1128             :  * standby_redo() to process invalidation messages. Currently that happens
    1129             :  * only at end-of-xact.
    1130             :  *
    1131             :  * Relcache init file invalidation requires processing both
    1132             :  * before and after we send the SI messages. See AtEOXact_Inval()
    1133             :  */
    1134             : void
    1135       57500 : ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
    1136             :                                      int nmsgs, bool RelcacheInitFileInval,
    1137             :                                      Oid dbid, Oid tsid)
    1138             : {
    1139       57500 :     if (nmsgs <= 0)
    1140       10406 :         return;
    1141             : 
    1142       47094 :     elog(DEBUG4, "replaying commit with %d messages%s", nmsgs,
    1143             :          (RelcacheInitFileInval ? " and relcache file invalidation" : ""));
    1144             : 
    1145       47094 :     if (RelcacheInitFileInval)
    1146             :     {
    1147         898 :         elog(DEBUG4, "removing relcache init files for database %u", dbid);
    1148             : 
    1149             :         /*
    1150             :          * RelationCacheInitFilePreInvalidate, when the invalidation message
    1151             :          * is for a specific database, requires DatabasePath to be set, but we
    1152             :          * should not use SetDatabasePath during recovery, since it is
    1153             :          * intended to be used only once by normal backends.  Hence, a quick
    1154             :          * hack: set DatabasePath directly then unset after use.
    1155             :          */
    1156         898 :         if (OidIsValid(dbid))
    1157         898 :             DatabasePath = GetDatabasePath(dbid, tsid);
    1158             : 
    1159         898 :         RelationCacheInitFilePreInvalidate();
    1160             : 
    1161         898 :         if (OidIsValid(dbid))
    1162             :         {
    1163         898 :             pfree(DatabasePath);
    1164         898 :             DatabasePath = NULL;
    1165             :         }
    1166             :     }
    1167             : 
    1168       47094 :     SendSharedInvalidMessages(msgs, nmsgs);
    1169             : 
    1170       47094 :     if (RelcacheInitFileInval)
    1171         898 :         RelationCacheInitFilePostInvalidate();
    1172             : }
    1173             : 
    1174             : /*
    1175             :  * AtEOXact_Inval
    1176             :  *      Process queued-up invalidation messages at end of main transaction.
    1177             :  *
    1178             :  * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
    1179             :  * to the shared invalidation message queue.  Note that these will be read
    1180             :  * not only by other backends, but also by our own backend at the next
    1181             :  * transaction start (via AcceptInvalidationMessages).  This means that
    1182             :  * we can skip immediate local processing of anything that's still in
    1183             :  * CurrentCmdInvalidMsgs, and just send that list out too.
    1184             :  *
    1185             :  * If not isCommit, we are aborting, and must locally process the messages
    1186             :  * in PriorCmdInvalidMsgs.  No messages need be sent to other backends,
    1187             :  * since they'll not have seen our changed tuples anyway.  We can forget
    1188             :  * about CurrentCmdInvalidMsgs too, since those changes haven't touched
    1189             :  * the caches yet.
    1190             :  *
    1191             :  * In any case, reset our state to empty.  We need not physically
    1192             :  * free memory here, since TopTransactionContext is about to be emptied
    1193             :  * anyway.
    1194             :  *
    1195             :  * Note:
    1196             :  *      This should be called as the last step in processing a transaction.
    1197             :  */
    1198             : void
    1199     1002512 : AtEOXact_Inval(bool isCommit)
    1200             : {
    1201     1002512 :     inplaceInvalInfo = NULL;
    1202             : 
    1203             :     /* Quick exit if no transactional messages */
    1204     1002512 :     if (transInvalInfo == NULL)
    1205      733928 :         return;
    1206             : 
    1207             :     /* Must be at top of stack */
    1208             :     Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
    1209             : 
    1210      268584 :     INJECTION_POINT("transaction-end-process-inval", NULL);
    1211             : 
    1212      268584 :     if (isCommit)
    1213             :     {
    1214             :         /*
    1215             :          * Relcache init file invalidation requires processing both before and
    1216             :          * after we send the SI messages.  However, we need not do anything
    1217             :          * unless we committed.
    1218             :          */
    1219      263716 :         if (transInvalInfo->ii.RelcacheInitFileInval)
    1220       40568 :             RelationCacheInitFilePreInvalidate();
    1221             : 
    1222      263716 :         AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
    1223      263716 :                                    &transInvalInfo->ii.CurrentCmdInvalidMsgs);
    1224             : 
    1225      263716 :         ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
    1226             :                                          SendSharedInvalidMessages);
    1227             : 
    1228      263716 :         if (transInvalInfo->ii.RelcacheInitFileInval)
    1229       40568 :             RelationCacheInitFilePostInvalidate();
    1230             :     }
    1231             :     else
    1232             :     {
    1233        4868 :         ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
    1234             :                                     LocalExecuteInvalidationMessage);
    1235             :     }
    1236             : 
    1237             :     /* Need not free anything explicitly */
    1238      268584 :     transInvalInfo = NULL;
    1239             : }
    1240             : 
    1241             : /*
    1242             :  * PreInplace_Inval
    1243             :  *      Process queued-up invalidation before inplace update critical section.
    1244             :  *
    1245             :  * Tasks belong here if they are safe even if the inplace update does not
    1246             :  * complete.  Currently, this just unlinks a cache file, which can fail.  The
    1247             :  * sum of this and AtInplace_Inval() mirrors AtEOXact_Inval(isCommit=true).
    1248             :  */
    1249             : void
    1250      170916 : PreInplace_Inval(void)
    1251             : {
    1252             :     Assert(CritSectionCount == 0);
    1253             : 
    1254      170916 :     if (inplaceInvalInfo && inplaceInvalInfo->RelcacheInitFileInval)
    1255       38046 :         RelationCacheInitFilePreInvalidate();
    1256      170916 : }
    1257             : 
    1258             : /*
    1259             :  * AtInplace_Inval
    1260             :  *      Process queued-up invalidations after inplace update buffer mutation.
    1261             :  */
    1262             : void
    1263      170916 : AtInplace_Inval(void)
    1264             : {
    1265             :     Assert(CritSectionCount > 0);
    1266             : 
    1267      170916 :     if (inplaceInvalInfo == NULL)
    1268       29886 :         return;
    1269             : 
    1270      141030 :     ProcessInvalidationMessagesMulti(&inplaceInvalInfo->CurrentCmdInvalidMsgs,
    1271             :                                      SendSharedInvalidMessages);
    1272             : 
    1273      141030 :     if (inplaceInvalInfo->RelcacheInitFileInval)
    1274       38046 :         RelationCacheInitFilePostInvalidate();
    1275             : 
    1276      141030 :     inplaceInvalInfo = NULL;
    1277             : }
    1278             : 
    1279             : /*
    1280             :  * ForgetInplace_Inval
    1281             :  *      Alternative to PreInplace_Inval()+AtInplace_Inval(): discard queued-up
    1282             :  *      invalidations.  This lets the inplace update code enumerate
    1283             :  *      invalidations optimistically, before locking the buffer.
    1284             :  */
    1285             : void
    1286      179334 : ForgetInplace_Inval(void)
    1287             : {
    1288      179334 :     inplaceInvalInfo = NULL;
    1289      179334 : }
    1290             : 
    1291             : /*
    1292             :  * AtEOSubXact_Inval
    1293             :  *      Process queued-up invalidation messages at end of subtransaction.
    1294             :  *
    1295             :  * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
    1296             :  * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
    1297             :  * parent's PriorCmdInvalidMsgs list.
    1298             :  *
    1299             :  * If not isCommit, we are aborting, and must locally process the messages
    1300             :  * in PriorCmdInvalidMsgs.  No messages need be sent to other backends.
    1301             :  * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
    1302             :  * touched the caches yet.
    1303             :  *
    1304             :  * In any case, pop the transaction stack.  We need not physically free memory
    1305             :  * here, since CurTransactionContext is about to be emptied anyway
    1306             :  * (if aborting).  Beware of the possibility of aborting the same nesting
    1307             :  * level twice, though.
    1308             :  */
    1309             : void
    1310       20164 : AtEOSubXact_Inval(bool isCommit)
    1311             : {
    1312             :     int         my_level;
    1313             :     TransInvalidationInfo *myInfo;
    1314             : 
    1315             :     /*
    1316             :      * Successful inplace update must clear this, but we clear it on abort.
    1317             :      * Inplace updates allocate this in CurrentMemoryContext, which has
    1318             :      * lifespan <= subtransaction lifespan.  Hence, don't free it explicitly.
    1319             :      */
    1320       20164 :     if (isCommit)
    1321             :         Assert(inplaceInvalInfo == NULL);
    1322             :     else
    1323        9412 :         inplaceInvalInfo = NULL;
    1324             : 
    1325             :     /* Quick exit if no transactional messages. */
    1326       20164 :     myInfo = transInvalInfo;
    1327       20164 :     if (myInfo == NULL)
    1328       18482 :         return;
    1329             : 
    1330             :     /* Also bail out quickly if messages are not for this level. */
    1331        1682 :     my_level = GetCurrentTransactionNestLevel();
    1332        1682 :     if (myInfo->my_level != my_level)
    1333             :     {
    1334             :         Assert(myInfo->my_level < my_level);
    1335        1384 :         return;
    1336             :     }
    1337             : 
    1338         298 :     if (isCommit)
    1339             :     {
    1340             :         /* If CurrentCmdInvalidMsgs still has anything, fix it */
    1341         106 :         CommandEndInvalidationMessages();
    1342             : 
    1343             :         /*
    1344             :          * We create invalidation stack entries lazily, so the parent might
    1345             :          * not have one.  Instead of creating one, moving all the data over,
    1346             :          * and then freeing our own, we can just adjust the level of our own
    1347             :          * entry.
    1348             :          */
    1349         106 :         if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
    1350             :         {
    1351          82 :             myInfo->my_level--;
    1352          82 :             return;
    1353             :         }
    1354             : 
    1355             :         /*
    1356             :          * Pass up my inval messages to parent.  Notice that we stick them in
    1357             :          * PriorCmdInvalidMsgs, not CurrentCmdInvalidMsgs, since they've
    1358             :          * already been locally processed.  (This would trigger the Assert in
    1359             :          * AppendInvalidationMessageSubGroup if the parent's
    1360             :          * CurrentCmdInvalidMsgs isn't empty; but we already checked that in
    1361             :          * PrepareInvalidationState.)
    1362             :          */
    1363          24 :         AppendInvalidationMessages(&myInfo->parent->PriorCmdInvalidMsgs,
    1364             :                                    &myInfo->PriorCmdInvalidMsgs);
    1365             : 
    1366             :         /* Must readjust parent's CurrentCmdInvalidMsgs indexes now */
    1367          24 :         SetGroupToFollow(&myInfo->parent->ii.CurrentCmdInvalidMsgs,
    1368             :                          &myInfo->parent->PriorCmdInvalidMsgs);
    1369             : 
    1370             :         /* Pending relcache inval becomes parent's problem too */
    1371          24 :         if (myInfo->ii.RelcacheInitFileInval)
    1372           0 :             myInfo->parent->ii.RelcacheInitFileInval = true;
    1373             : 
    1374             :         /* Pop the transaction state stack */
    1375          24 :         transInvalInfo = myInfo->parent;
    1376             : 
    1377             :         /* Need not free anything else explicitly */
    1378          24 :         pfree(myInfo);
    1379             :     }
    1380             :     else
    1381             :     {
    1382         192 :         ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs,
    1383             :                                     LocalExecuteInvalidationMessage);
    1384             : 
    1385             :         /* Pop the transaction state stack */
    1386         192 :         transInvalInfo = myInfo->parent;
    1387             : 
    1388             :         /* Need not free anything else explicitly */
    1389         192 :         pfree(myInfo);
    1390             :     }
    1391             : }
    1392             : 
    1393             : /*
    1394             :  * CommandEndInvalidationMessages
    1395             :  *      Process queued-up invalidation messages at end of one command
    1396             :  *      in a transaction.
    1397             :  *
    1398             :  * Here, we send no messages to the shared queue, since we don't know yet if
    1399             :  * we will commit.  We do need to locally process the CurrentCmdInvalidMsgs
    1400             :  * list, so as to flush our caches of any entries we have outdated in the
    1401             :  * current command.  We then move the current-cmd list over to become part
    1402             :  * of the prior-cmds list.
    1403             :  *
    1404             :  * Note:
    1405             :  *      This should be called during CommandCounterIncrement(),
    1406             :  *      after we have advanced the command ID.
    1407             :  */
    1408             : void
    1409     1198166 : CommandEndInvalidationMessages(void)
    1410             : {
    1411             :     /*
    1412             :      * You might think this shouldn't be called outside any transaction, but
    1413             :      * bootstrap does it, and also ABORT issued when not in a transaction. So
    1414             :      * just quietly return if no state to work on.
    1415             :      */
    1416     1198166 :     if (transInvalInfo == NULL)
    1417      380002 :         return;
    1418             : 
    1419      818164 :     ProcessInvalidationMessages(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
    1420             :                                 LocalExecuteInvalidationMessage);
    1421             : 
    1422             :     /* WAL Log per-command invalidation messages for logical decoding */
    1423      818158 :     if (XLogLogicalInfoActive())
    1424        9126 :         LogLogicalInvalidations();
    1425             : 
    1426      818158 :     AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
    1427      818158 :                                &transInvalInfo->ii.CurrentCmdInvalidMsgs);
    1428             : }
    1429             : 
    1430             : 
    1431             : /*
    1432             :  * CacheInvalidateHeapTupleCommon
    1433             :  *      Common logic for end-of-command and inplace variants.
    1434             :  */
    1435             : static void
    1436    22937242 : CacheInvalidateHeapTupleCommon(Relation relation,
    1437             :                                HeapTuple tuple,
    1438             :                                HeapTuple newtuple,
    1439             :                                InvalidationInfo *(*prepare_callback) (void))
    1440             : {
    1441             :     InvalidationInfo *info;
    1442             :     Oid         tupleRelId;
    1443             :     Oid         databaseId;
    1444             :     Oid         relationId;
    1445             : 
    1446             :     /* PrepareToInvalidateCacheTuple() needs relcache */
    1447    22937242 :     AssertCouldGetRelation();
    1448             : 
    1449             :     /* Do nothing during bootstrap */
    1450    22937242 :     if (IsBootstrapProcessingMode())
    1451     1341912 :         return;
    1452             : 
    1453             :     /*
    1454             :      * We only need to worry about invalidation for tuples that are in system
    1455             :      * catalogs; user-relation tuples are never in catcaches and can't affect
    1456             :      * the relcache either.
    1457             :      */
    1458    21595330 :     if (!IsCatalogRelation(relation))
    1459    17082828 :         return;
    1460             : 
    1461             :     /*
    1462             :      * IsCatalogRelation() will return true for TOAST tables of system
    1463             :      * catalogs, but we don't care about those, either.
    1464             :      */
    1465     4512502 :     if (IsToastRelation(relation))
    1466       36208 :         return;
    1467             : 
    1468             :     /* Allocate any required resources. */
    1469     4476294 :     info = prepare_callback();
    1470             : 
    1471             :     /*
    1472             :      * First let the catcache do its thing
    1473             :      */
    1474     4476294 :     tupleRelId = RelationGetRelid(relation);
    1475     4476294 :     if (RelationInvalidatesSnapshotsOnly(tupleRelId))
    1476             :     {
    1477     1127438 :         databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
    1478     1127438 :         RegisterSnapshotInvalidation(info, databaseId, tupleRelId);
    1479             :     }
    1480             :     else
    1481     3348856 :         PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
    1482             :                                       RegisterCatcacheInvalidation,
    1483             :                                       info);
    1484             : 
    1485             :     /*
    1486             :      * Now, is this tuple one of the primary definers of a relcache entry? See
    1487             :      * comments in file header for deeper explanation.
    1488             :      *
    1489             :      * Note we ignore newtuple here; we assume an update cannot move a tuple
    1490             :      * from being part of one relcache entry to being part of another.
    1491             :      */
    1492     4476294 :     if (tupleRelId == RelationRelationId)
    1493             :     {
    1494      781688 :         Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
    1495             : 
    1496      781688 :         relationId = classtup->oid;
    1497      781688 :         if (classtup->relisshared)
    1498       52526 :             databaseId = InvalidOid;
    1499             :         else
    1500      729162 :             databaseId = MyDatabaseId;
    1501             :     }
    1502     3694606 :     else if (tupleRelId == AttributeRelationId)
    1503             :     {
    1504     1198136 :         Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
    1505             : 
    1506     1198136 :         relationId = atttup->attrelid;
    1507             : 
    1508             :         /*
    1509             :          * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
    1510             :          * even if the rel in question is shared (which we can't easily tell).
    1511             :          * This essentially means that only backends in this same database
    1512             :          * will react to the relcache flush request.  This is in fact
    1513             :          * appropriate, since only those backends could see our pg_attribute
    1514             :          * change anyway.  It looks a bit ugly though.  (In practice, shared
    1515             :          * relations can't have schema changes after bootstrap, so we should
    1516             :          * never come here for a shared rel anyway.)
    1517             :          */
    1518     1198136 :         databaseId = MyDatabaseId;
    1519             :     }
    1520     2496470 :     else if (tupleRelId == IndexRelationId)
    1521             :     {
    1522       67948 :         Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
    1523             : 
    1524             :         /*
    1525             :          * When a pg_index row is updated, we should send out a relcache inval
    1526             :          * for the index relation.  As above, we don't know the shared status
    1527             :          * of the index, but in practice it doesn't matter since indexes of
    1528             :          * shared catalogs can't have such updates.
    1529             :          */
    1530       67948 :         relationId = indextup->indexrelid;
    1531       67948 :         databaseId = MyDatabaseId;
    1532             :     }
    1533     2428522 :     else if (tupleRelId == ConstraintRelationId)
    1534             :     {
    1535       89308 :         Form_pg_constraint constrtup = (Form_pg_constraint) GETSTRUCT(tuple);
    1536             : 
    1537             :         /*
    1538             :          * Foreign keys are part of relcache entries, too, so send out an
    1539             :          * inval for the table that the FK applies to.
    1540             :          */
    1541       89308 :         if (constrtup->contype == CONSTRAINT_FOREIGN &&
    1542        9082 :             OidIsValid(constrtup->conrelid))
    1543             :         {
    1544        9082 :             relationId = constrtup->conrelid;
    1545        9082 :             databaseId = MyDatabaseId;
    1546             :         }
    1547             :         else
    1548       80226 :             return;
    1549             :     }
    1550             :     else
    1551     2339214 :         return;
    1552             : 
    1553             :     /*
    1554             :      * Yes.  We need to register a relcache invalidation event.
    1555             :      */
    1556     2056854 :     RegisterRelcacheInvalidation(info, databaseId, relationId);
    1557             : }
    1558             : 
    1559             : /*
    1560             :  * CacheInvalidateHeapTuple
    1561             :  *      Register the given tuple for invalidation at end of command
    1562             :  *      (ie, current command is creating or outdating this tuple) and end of
    1563             :  *      transaction.  Also, detect whether a relcache invalidation is implied.
    1564             :  *
    1565             :  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
    1566             :  * For an update, we are called just once, with tuple being the old tuple
    1567             :  * version and newtuple the new version.  This allows avoidance of duplicate
    1568             :  * effort during an update.
    1569             :  */
    1570             : void
    1571    22586992 : CacheInvalidateHeapTuple(Relation relation,
    1572             :                          HeapTuple tuple,
    1573             :                          HeapTuple newtuple)
    1574             : {
    1575    22586992 :     CacheInvalidateHeapTupleCommon(relation, tuple, newtuple,
    1576             :                                    PrepareInvalidationState);
    1577    22586992 : }
    1578             : 
    1579             : /*
    1580             :  * CacheInvalidateHeapTupleInplace
    1581             :  *      Register the given tuple for nontransactional invalidation pertaining
    1582             :  *      to an inplace update.  Also, detect whether a relcache invalidation is
    1583             :  *      implied.
    1584             :  *
    1585             :  * Like CacheInvalidateHeapTuple(), but for inplace updates.
    1586             :  *
    1587             :  * Just before and just after the inplace update, the tuple's cache keys must
    1588             :  * match those in key_equivalent_tuple.  Cache keys consist of catcache lookup
    1589             :  * key columns and columns referencing pg_class.oid values,
    1590             :  * e.g. pg_constraint.conrelid, which would trigger relcache inval.
    1591             :  */
    1592             : void
    1593      350250 : CacheInvalidateHeapTupleInplace(Relation relation,
    1594             :                                 HeapTuple key_equivalent_tuple)
    1595             : {
    1596      350250 :     CacheInvalidateHeapTupleCommon(relation, key_equivalent_tuple, NULL,
    1597             :                                    PrepareInplaceInvalidationState);
    1598      350250 : }
    1599             : 
    1600             : /*
    1601             :  * CacheInvalidateCatalog
    1602             :  *      Register invalidation of the whole content of a system catalog.
    1603             :  *
    1604             :  * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
    1605             :  * changed any tuples as moved them around.  Some uses of catcache entries
    1606             :  * expect their TIDs to be correct, so we have to blow away the entries.
    1607             :  *
    1608             :  * Note: we expect caller to verify that the rel actually is a system
    1609             :  * catalog.  If it isn't, no great harm is done, just a wasted sinval message.
    1610             :  */
    1611             : void
    1612         222 : CacheInvalidateCatalog(Oid catalogId)
    1613             : {
    1614             :     Oid         databaseId;
    1615             : 
    1616         222 :     if (IsSharedRelation(catalogId))
    1617          36 :         databaseId = InvalidOid;
    1618             :     else
    1619         186 :         databaseId = MyDatabaseId;
    1620             : 
    1621         222 :     RegisterCatalogInvalidation(PrepareInvalidationState(),
    1622             :                                 databaseId, catalogId);
    1623         222 : }
    1624             : 
    1625             : /*
    1626             :  * CacheInvalidateRelcache
    1627             :  *      Register invalidation of the specified relation's relcache entry
    1628             :  *      at end of command.
    1629             :  *
    1630             :  * This is used in places that need to force relcache rebuild but aren't
    1631             :  * changing any of the tuples recognized as contributors to the relcache
    1632             :  * entry by CacheInvalidateHeapTuple.  (An example is dropping an index.)
    1633             :  */
    1634             : void
    1635      161314 : CacheInvalidateRelcache(Relation relation)
    1636             : {
    1637             :     Oid         databaseId;
    1638             :     Oid         relationId;
    1639             : 
    1640      161314 :     relationId = RelationGetRelid(relation);
    1641      161314 :     if (relation->rd_rel->relisshared)
    1642        7100 :         databaseId = InvalidOid;
    1643             :     else
    1644      154214 :         databaseId = MyDatabaseId;
    1645             : 
    1646      161314 :     RegisterRelcacheInvalidation(PrepareInvalidationState(),
    1647             :                                  databaseId, relationId);
    1648      161314 : }
    1649             : 
    1650             : /*
    1651             :  * CacheInvalidateRelcacheAll
    1652             :  *      Register invalidation of the whole relcache at the end of command.
    1653             :  *
    1654             :  * This is used by ALTER PUBLICATION, since changes in publications may affect
    1655             :  * a large number of tables.
    1656             :  */
    1657             : void
    1658         196 : CacheInvalidateRelcacheAll(void)
    1659             : {
    1660         196 :     RegisterRelcacheInvalidation(PrepareInvalidationState(),
    1661             :                                  InvalidOid, InvalidOid);
    1662         196 : }
    1663             : 
    1664             : /*
    1665             :  * CacheInvalidateRelcacheByTuple
    1666             :  *      As above, but relation is identified by passing its pg_class tuple.
    1667             :  */
    1668             : void
    1669       76968 : CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
    1670             : {
    1671       76968 :     Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
    1672             :     Oid         databaseId;
    1673             :     Oid         relationId;
    1674             : 
    1675       76968 :     relationId = classtup->oid;
    1676       76968 :     if (classtup->relisshared)
    1677        1978 :         databaseId = InvalidOid;
    1678             :     else
    1679       74990 :         databaseId = MyDatabaseId;
    1680       76968 :     RegisterRelcacheInvalidation(PrepareInvalidationState(),
    1681             :                                  databaseId, relationId);
    1682       76968 : }
    1683             : 
    1684             : /*
    1685             :  * CacheInvalidateRelcacheByRelid
    1686             :  *      As above, but relation is identified by passing its OID.
    1687             :  *      This is the least efficient of the three options; use one of
    1688             :  *      the above routines if you have a Relation or pg_class tuple.
    1689             :  */
    1690             : void
    1691       30922 : CacheInvalidateRelcacheByRelid(Oid relid)
    1692             : {
    1693             :     HeapTuple   tup;
    1694             : 
    1695       30922 :     tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
    1696       30922 :     if (!HeapTupleIsValid(tup))
    1697           0 :         elog(ERROR, "cache lookup failed for relation %u", relid);
    1698       30922 :     CacheInvalidateRelcacheByTuple(tup);
    1699       30922 :     ReleaseSysCache(tup);
    1700       30922 : }
    1701             : 
    1702             : /*
    1703             :  * CacheInvalidateRelSync
    1704             :  *      Register invalidation of the cache in logical decoding output plugin
    1705             :  *      for a database.
    1706             :  *
    1707             :  * This type of invalidation message exists specifically for logical decoding
    1708             :  * output plugins.  Processes that do not decode WAL do nothing when they
    1709             :  * receive this message.
    1710             :  */
    1711             : void
    1712          12 : CacheInvalidateRelSync(Oid relid)
    1713             : {
    1714          12 :     RegisterRelsyncInvalidation(PrepareInvalidationState(),
    1715             :                                 MyDatabaseId, relid);
    1716          12 : }
    1717             : 
    1718             : /*
    1719             :  * CacheInvalidateRelSyncAll
    1720             :  *      Register invalidation of the whole cache in logical decoding output
    1721             :  *      plugin.
    1722             :  */
    1723             : void
    1724           6 : CacheInvalidateRelSyncAll(void)
    1725             : {
    1726           6 :     CacheInvalidateRelSync(InvalidOid);
    1727           6 : }
    1728             : 
    1729             : /*
    1730             :  * CacheInvalidateSmgr
    1731             :  *      Register invalidation of smgr references to a physical relation.
    1732             :  *
    1733             :  * Sending this type of invalidation msg forces other backends to close open
    1734             :  * smgr entries for the rel.  This should be done to flush dangling open-file
    1735             :  * references when the physical rel is being dropped or truncated.  Because
    1736             :  * these are nontransactional (i.e., not-rollback-able) operations, we just
    1737             :  * send the inval message immediately without any queuing.
    1738             :  *
    1739             :  * Note: in most cases there will have been a relcache flush issued against
    1740             :  * the rel at the logical level.  We need a separate smgr-level flush because
    1741             :  * it is possible for backends to have open smgr entries for rels they don't
    1742             :  * have a relcache entry for, e.g. because the only thing they ever did with
    1743             :  * the rel is write out dirty shared buffers.
    1744             :  *
    1745             :  * Note: because these messages are nontransactional, they won't be captured
    1746             :  * in commit/abort WAL entries.  Instead, calls to CacheInvalidateSmgr()
    1747             :  * should happen in low-level smgr.c routines, which are executed while
    1748             :  * replaying WAL as well as when creating it.
    1749             :  *
    1750             :  * Note: In order to avoid bloating SharedInvalidationMessage, we store only
    1751             :  * three bytes of the ProcNumber using what would otherwise be padding space.
    1752             :  * Thus, the maximum possible ProcNumber is 2^23-1.
    1753             :  */
    1754             : void
    1755      103566 : CacheInvalidateSmgr(RelFileLocatorBackend rlocator)
    1756             : {
    1757             :     SharedInvalidationMessage msg;
    1758             : 
    1759             :     /* verify optimization stated above stays valid */
    1760             :     StaticAssertDecl(MAX_BACKENDS_BITS <= 23,
    1761             :                      "MAX_BACKENDS_BITS is too big for inval.c");
    1762             : 
    1763      103566 :     msg.sm.id = SHAREDINVALSMGR_ID;
    1764      103566 :     msg.sm.backend_hi = rlocator.backend >> 16;
    1765      103566 :     msg.sm.backend_lo = rlocator.backend & 0xffff;
    1766      103566 :     msg.sm.rlocator = rlocator.locator;
    1767             :     /* check AddCatcacheInvalidationMessage() for an explanation */
    1768             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
    1769             : 
    1770      103566 :     SendSharedInvalidMessages(&msg, 1);
    1771      103566 : }
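
For symmetry (a hedged sketch, not code from this file): the receiving side,
LocalExecuteInvalidationMessage() earlier in this file, reassembles the
ProcNumber from the two packed fields before closing the smgr entry;
example_unpack_smgr_message() is a hypothetical name for that step.

#include "postgres.h"
#include "storage/sinval.h"

static RelFileLocatorBackend
example_unpack_smgr_message(const SharedInvalidSmgrMsg *sm)
{
    RelFileLocatorBackend rlocator;

    /* reverse the backend_hi/backend_lo packing done by CacheInvalidateSmgr */
    rlocator.locator = sm->rlocator;
    rlocator.backend = (sm->backend_hi << 16) | (int) sm->backend_lo;
    return rlocator;
}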
    1772             : 
    1773             : /*
    1774             :  * CacheInvalidateRelmap
    1775             :  *      Register invalidation of the relation mapping for a database,
    1776             :  *      or for the shared catalogs if databaseId is zero.
    1777             :  *
    1778             :  * Sending this type of invalidation msg forces other backends to re-read
    1779             :  * the indicated relation mapping file.  It is also necessary to send a
    1780             :  * relcache inval for the specific relations whose mapping has been altered,
    1781             :  * else the relcache won't get updated with the new filenode data.
    1782             :  *
    1783             :  * Note: because these messages are nontransactional, they won't be captured
    1784             :  * in commit/abort WAL entries.  Instead, calls to CacheInvalidateRelmap()
    1785             :  * should happen in low-level relmapper.c routines, which are executed while
    1786             :  * replaying WAL as well as when creating it.
    1787             :  */
    1788             : void
    1789         404 : CacheInvalidateRelmap(Oid databaseId)
    1790             : {
    1791             :     SharedInvalidationMessage msg;
    1792             : 
    1793         404 :     msg.rm.id = SHAREDINVALRELMAP_ID;
    1794         404 :     msg.rm.dbId = databaseId;
    1795             :     /* check AddCatcacheInvalidationMessage() for an explanation */
    1796             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
    1797             : 
    1798         404 :     SendSharedInvalidMessages(&msg, 1);
    1799         404 : }
    1800             : 
    1801             : 
    1802             : /*
    1803             :  * CacheRegisterSyscacheCallback
    1804             :  *      Register the specified function to be called for all future
    1805             :  *      invalidation events in the specified cache.  The cache ID and the
    1806             :  *      hash value of the tuple being invalidated will be passed to the
    1807             :  *      function.
    1808             :  *
    1809             :  * NOTE: Hash value zero will be passed if a cache reset request is received.
    1810             :  * In this case the called routines should flush all cached state.
    1811             :  * Yes, there's a possibility of a false match to zero, but it doesn't seem
    1812             :  * worth troubling over, especially since most of the current callees just
    1813             :  * flush all cached state anyway.
    1814             :  */
    1815             : void
    1816      593536 : CacheRegisterSyscacheCallback(int cacheid,
    1817             :                               SyscacheCallbackFunction func,
    1818             :                               Datum arg)
    1819             : {
    1820      593536 :     if (cacheid < 0 || cacheid >= SysCacheSize)
    1821           0 :         elog(FATAL, "invalid cache ID: %d", cacheid);
    1822      593536 :     if (syscache_callback_count >= MAX_SYSCACHE_CALLBACKS)
    1823           0 :         elog(FATAL, "out of syscache_callback_list slots");
    1824             : 
    1825      593536 :     if (syscache_callback_links[cacheid] == 0)
    1826             :     {
    1827             :         /* first callback for this cache */
    1828      418804 :         syscache_callback_links[cacheid] = syscache_callback_count + 1;
    1829             :     }
    1830             :     else
    1831             :     {
    1832             :         /* add to end of chain, so that older callbacks are called first */
    1833      174732 :         int         i = syscache_callback_links[cacheid] - 1;
    1834             : 
    1835      209108 :         while (syscache_callback_list[i].link > 0)
    1836       34376 :             i = syscache_callback_list[i].link - 1;
    1837      174732 :         syscache_callback_list[i].link = syscache_callback_count + 1;
    1838             :     }
    1839             : 
    1840      593536 :     syscache_callback_list[syscache_callback_count].id = cacheid;
    1841      593536 :     syscache_callback_list[syscache_callback_count].link = 0;
    1842      593536 :     syscache_callback_list[syscache_callback_count].function = func;
    1843      593536 :     syscache_callback_list[syscache_callback_count].arg = arg;
    1844             : 
    1845      593536 :     ++syscache_callback_count;
    1846      593536 : }
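
As a usage illustration (a hedged sketch, not code from this file): an
extension typically registers a syscache callback once, e.g. from _PG_init()
or its first-use initialization, and treats a zero hash value as "flush
everything".  The names my_proc_syscache_callback, my_invalidate_all, and
my_cache_init are hypothetical.

#include "postgres.h"
#include "utils/inval.h"
#include "utils/syscache.h"

static void
my_invalidate_all(void)
{
    /* hypothetical: drop all privately cached state derived from pg_proc */
}

static void
my_proc_syscache_callback(Datum arg, int cacheid, uint32 hashvalue)
{
    /*
     * hashvalue identifies (with possible collisions) the invalidated
     * pg_proc entry; zero means a cache reset.  Most callbacks simply
     * discard all derived state either way.
     */
    my_invalidate_all();
}

static void
my_cache_init(void)
{
    /* once, at initialization time; arg is passed back to the callback */
    CacheRegisterSyscacheCallback(PROCOID, my_proc_syscache_callback,
                                  (Datum) 0);
}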
    1847             : 
    1848             : /*
    1849             :  * CacheRegisterRelcacheCallback
    1850             :  *      Register the specified function to be called for all future
    1851             :  *      relcache invalidation events.  The OID of the relation being
    1852             :  *      invalidated will be passed to the function.
    1853             :  *
    1854             :  * NOTE: InvalidOid will be passed if a cache reset request is received.
    1855             :  * In this case the called routines should flush all cached state.
    1856             :  */
    1857             : void
    1858       46524 : CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
    1859             :                               Datum arg)
    1860             : {
    1861       46524 :     if (relcache_callback_count >= MAX_RELCACHE_CALLBACKS)
    1862           0 :         elog(FATAL, "out of relcache_callback_list slots");
    1863             : 
    1864       46524 :     relcache_callback_list[relcache_callback_count].function = func;
    1865       46524 :     relcache_callback_list[relcache_callback_count].arg = arg;
    1866             : 
    1867       46524 :     ++relcache_callback_count;
    1868       46524 : }
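
Likewise (a hedged sketch with hypothetical names, not code from this file):
a relcache callback receives the OID of the affected relation, with
InvalidOid meaning "flush everything".

#include "postgres.h"
#include "utils/inval.h"

static void
my_relcache_callback(Datum arg, Oid relid)
{
    if (!OidIsValid(relid))
    {
        /* cache reset: forget everything derived from relcache entries */
    }
    else
    {
        /* forget only the state derived from relation "relid" */
    }
}

static void
my_relcache_init(void)
{
    CacheRegisterRelcacheCallback(my_relcache_callback, (Datum) 0);
}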
    1869             : 
    1870             : /*
    1871             :  * CacheRegisterRelSyncCallback
    1872             :  *      Register the specified function to be called for all future
    1873             :  *      relsynccache invalidation events.
    1874             :  *
    1875             :  * This function is intended to be called from logical decoding output
    1876             :  * plugins.
    1877             :  */
    1878             : void
    1879         804 : CacheRegisterRelSyncCallback(RelSyncCallbackFunction func,
    1880             :                              Datum arg)
    1881             : {
    1882         804 :     if (relsync_callback_count >= MAX_RELSYNC_CALLBACKS)
    1883           0 :         elog(FATAL, "out of relsync_callback_list slots");
    1884             : 
    1885         804 :     relsync_callback_list[relsync_callback_count].function = func;
    1886         804 :     relsync_callback_list[relsync_callback_count].arg = arg;
    1887             : 
    1888         804 :     ++relsync_callback_count;
    1889         804 : }
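
In the same spirit (a hedged sketch with hypothetical names, not code from
this file): a logical decoding output plugin would register its relsync
callback during plugin startup; InvalidOid, as sent by
CacheInvalidateRelSyncAll(), means the entire cache is stale.

#include "postgres.h"
#include "utils/inval.h"

static void
my_relsync_callback(Datum arg, Oid relid)
{
    if (!OidIsValid(relid))
    {
        /* drop every cached per-relation decoding state */
    }
    else
    {
        /* drop the cached state for just this relation */
    }
}

static void
my_plugin_startup(void)
{
    CacheRegisterRelSyncCallback(my_relsync_callback, (Datum) 0);
}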
    1890             : 
    1891             : /*
    1892             :  * CallSyscacheCallbacks
    1893             :  *
    1894             :  * This is exported so that CatalogCacheFlushCatalog can call it, saving
    1895             :  * this module from knowing which catcache IDs correspond to which catalogs.
    1896             :  */
    1897             : void
    1898    24067174 : CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
    1899             : {
    1900             :     int         i;
    1901             : 
    1902    24067174 :     if (cacheid < 0 || cacheid >= SysCacheSize)
    1903           0 :         elog(ERROR, "invalid cache ID: %d", cacheid);
    1904             : 
    1905    24067174 :     i = syscache_callback_links[cacheid] - 1;
    1906    27639142 :     while (i >= 0)
    1907             :     {
    1908     3571968 :         struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
    1909             : 
    1910             :         Assert(ccitem->id == cacheid);
    1911     3571968 :         ccitem->function(ccitem->arg, cacheid, hashvalue);
    1912     3571968 :         i = ccitem->link - 1;
    1913             :     }
    1914    24067174 : }
    1915             : 
    1916             : /*
    1917             :  * CallRelSyncCallbacks
    1918             :  */
    1919             : void
    1920          62 : CallRelSyncCallbacks(Oid relid)
    1921             : {
    1922         104 :     for (int i = 0; i < relsync_callback_count; i++)
    1923             :     {
    1924          42 :         struct RELSYNCCALLBACK *ccitem = relsync_callback_list + i;
    1925             : 
    1926          42 :         ccitem->function(ccitem->arg, relid);
    1927             :     }
    1928          62 : }
    1929             : 
    1930             : /*
    1931             :  * LogLogicalInvalidations
    1932             :  *
    1933             :  * Emit WAL for invalidations caused by the current command.
    1934             :  *
    1935             :  * This is currently only used for logging invalidations at the command end
    1936             :  * or at commit time if any invalidations are pending.
    1937             :  */
    1938             : void
    1939       35984 : LogLogicalInvalidations(void)
    1940             : {
    1941             :     xl_xact_invals xlrec;
    1942             :     InvalidationMsgsGroup *group;
    1943             :     int         nmsgs;
    1944             : 
    1945             :     /* Quick exit if we haven't done anything with invalidation messages. */
    1946       35984 :     if (transInvalInfo == NULL)
    1947       23302 :         return;
    1948             : 
    1949       12682 :     group = &transInvalInfo->ii.CurrentCmdInvalidMsgs;
    1950       12682 :     nmsgs = NumMessagesInGroup(group);
    1951             : 
    1952       12682 :     if (nmsgs > 0)
    1953             :     {
    1954             :         /* prepare record */
    1955       10002 :         memset(&xlrec, 0, MinSizeOfXactInvals);
    1956       10002 :         xlrec.nmsgs = nmsgs;
    1957             : 
    1958             :         /* perform insertion */
    1959       10002 :         XLogBeginInsert();
    1960       10002 :         XLogRegisterData(&xlrec, MinSizeOfXactInvals);
    1961       10002 :         ProcessMessageSubGroupMulti(group, CatCacheMsgs,
    1962             :                                     XLogRegisterData(msgs,
    1963             :                                                      n * sizeof(SharedInvalidationMessage)));
    1964       10002 :         ProcessMessageSubGroupMulti(group, RelCacheMsgs,
    1965             :                                     XLogRegisterData(msgs,
    1966             :                                                      n * sizeof(SharedInvalidationMessage)));
    1967       10002 :         XLogInsert(RM_XACT_ID, XLOG_XACT_INVALIDATIONS);
    1968             :     }
    1969             : }

Generated by: LCOV version 1.16