LCOV - code coverage report
Current view: top level - src/backend/utils/cache - inval.c (source / functions)
Test: PostgreSQL 14devel          Date: 2020-11-27 12:05:55
Coverage:   Lines: 346 of 353 hit (98.0 %)   Functions: 36 of 36 hit (100.0 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * inval.c
       4             :  *    POSTGRES cache invalidation dispatcher code.
       5             :  *
       6             :  *  This is subtle stuff, so pay attention:
       7             :  *
       8             :  *  When a tuple is updated or deleted, our standard visibility rules
       9             :  *  consider that it is *still valid* so long as we are in the same command,
      10             :  *  ie, until the next CommandCounterIncrement() or transaction commit.
      11             :  *  (See access/heap/heapam_visibility.c, and note that system catalogs are
      12             :  *  generally scanned under the most current snapshot available, rather than
      13             :  *  the transaction snapshot.)  At the command boundary, the old tuple stops
      14             :  *  being valid and the new version, if any, becomes valid.  Therefore,
      15             :  *  we cannot simply flush a tuple from the system caches during heap_update()
      16             :  *  or heap_delete().  The tuple is still good at that point; what's more,
      17             :  *  even if we did flush it, it might be reloaded into the caches by a later
      18             :  *  request in the same command.  So the correct behavior is to keep a list
      19             :  *  of outdated (updated/deleted) tuples and then do the required cache
      20             :  *  flushes at the next command boundary.  We must also keep track of
      21             :  *  inserted tuples so that we can flush "negative" cache entries that match
      22             :  *  the new tuples; again, that mustn't happen until end of command.
      23             :  *
      24             :  *  Once we have finished the command, we still need to remember inserted
      25             :  *  tuples (including new versions of updated tuples), so that we can flush
      26             :  *  them from the caches if we abort the transaction.  Similarly, we'd better
      27             :  *  be able to flush "negative" cache entries that may have been loaded in
      28             :  *  place of deleted tuples, so we still need the deleted ones too.
      29             :  *
      30             :  *  If we successfully complete the transaction, we have to broadcast all
      31             :  *  these invalidation events to other backends (via the SI message queue)
      32             :  *  so that they can flush obsolete entries from their caches.  Note we have
      33             :  *  to record the transaction commit before sending SI messages, otherwise
      34             :  *  the other backends won't see our updated tuples as good.
      35             :  *
      36             :  *  When a subtransaction aborts, we can process and discard any events
      37             :  *  it has queued.  When a subtransaction commits, we just add its events
      38             :  *  to the pending lists of the parent transaction.
      39             :  *
      40             :  *  In short, we need to remember until xact end every insert or delete
      41             :  *  of a tuple that might be in the system caches.  Updates are treated as
      42             :  *  two events, delete + insert, for simplicity.  (If the update doesn't
      43             :  *  change the tuple hash value, catcache.c optimizes this into one event.)
      44             :  *
      45             :  *  We do not need to register EVERY tuple operation in this way, just those
      46             :  *  on tuples in relations that have associated catcaches.  We do, however,
      47             :  *  have to register every operation on every tuple that *could* be in a
      48             :  *  catcache, whether or not it currently is in our cache.  Also, if the
      49             :  *  tuple is in a relation that has multiple catcaches, we need to register
      50             :  *  an invalidation message for each such catcache.  catcache.c's
      51             :  *  PrepareToInvalidateCacheTuple() routine provides the knowledge of which
      52             :  *  catcaches may need invalidation for a given tuple.
      53             :  *
      54             :  *  Also, whenever we see an operation on a pg_class, pg_attribute, or
      55             :  *  pg_index tuple, we register a relcache flush operation for the relation
      56             :  *  described by that tuple (as specified in CacheInvalidateHeapTuple()).
      57             :  *  Likewise for pg_constraint tuples for foreign keys on relations.
      58             :  *
      59             :  *  We keep the relcache flush requests in lists separate from the catcache
      60             :  *  tuple flush requests.  This allows us to issue all the pending catcache
      61             :  *  flushes before we issue relcache flushes, which saves us from loading
      62             :  *  a catcache tuple during relcache load only to flush it again right away.
      63             :  *  Also, we avoid queuing multiple relcache flush requests for the same
      64             :  *  relation, since a relcache flush is relatively expensive to do.
      65             :  *  (XXX is it worth testing likewise for duplicate catcache flush entries?
      66             :  *  Probably not.)
      67             :  *
      68             :  *  If a relcache flush is issued for a system relation that we preload
      69             :  *  from the relcache init file, we must also delete the init file so that
      70             :  *  it will be rebuilt during the next backend restart.  The actual work of
      71             :  *  manipulating the init file is in relcache.c, but we keep track of the
      72             :  *  need for it here.
      73             :  *
      74             :  *  The request lists proper are kept in CurTransactionContext of their
      75             :  *  creating (sub)transaction, since they can be forgotten on abort of that
      76             :  *  transaction but must be kept till top-level commit otherwise.  For
      77             :  *  simplicity we keep the controlling list-of-lists in TopTransactionContext.
      78             :  *
      79             :  *  Currently, inval messages are sent without regard for the possibility
      80             :  *  that the object described by the catalog tuple might be a session-local
      81             :  *  object such as a temporary table.  This is because (1) this code has
      82             :  *  no practical way to tell the difference, and (2) it is not certain that
      83             :  *  other backends don't have catalog cache or even relcache entries for
      84             :  *  such tables, anyway; there is nothing that prevents that.  It might be
      85             :  *  worth trying to avoid sending such inval traffic in the future, if those
      86             :  *  problems can be overcome cheaply.
      87             :  *
      88             :  *  When wal_level=logical, write invalidations into WAL at each command end to
       89             :  *  support decoding of in-progress transactions.  See
      90             :  *  CommandEndInvalidationMessages.
      91             :  *
      92             :  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
      93             :  * Portions Copyright (c) 1994, Regents of the University of California
      94             :  *
      95             :  * IDENTIFICATION
      96             :  *    src/backend/utils/cache/inval.c
      97             :  *
      98             :  *-------------------------------------------------------------------------
      99             :  */
     100             : #include "postgres.h"
     101             : 
     102             : #include <limits.h>
     103             : 
     104             : #include "access/htup_details.h"
     105             : #include "access/xact.h"
     106             : #include "catalog/catalog.h"
     107             : #include "catalog/pg_constraint.h"
     108             : #include "miscadmin.h"
     109             : #include "storage/sinval.h"
     110             : #include "storage/smgr.h"
     111             : #include "utils/catcache.h"
     112             : #include "utils/inval.h"
     113             : #include "utils/memdebug.h"
     114             : #include "utils/memutils.h"
     115             : #include "utils/rel.h"
     116             : #include "utils/relmapper.h"
     117             : #include "utils/snapmgr.h"
     118             : #include "utils/syscache.h"
     119             : 
     120             : 
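The file header above describes the invalidation lifecycle in prose. As a rough orientation sketch (not code from this file; the real call sites are in heapam.c, xact.c and the DDL code, and the helper name below is hypothetical), one catalog update passes through this machinery roughly like this:

#include "postgres.h"

#include "access/htup.h"
#include "access/xact.h"
#include "utils/inval.h"
#include "utils/rel.h"

/* Hypothetical condensed view of one catalog update, for orientation only */
static void
sketch_catalog_update(Relation catalog, HeapTuple oldtup, HeapTuple newtup)
{
    /* heap_update()/heap_delete() queue inval events; caches stay intact */
    CacheInvalidateHeapTuple(catalog, oldtup, newtup);

    /*
     * The next command boundary applies the queued events to our own caches
     * (CommandEndInvalidationMessages) and moves them to PriorCmdInvalidMsgs.
     */
    CommandCounterIncrement();

    /*
     * At top-level commit, AtEOXact_Inval(true) broadcasts the accumulated
     * messages via the SI queue; on abort, AtEOXact_Inval(false) replays
     * them locally so our own caches forget the aborted changes.
     */
}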
     121             : /*
     122             :  * To minimize palloc traffic, we keep pending requests in successively-
     123             :  * larger chunks (a slightly more sophisticated version of an expansible
     124             :  * array).  All request types can be stored as SharedInvalidationMessage
     125             :  * records.  The ordering of requests within a list is never significant.
     126             :  */
     127             : typedef struct InvalidationChunk
     128             : {
     129             :     struct InvalidationChunk *next; /* list link */
     130             :     int         nitems;         /* # items currently stored in chunk */
     131             :     int         maxitems;       /* size of allocated array in this chunk */
     132             :     SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER];
     133             : } InvalidationChunk;
     134             : 
     135             : typedef struct InvalidationListHeader
     136             : {
     137             :     InvalidationChunk *cclist;  /* list of chunks holding catcache msgs */
     138             :     InvalidationChunk *rclist;  /* list of chunks holding relcache msgs */
     139             : } InvalidationListHeader;
     140             : 
     141             : /*----------------
     142             :  * Invalidation info is divided into two lists:
     143             :  *  1) events so far in current command, not yet reflected to caches.
     144             :  *  2) events in previous commands of current transaction; these have
     145             :  *     been reflected to local caches, and must be either broadcast to
     146             :  *     other backends or rolled back from local cache when we commit
     147             :  *     or abort the transaction.
     148             :  * Actually, we need two such lists for each level of nested transaction,
     149             :  * so that we can discard events from an aborted subtransaction.  When
     150             :  * a subtransaction commits, we append its lists to the parent's lists.
     151             :  *
     152             :  * The relcache-file-invalidated flag can just be a simple boolean,
     153             :  * since we only act on it at transaction commit; we don't care which
     154             :  * command of the transaction set it.
     155             :  *----------------
     156             :  */
     157             : 
     158             : typedef struct TransInvalidationInfo
     159             : {
     160             :     /* Back link to parent transaction's info */
     161             :     struct TransInvalidationInfo *parent;
     162             : 
     163             :     /* Subtransaction nesting depth */
     164             :     int         my_level;
     165             : 
     166             :     /* head of current-command event list */
     167             :     InvalidationListHeader CurrentCmdInvalidMsgs;
     168             : 
     169             :     /* head of previous-commands event list */
     170             :     InvalidationListHeader PriorCmdInvalidMsgs;
     171             : 
     172             :     /* init file must be invalidated? */
     173             :     bool        RelcacheInitFileInval;
     174             : } TransInvalidationInfo;
     175             : 
     176             : static TransInvalidationInfo *transInvalInfo = NULL;
     177             : 
     178             : static SharedInvalidationMessage *SharedInvalidMessagesArray;
     179             : static int  numSharedInvalidMessagesArray;
     180             : static int  maxSharedInvalidMessagesArray;
     181             : 
     182             : 
     183             : /*
     184             :  * Dynamically-registered callback functions.  Current implementation
     185             :  * assumes there won't be enough of these to justify a dynamically resizable
     186             :  * array; it'd be easy to improve that if needed.
     187             :  *
     188             :  * To avoid searching in CallSyscacheCallbacks, all callbacks for a given
     189             :  * syscache are linked into a list pointed to by syscache_callback_links[id].
     190             :  * The link values are syscache_callback_list[] index plus 1, or 0 for none.
     191             :  */
     192             : 
     193             : #define MAX_SYSCACHE_CALLBACKS 64
     194             : #define MAX_RELCACHE_CALLBACKS 10
     195             : 
     196             : static struct SYSCACHECALLBACK
     197             : {
     198             :     int16       id;             /* cache number */
     199             :     int16       link;           /* next callback index+1 for same cache */
     200             :     SyscacheCallbackFunction function;
     201             :     Datum       arg;
     202             : }           syscache_callback_list[MAX_SYSCACHE_CALLBACKS];
     203             : 
     204             : static int16 syscache_callback_links[SysCacheSize];
     205             : 
     206             : static int  syscache_callback_count = 0;
     207             : 
     208             : static struct RELCACHECALLBACK
     209             : {
     210             :     RelcacheCallbackFunction function;
     211             :     Datum       arg;
     212             : }           relcache_callback_list[MAX_RELCACHE_CALLBACKS];
     213             : 
     214             : static int  relcache_callback_count = 0;
     215             : 
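These fixed-size arrays hold callbacks registered through CacheRegisterSyscacheCallback() and CacheRegisterRelcacheCallback(), which are declared in utils/inval.h and defined later in this file. As an illustration of the extension-side usage (every name prefixed with my_ is hypothetical), a module caching pg_proc-derived data might register like this:

#include "postgres.h"

#include "utils/inval.h"
#include "utils/syscache.h"

/* hypothetical extension callbacks */
static void
my_proc_syscache_callback(Datum arg, int cacheid, uint32 hashvalue)
{
    /* hashvalue == 0 means "flush everything"; see InvalidateSystemCaches() */
}

static void
my_relcache_callback(Datum arg, Oid relid)
{
    /* relid == InvalidOid means all relations were invalidated */
}

void
my_extension_init(void)
{
    CacheRegisterSyscacheCallback(PROCOID, my_proc_syscache_callback,
                                  (Datum) 0);
    CacheRegisterRelcacheCallback(my_relcache_callback, (Datum) 0);
}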
     216             : /* ----------------------------------------------------------------
     217             :  *              Invalidation list support functions
     218             :  *
     219             :  * These three routines encapsulate processing of the "chunked"
     220             :  * representation of what is logically just a list of messages.
     221             :  * ----------------------------------------------------------------
     222             :  */
     223             : 
     224             : /*
     225             :  * AddInvalidationMessage
     226             :  *      Add an invalidation message to a list (of chunks).
     227             :  *
     228             :  * Note that we do not pay any great attention to maintaining the original
     229             :  * ordering of the messages.
     230             :  */
     231             : static void
     232     6056102 : AddInvalidationMessage(InvalidationChunk **listHdr,
     233             :                        SharedInvalidationMessage *msg)
     234             : {
     235     6056102 :     InvalidationChunk *chunk = *listHdr;
     236             : 
     237     6056102 :     if (chunk == NULL)
     238             :     {
     239             :         /* First time through; create initial chunk */
     240             : #define FIRSTCHUNKSIZE 32
     241             :         chunk = (InvalidationChunk *)
     242     1160992 :             MemoryContextAlloc(CurTransactionContext,
     243             :                                offsetof(InvalidationChunk, msgs) +
     244             :                                FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage));
     245     1160992 :         chunk->nitems = 0;
     246     1160992 :         chunk->maxitems = FIRSTCHUNKSIZE;
     247     1160992 :         chunk->next = *listHdr;
     248     1160992 :         *listHdr = chunk;
     249             :     }
     250     4895110 :     else if (chunk->nitems >= chunk->maxitems)
     251             :     {
     252             :         /* Need another chunk; double size of last chunk */
     253       14960 :         int         chunksize = 2 * chunk->maxitems;
     254             : 
     255             :         chunk = (InvalidationChunk *)
     256       14960 :             MemoryContextAlloc(CurTransactionContext,
     257             :                                offsetof(InvalidationChunk, msgs) +
     258             :                                chunksize * sizeof(SharedInvalidationMessage));
     259       14960 :         chunk->nitems = 0;
     260       14960 :         chunk->maxitems = chunksize;
     261       14960 :         chunk->next = *listHdr;
     262       14960 :         *listHdr = chunk;
     263             :     }
     264             :     /* Okay, add message to current chunk */
     265     6056102 :     chunk->msgs[chunk->nitems] = *msg;
     266     6056102 :     chunk->nitems++;
     267     6056102 : }
     268             : 
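The doubling policy keeps allocation traffic low: for example, queuing 1,000 messages in one list fills chunks of 32, 64, 128, 256, 512 and finally 1,024 entries, i.e. six MemoryContextAlloc calls in total rather than one per message.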
     269             : /*
     270             :  * Append one list of invalidation message chunks to another, resetting
     271             :  * the source chunk-list pointer to NULL.
     272             :  */
     273             : static void
     274     1911832 : AppendInvalidationMessageList(InvalidationChunk **destHdr,
     275             :                               InvalidationChunk **srcHdr)
     276             : {
     277     1911832 :     InvalidationChunk *chunk = *srcHdr;
     278             : 
     279     1911832 :     if (chunk == NULL)
     280      751054 :         return;                 /* nothing to do */
     281             : 
     282     1175786 :     while (chunk->next != NULL)
     283       15008 :         chunk = chunk->next;
     284             : 
     285     1160778 :     chunk->next = *destHdr;
     286             : 
     287     1160778 :     *destHdr = *srcHdr;
     288             : 
     289     1160778 :     *srcHdr = NULL;
     290             : }
     291             : 
     292             : /*
     293             :  * Process a list of invalidation messages.
     294             :  *
     295             :  * This is a macro that executes the given code fragment for each message in
     296             :  * a message chunk list.  The fragment should refer to the message as *msg.
     297             :  */
     298             : #define ProcessMessageList(listHdr, codeFragment) \
     299             :     do { \
     300             :         InvalidationChunk *_chunk; \
     301             :         for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
     302             :         { \
     303             :             int     _cindex; \
     304             :             for (_cindex = 0; _cindex < _chunk->nitems; _cindex++) \
     305             :             { \
     306             :                 SharedInvalidationMessage *msg = &_chunk->msgs[_cindex]; \
     307             :                 codeFragment; \
     308             :             } \
     309             :         } \
     310             :     } while (0)
     311             : 
     312             : /*
     313             :  * Process a list of invalidation messages group-wise.
     314             :  *
     315             :  * As above, but the code fragment can handle an array of messages.
     316             :  * The fragment should refer to the messages as msgs[], with n entries.
     317             :  */
     318             : #define ProcessMessageListMulti(listHdr, codeFragment) \
     319             :     do { \
     320             :         InvalidationChunk *_chunk; \
     321             :         for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
     322             :         { \
     323             :             SharedInvalidationMessage *msgs = _chunk->msgs; \
     324             :             int     n = _chunk->nitems; \
     325             :             codeFragment; \
     326             :         } \
     327             :     } while (0)
     328             : 
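To make the codeFragment convention concrete, here is a hypothetical helper (not part of this file) that counts the messages in one physical list using the Multi form; the fragment is just a statement that may refer to msgs[] and n:

/* Hypothetical helper illustrating the codeFragment convention */
static int
CountInvalidationMessages(InvalidationChunk *listHdr)
{
    int         nmsgs = 0;

    /* the extra parentheses keep the comma inside one macro argument */
    ProcessMessageListMulti(listHdr, ((void) msgs, nmsgs += n));

    return nmsgs;
}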
     329             : 
     330             : /* ----------------------------------------------------------------
     331             :  *              Invalidation set support functions
     332             :  *
     333             :  * These routines understand about the division of a logical invalidation
     334             :  * list into separate physical lists for catcache and relcache entries.
     335             :  * ----------------------------------------------------------------
     336             :  */
     337             : 
     338             : /*
     339             :  * Add a catcache inval entry
     340             :  */
     341             : static void
     342     5075276 : AddCatcacheInvalidationMessage(InvalidationListHeader *hdr,
     343             :                                int id, uint32 hashValue, Oid dbId)
     344             : {
     345             :     SharedInvalidationMessage msg;
     346             : 
     347             :     Assert(id < CHAR_MAX);
     348     5075276 :     msg.cc.id = (int8) id;
     349     5075276 :     msg.cc.dbId = dbId;
     350     5075276 :     msg.cc.hashValue = hashValue;
     351             : 
     352             :     /*
      353             :      * Mark the padding bytes in SharedInvalidationMessage structs as
      354             :      * defined.  Otherwise the sinvaladt.c ringbuffer, which is accessed by
      355             :      * multiple processes, will cause spurious valgrind warnings about
      356             :      * undefined memory being used.  That's because valgrind remembers the
      357             :      * undefined bytes from the last local process's store, not realizing that
      358             :      * another process has written since, filling the previously uninitialized
      359             :      * bytes.
     360             :      */
     361             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     362             : 
     363     5075276 :     AddInvalidationMessage(&hdr->cclist, &msg);
     364     5075276 : }
     365             : 
     366             : /*
     367             :  * Add a whole-catalog inval entry
     368             :  */
     369             : static void
     370         176 : AddCatalogInvalidationMessage(InvalidationListHeader *hdr,
     371             :                               Oid dbId, Oid catId)
     372             : {
     373             :     SharedInvalidationMessage msg;
     374             : 
     375         176 :     msg.cat.id = SHAREDINVALCATALOG_ID;
     376         176 :     msg.cat.dbId = dbId;
     377         176 :     msg.cat.catId = catId;
     378             :     /* check AddCatcacheInvalidationMessage() for an explanation */
     379             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     380             : 
     381         176 :     AddInvalidationMessage(&hdr->cclist, &msg);
     382         176 : }
     383             : 
     384             : /*
     385             :  * Add a relcache inval entry
     386             :  */
     387             : static void
     388     1691098 : AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
     389             :                                Oid dbId, Oid relId)
     390             : {
     391             :     SharedInvalidationMessage msg;
     392             : 
     393             :     /*
     394             :      * Don't add a duplicate item. We assume dbId need not be checked because
     395             :      * it will never change. InvalidOid for relId means all relations so we
     396             :      * don't need to add individual ones when it is present.
     397             :      */
     398     5819108 :     ProcessMessageList(hdr->rclist,
     399             :                        if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
     400             :                            (msg->rc.relId == relId ||
     401             :                             msg->rc.relId == InvalidOid))
     402             :                        return);
     403             : 
     404             :     /* OK, add the item */
     405      600704 :     msg.rc.id = SHAREDINVALRELCACHE_ID;
     406      600704 :     msg.rc.dbId = dbId;
     407      600704 :     msg.rc.relId = relId;
     408             :     /* check AddCatcacheInvalidationMessage() for an explanation */
     409             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     410             : 
     411      600704 :     AddInvalidationMessage(&hdr->rclist, &msg);
     412             : }
     413             : 
     414             : /*
     415             :  * Add a snapshot inval entry
     416             :  */
     417             : static void
     418     3831652 : AddSnapshotInvalidationMessage(InvalidationListHeader *hdr,
     419             :                                Oid dbId, Oid relId)
     420             : {
     421             :     SharedInvalidationMessage msg;
     422             : 
     423             :     /* Don't add a duplicate item */
     424             :     /* We assume dbId need not be checked because it will never change */
     425     4163682 :     ProcessMessageList(hdr->rclist,
     426             :                        if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
     427             :                            msg->sn.relId == relId)
     428             :                        return);
     429             : 
     430             :     /* OK, add the item */
     431      379946 :     msg.sn.id = SHAREDINVALSNAPSHOT_ID;
     432      379946 :     msg.sn.dbId = dbId;
     433      379946 :     msg.sn.relId = relId;
     434             :     /* check AddCatcacheInvalidationMessage() for an explanation */
     435             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
     436             : 
     437      379946 :     AddInvalidationMessage(&hdr->rclist, &msg);
     438             : }
     439             : 
     440             : /*
     441             :  * Append one list of invalidation messages to another, resetting
     442             :  * the source list to empty.
     443             :  */
     444             : static void
     445      955916 : AppendInvalidationMessages(InvalidationListHeader *dest,
     446             :                            InvalidationListHeader *src)
     447             : {
     448      955916 :     AppendInvalidationMessageList(&dest->cclist, &src->cclist);
     449      955916 :     AppendInvalidationMessageList(&dest->rclist, &src->rclist);
     450      955916 : }
     451             : 
     452             : /*
     453             :  * Execute the given function for all the messages in an invalidation list.
     454             :  * The list is not altered.
     455             :  *
     456             :  * catcache entries are processed first, for reasons mentioned above.
     457             :  */
     458             : static void
     459      640992 : ProcessInvalidationMessages(InvalidationListHeader *hdr,
     460             :                             void (*func) (SharedInvalidationMessage *msg))
     461             : {
     462     5813218 :     ProcessMessageList(hdr->cclist, func(msg));
     463     2035696 :     ProcessMessageList(hdr->rclist, func(msg));
     464      640988 : }
     465             : 
     466             : /*
     467             :  * As above, but the function is able to process an array of messages
     468             :  * rather than just one at a time.
     469             :  */
     470             : static void
     471      947024 : ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr,
     472             :                                  void (*func) (const SharedInvalidationMessage *msgs, int n))
     473             : {
     474     2046830 :     ProcessMessageListMulti(hdr->cclist, func(msgs, n));
     475     2157340 :     ProcessMessageListMulti(hdr->rclist, func(msgs, n));
     476      947024 : }
     477             : 
     478             : /* ----------------------------------------------------------------
     479             :  *                    private support functions
     480             :  * ----------------------------------------------------------------
     481             :  */
     482             : 
     483             : /*
     484             :  * RegisterCatcacheInvalidation
     485             :  *
     486             :  * Register an invalidation event for a catcache tuple entry.
     487             :  */
     488             : static void
     489     5075276 : RegisterCatcacheInvalidation(int cacheId,
     490             :                              uint32 hashValue,
     491             :                              Oid dbId)
     492             : {
     493     5075276 :     AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
     494             :                                    cacheId, hashValue, dbId);
     495     5075276 : }
     496             : 
     497             : /*
     498             :  * RegisterCatalogInvalidation
     499             :  *
     500             :  * Register an invalidation event for all catcache entries from a catalog.
     501             :  */
     502             : static void
     503         176 : RegisterCatalogInvalidation(Oid dbId, Oid catId)
     504             : {
     505         176 :     AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
     506             :                                   dbId, catId);
     507         176 : }
     508             : 
     509             : /*
     510             :  * RegisterRelcacheInvalidation
     511             :  *
     512             :  * As above, but register a relcache invalidation event.
     513             :  */
     514             : static void
     515     1691098 : RegisterRelcacheInvalidation(Oid dbId, Oid relId)
     516             : {
     517     1691098 :     AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
     518             :                                    dbId, relId);
     519             : 
     520             :     /*
     521             :      * Most of the time, relcache invalidation is associated with system
     522             :      * catalog updates, but there are a few cases where it isn't.  Quick hack
     523             :      * to ensure that the next CommandCounterIncrement() will think that we
     524             :      * need to do CommandEndInvalidationMessages().
     525             :      */
     526     1691098 :     (void) GetCurrentCommandId(true);
     527             : 
     528             :     /*
     529             :      * If the relation being invalidated is one of those cached in a relcache
     530             :      * init file, mark that we need to zap that file at commit. For simplicity
     531             :      * invalidations for a specific database always invalidate the shared file
     532             :      * as well.  Also zap when we are invalidating whole relcache.
     533             :      */
     534     1691098 :     if (relId == InvalidOid || RelationIdIsInInitFile(relId))
     535      131900 :         transInvalInfo->RelcacheInitFileInval = true;
     536     1691098 : }
     537             : 
     538             : /*
     539             :  * RegisterSnapshotInvalidation
     540             :  *
     541             :  * Register an invalidation event for MVCC scans against a given catalog.
     542             :  * Only needed for catalogs that don't have catcaches.
     543             :  */
     544             : static void
     545     3831652 : RegisterSnapshotInvalidation(Oid dbId, Oid relId)
     546             : {
     547     3831652 :     AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
     548             :                                    dbId, relId);
     549     3831652 : }
     550             : 
     551             : /*
     552             :  * LocalExecuteInvalidationMessage
     553             :  *
     554             :  * Process a single invalidation message (which could be of any type).
     555             :  * Only the local caches are flushed; this does not transmit the message
     556             :  * to other backends.
     557             :  */
     558             : void
     559    24524142 : LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
     560             : {
     561    24524142 :     if (msg->id >= 0)
     562             :     {
     563    20873570 :         if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
     564             :         {
     565    16065942 :             InvalidateCatalogSnapshot();
     566             : 
     567    16065942 :             SysCacheInvalidate(msg->cc.id, msg->cc.hashValue);
     568             : 
     569    16065942 :             CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
     570             :         }
     571             :     }
     572     3650572 :     else if (msg->id == SHAREDINVALCATALOG_ID)
     573             :     {
     574         566 :         if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
     575             :         {
     576         488 :             InvalidateCatalogSnapshot();
     577             : 
     578         488 :             CatalogCacheFlushCatalog(msg->cat.catId);
     579             : 
     580             :             /* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
     581             :         }
     582             :     }
     583     3650006 :     else if (msg->id == SHAREDINVALRELCACHE_ID)
     584             :     {
     585     1909950 :         if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
     586             :         {
     587             :             int         i;
     588             : 
     589     1496272 :             if (msg->rc.relId == InvalidOid)
     590          24 :                 RelationCacheInvalidate();
     591             :             else
     592     1496248 :                 RelationCacheInvalidateEntry(msg->rc.relId);
     593             : 
     594     4059764 :             for (i = 0; i < relcache_callback_count; i++)
     595             :             {
     596     2563496 :                 struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
     597             : 
     598     2563496 :                 ccitem->function(ccitem->arg, msg->rc.relId);
     599             :             }
     600             :         }
     601             :     }
     602     1740056 :     else if (msg->id == SHAREDINVALSMGR_ID)
     603             :     {
     604             :         /*
     605             :          * We could have smgr entries for relations of other databases, so no
     606             :          * short-circuit test is possible here.
     607             :          */
     608             :         RelFileNodeBackend rnode;
     609             : 
     610      225606 :         rnode.node = msg->sm.rnode;
     611      225606 :         rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
     612      225606 :         smgrclosenode(rnode);
     613             :     }
     614     1514450 :     else if (msg->id == SHAREDINVALRELMAP_ID)
     615             :     {
     616             :         /* We only care about our own database and shared catalogs */
     617         592 :         if (msg->rm.dbId == InvalidOid)
     618         378 :             RelationMapInvalidate(true);
     619         214 :         else if (msg->rm.dbId == MyDatabaseId)
     620         166 :             RelationMapInvalidate(false);
     621             :     }
     622     1513858 :     else if (msg->id == SHAREDINVALSNAPSHOT_ID)
     623             :     {
     624             :         /* We only care about our own database and shared catalogs */
     625     1513858 :         if (msg->rm.dbId == InvalidOid)
     626       36922 :             InvalidateCatalogSnapshot();
     627     1476936 :         else if (msg->rm.dbId == MyDatabaseId)
     628     1146066 :             InvalidateCatalogSnapshot();
     629             :     }
     630             :     else
     631           0 :         elog(FATAL, "unrecognized SI message ID: %d", msg->id);
     632    24524138 : }
     633             : 
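The dispatch above follows the ID convention from storage/sinval.h: a non-negative id is a catcache number, and the special message types use small negative constants. A condensed reminder (paraphrased; see storage/sinval.h for the authoritative definitions):

/* paraphrased from storage/sinval.h */
typedef struct
{
    int8        id;             /* cache ID --- must be first */
    Oid         dbId;           /* database ID, or 0 if a shared catalog */
    uint32      hashValue;      /* hash value of key for this catcache */
} SharedInvalCatcacheMsg;

#define SHAREDINVALCATALOG_ID   (-1)    /* flush a whole catalog's catcaches */
#define SHAREDINVALRELCACHE_ID  (-2)    /* flush one relcache entry (or all) */
#define SHAREDINVALSMGR_ID      (-3)    /* close smgr references to a rel */
#define SHAREDINVALRELMAP_ID    (-4)    /* relation map was updated */
#define SHAREDINVALSNAPSHOT_ID  (-5)    /* invalidate catalog snapshot */

typedef union
{
    int8        id;             /* type field --- must be first */
    SharedInvalCatcacheMsg cc;
    /* ...cat, rc, sm, rm and sn members cover the other message types... */
} SharedInvalidationMessage;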
     634             : /*
     635             :  *      InvalidateSystemCaches
     636             :  *
     637             :  *      This blows away all tuples in the system catalog caches and
     638             :  *      all the cached relation descriptors and smgr cache entries.
     639             :  *      Relation descriptors that have positive refcounts are then rebuilt.
     640             :  *
     641             :  *      We call this when we see a shared-inval-queue overflow signal,
     642             :  *      since that tells us we've lost some shared-inval messages and hence
     643             :  *      don't know what needs to be invalidated.
     644             :  */
     645             : void
     646        2410 : InvalidateSystemCaches(void)
     647             : {
     648             :     int         i;
     649             : 
     650        2410 :     InvalidateCatalogSnapshot();
     651        2410 :     ResetCatalogCaches();
     652        2410 :     RelationCacheInvalidate();  /* gets smgr and relmap too */
     653             : 
     654       28766 :     for (i = 0; i < syscache_callback_count; i++)
     655             :     {
     656       26356 :         struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
     657             : 
     658       26356 :         ccitem->function(ccitem->arg, ccitem->id, 0);
     659             :     }
     660             : 
     661        5532 :     for (i = 0; i < relcache_callback_count; i++)
     662             :     {
     663        3122 :         struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
     664             : 
     665        3122 :         ccitem->function(ccitem->arg, InvalidOid);
     666             :     }
     667        2410 : }
     668             : 
     669             : 
     670             : /* ----------------------------------------------------------------
     671             :  *                    public functions
     672             :  * ----------------------------------------------------------------
     673             :  */
     674             : 
     675             : /*
     676             :  * AcceptInvalidationMessages
     677             :  *      Read and process invalidation messages from the shared invalidation
     678             :  *      message queue.
     679             :  *
     680             :  * Note:
     681             :  *      This should be called as the first step in processing a transaction.
     682             :  */
     683             : void
     684    29904148 : AcceptInvalidationMessages(void)
     685             : {
     686    29904148 :     ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
     687             :                                  InvalidateSystemCaches);
     688             : 
     689             :     /*
     690             :      * Test code to force cache flushes anytime a flush could happen.
     691             :      *
     692             :      * If used with CLOBBER_FREED_MEMORY, CLOBBER_CACHE_ALWAYS provides a
     693             :      * fairly thorough test that the system contains no cache-flush hazards.
     694             :      * However, it also makes the system unbelievably slow --- the regression
     695             :      * tests take about 100 times longer than normal.
     696             :      *
     697             :      * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
     698             :      * slows things by at least a factor of 10000, so I wouldn't suggest
     699             :      * trying to run the entire regression tests that way.  It's useful to try
     700             :      * a few simple tests, to make sure that cache reload isn't subject to
     701             :      * internal cache-flush hazards, but after you've done a few thousand
     702             :      * recursive reloads it's unlikely you'll learn more.
     703             :      */
     704             : #if defined(CLOBBER_CACHE_ALWAYS)
     705             :     {
     706             :         static bool in_recursion = false;
     707             : 
     708             :         if (!in_recursion)
     709             :         {
     710             :             in_recursion = true;
     711             :             InvalidateSystemCaches();
     712             :             in_recursion = false;
     713             :         }
     714             :     }
     715             : #elif defined(CLOBBER_CACHE_RECURSIVELY)
     716             :     {
     717             :         static int  recursion_depth = 0;
     718             : 
     719             :         /* Maximum depth is arbitrary depending on your threshold of pain */
     720             :         if (recursion_depth < 3)
     721             :         {
     722             :             recursion_depth++;
     723             :             InvalidateSystemCaches();
     724             :             recursion_depth--;
     725             :         }
     726             :     }
     727             : #endif
     728    29904148 : }
     729             : 
     730             : /*
     731             :  * PrepareInvalidationState
     732             :  *      Initialize inval lists for the current (sub)transaction.
     733             :  */
     734             : static void
     735     6901740 : PrepareInvalidationState(void)
     736             : {
     737             :     TransInvalidationInfo *myInfo;
     738             : 
     739    13484606 :     if (transInvalInfo != NULL &&
     740     6582866 :         transInvalInfo->my_level == GetCurrentTransactionNestLevel())
     741     6582768 :         return;
     742             : 
     743             :     myInfo = (TransInvalidationInfo *)
     744      318972 :         MemoryContextAllocZero(TopTransactionContext,
     745             :                                sizeof(TransInvalidationInfo));
     746      318972 :     myInfo->parent = transInvalInfo;
     747      318972 :     myInfo->my_level = GetCurrentTransactionNestLevel();
     748             : 
     749             :     /*
     750             :      * If there's any previous entry, this one should be for a deeper nesting
     751             :      * level.
     752             :      */
     753             :     Assert(transInvalInfo == NULL ||
     754             :            myInfo->my_level > transInvalInfo->my_level);
     755             : 
     756      318972 :     transInvalInfo = myInfo;
     757             : }
     758             : 
     759             : /*
     760             :  * PostPrepare_Inval
     761             :  *      Clean up after successful PREPARE.
     762             :  *
     763             :  * Here, we want to act as though the transaction aborted, so that we will
     764             :  * undo any syscache changes it made, thereby bringing us into sync with the
     765             :  * outside world, which doesn't believe the transaction committed yet.
     766             :  *
     767             :  * If the prepared transaction is later aborted, there is nothing more to
     768             :  * do; if it commits, we will receive the consequent inval messages just
     769             :  * like everyone else.
     770             :  */
     771             : void
     772          68 : PostPrepare_Inval(void)
     773             : {
     774          68 :     AtEOXact_Inval(false);
     775          68 : }
     776             : 
     777             : /*
     778             :  * Collect invalidation messages into SharedInvalidMessagesArray array.
     779             :  */
     780             : static void
     781     1146580 : MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
     782             : {
     783             :     /*
     784             :      * Initialise array first time through in each commit
     785             :      */
     786     1146580 :     if (SharedInvalidMessagesArray == NULL)
     787             :     {
     788      315712 :         maxSharedInvalidMessagesArray = FIRSTCHUNKSIZE;
     789      315712 :         numSharedInvalidMessagesArray = 0;
     790             : 
     791             :         /*
     792             :          * Although this is being palloc'd we don't actually free it directly.
      793             :          * We're so close to EOXact that we know we're going to lose it anyhow.
     794             :          */
     795      315712 :         SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray
     796             :                                             * sizeof(SharedInvalidationMessage));
     797             :     }
     798             : 
     799     1146580 :     if ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
     800             :     {
     801      111642 :         while ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
     802       56630 :             maxSharedInvalidMessagesArray *= 2;
     803             : 
     804       55012 :         SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray,
     805             :                                               maxSharedInvalidMessagesArray
     806             :                                               * sizeof(SharedInvalidationMessage));
     807             :     }
     808             : 
     809             :     /*
     810             :      * Append the next chunk onto the array
     811             :      */
     812     1146580 :     memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray,
     813             :            msgs, n * sizeof(SharedInvalidationMessage));
     814     1146580 :     numSharedInvalidMessagesArray += n;
     815     1146580 : }
     816             : 
     817             : /*
     818             :  * xactGetCommittedInvalidationMessages() is executed by
     819             :  * RecordTransactionCommit() to add invalidation messages onto the
     820             :  * commit record. This applies only to commit message types, never to
     821             :  * abort records. Must always run before AtEOXact_Inval(), since that
     822             :  * removes the data we need to see.
     823             :  *
     824             :  * Remember that this runs before we have officially committed, so we
     825             :  * must not do anything here to change what might occur *if* we should
     826             :  * fail between here and the actual commit.
     827             :  *
     828             :  * see also xact_redo_commit() and xact_desc_commit()
     829             :  */
     830             : int
     831      487774 : xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
     832             :                                      bool *RelcacheInitFileInval)
     833             : {
     834             :     MemoryContext oldcontext;
     835             : 
     836             :     /* Quick exit if we haven't done anything with invalidation messages. */
     837      487774 :     if (transInvalInfo == NULL)
     838             :     {
     839      176290 :         *RelcacheInitFileInval = false;
     840      176290 :         *msgs = NULL;
     841      176290 :         return 0;
     842             :     }
     843             : 
     844             :     /* Must be at top of stack */
     845             :     Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
     846             : 
     847             :     /*
     848             :      * Relcache init file invalidation requires processing both before and
     849             :      * after we send the SI messages.  However, we need not do anything unless
     850             :      * we committed.
     851             :      */
     852      311484 :     *RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
     853             : 
     854             :     /*
     855             :      * Walk through TransInvalidationInfo to collect all the messages into a
     856             :      * single contiguous array of invalidation messages. It must be contiguous
     857             :      * so we can copy directly into WAL message. Maintain the order that they
     858             :      * would be processed in by AtEOXact_Inval(), to ensure emulated behaviour
     859             :      * in redo is as similar as possible to original. We want the same bugs,
     860             :      * if any, not new ones.
     861             :      */
     862      311484 :     oldcontext = MemoryContextSwitchTo(CurTransactionContext);
     863             : 
     864      311484 :     ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
     865             :                                      MakeSharedInvalidMessagesArray);
     866      311484 :     ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
     867             :                                      MakeSharedInvalidMessagesArray);
     868      311484 :     MemoryContextSwitchTo(oldcontext);
     869             : 
     870             :     Assert(!(numSharedInvalidMessagesArray > 0 &&
     871             :              SharedInvalidMessagesArray == NULL));
     872             : 
     873      311484 :     *msgs = SharedInvalidMessagesArray;
     874             : 
     875      311484 :     return numSharedInvalidMessagesArray;
     876             : }
     877             : 
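On the caller side, RecordTransactionCommit() in xact.c retrieves the array and attaches it to the commit WAL record; a sketch of that usage (paraphrased from memory, not the exact upstream code):

/* sketch of the caller-side usage in RecordTransactionCommit() */
static void
sketch_collect_inval_for_commit_record(void)
{
    SharedInvalidationMessage *invalMessages = NULL;
    bool        RelcacheInitFileInval = false;
    int         nmsgs;

    nmsgs = xactGetCommittedInvalidationMessages(&invalMessages,
                                                 &RelcacheInitFileInval);

    /*
     * nmsgs, invalMessages and RelcacheInitFileInval then go into the commit
     * WAL record, so that xact_redo_commit()/standby_redo() can pass them to
     * ProcessCommittedInvalidationMessages() during recovery.
     */
    (void) nmsgs;
}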
     878             : /*
     879             :  * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
     880             :  * standby_redo() to process invalidation messages. Currently that happens
     881             :  * only at end-of-xact.
     882             :  *
     883             :  * Relcache init file invalidation requires processing both
     884             :  * before and after we send the SI messages. See AtEOXact_Inval()
     885             :  */
     886             : void
     887         294 : ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
     888             :                                      int nmsgs, bool RelcacheInitFileInval,
     889             :                                      Oid dbid, Oid tsid)
     890             : {
     891         294 :     if (nmsgs <= 0)
     892         114 :         return;
     893             : 
     894         180 :     elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
     895             :          (RelcacheInitFileInval ? " and relcache file invalidation" : ""));
     896             : 
     897         180 :     if (RelcacheInitFileInval)
     898             :     {
     899          14 :         elog(trace_recovery(DEBUG4), "removing relcache init files for database %u",
     900             :              dbid);
     901             : 
     902             :         /*
     903             :          * RelationCacheInitFilePreInvalidate, when the invalidation message
     904             :          * is for a specific database, requires DatabasePath to be set, but we
     905             :          * should not use SetDatabasePath during recovery, since it is
     906             :          * intended to be used only once by normal backends.  Hence, a quick
     907             :          * hack: set DatabasePath directly then unset after use.
     908             :          */
     909          14 :         if (OidIsValid(dbid))
     910          14 :             DatabasePath = GetDatabasePath(dbid, tsid);
     911             : 
     912          14 :         RelationCacheInitFilePreInvalidate();
     913             : 
     914          14 :         if (OidIsValid(dbid))
     915             :         {
     916          14 :             pfree(DatabasePath);
     917          14 :             DatabasePath = NULL;
     918             :         }
     919             :     }
     920             : 
     921         180 :     SendSharedInvalidMessages(msgs, nmsgs);
     922             : 
     923         180 :     if (RelcacheInitFileInval)
     924          14 :         RelationCacheInitFilePostInvalidate();
     925             : }
     926             : 
     927             : /*
     928             :  * AtEOXact_Inval
     929             :  *      Process queued-up invalidation messages at end of main transaction.
     930             :  *
     931             :  * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
     932             :  * to the shared invalidation message queue.  Note that these will be read
     933             :  * not only by other backends, but also by our own backend at the next
     934             :  * transaction start (via AcceptInvalidationMessages).  This means that
     935             :  * we can skip immediate local processing of anything that's still in
     936             :  * CurrentCmdInvalidMsgs, and just send that list out too.
     937             :  *
     938             :  * If not isCommit, we are aborting, and must locally process the messages
     939             :  * in PriorCmdInvalidMsgs.  No messages need be sent to other backends,
     940             :  * since they'll not have seen our changed tuples anyway.  We can forget
     941             :  * about CurrentCmdInvalidMsgs too, since those changes haven't touched
     942             :  * the caches yet.
     943             :  *
     944             :  * In any case, reset the various lists to empty.  We need not physically
     945             :  * free memory here, since TopTransactionContext is about to be emptied
     946             :  * anyway.
     947             :  *
     948             :  * Note:
     949             :  *      This should be called as the last step in processing a transaction.
     950             :  */
     951             : void
     952      531962 : AtEOXact_Inval(bool isCommit)
     953             : {
     954             :     /* Quick exit if no messages */
     955      531962 :     if (transInvalInfo == NULL)
     956      213122 :         return;
     957             : 
     958             :     /* Must be at top of stack */
     959             :     Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
     960             : 
     961      318840 :     if (isCommit)
     962             :     {
     963             :         /*
     964             :          * Relcache init file invalidation requires processing both before and
     965             :          * after we send the SI messages.  However, we need not do anything
     966             :          * unless we committed.
     967             :          */
     968      316932 :         if (transInvalInfo->RelcacheInitFileInval)
     969       37512 :             RelationCacheInitFilePreInvalidate();
     970             : 
     971      316932 :         AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
     972      316932 :                                    &transInvalInfo->CurrentCmdInvalidMsgs);
     973             : 
     974      316932 :         ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
     975             :                                          SendSharedInvalidMessages);
     976             : 
     977      316932 :         if (transInvalInfo->RelcacheInitFileInval)
     978       37512 :             RelationCacheInitFilePostInvalidate();
     979             :     }
     980             :     else
     981             :     {
     982        1908 :         ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
     983             :                                     LocalExecuteInvalidationMessage);
     984             :     }
     985             : 
     986             :     /* Need not free anything explicitly */
     987      318840 :     transInvalInfo = NULL;
     988      318840 :     SharedInvalidMessagesArray = NULL;
     989      318840 :     numSharedInvalidMessagesArray = 0;
     990             : }
     991             : 
     992             : /*
     993             :  * AtEOSubXact_Inval
     994             :  *      Process queued-up invalidation messages at end of subtransaction.
     995             :  *
     996             :  * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
     997             :  * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
     998             :  * parent's PriorCmdInvalidMsgs list.
     999             :  *
    1000             :  * If not isCommit, we are aborting, and must locally process the messages
    1001             :  * in PriorCmdInvalidMsgs.  No messages need be sent to other backends.
    1002             :  * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
    1003             :  * touched the caches yet.
    1004             :  *
    1005             :  * In any case, pop the transaction stack.  We need not physically free memory
    1006             :  * here, since CurTransactionContext is about to be emptied anyway
    1007             :  * (if aborting).  Beware of the possibility of aborting the same nesting
    1008             :  * level twice, though.
    1009             :  */
    1010             : void
    1011        7872 : AtEOSubXact_Inval(bool isCommit)
    1012             : {
    1013             :     int         my_level;
    1014        7872 :     TransInvalidationInfo *myInfo = transInvalInfo;
    1015             : 
    1016             :     /* Quick exit if no messages. */
    1017        7872 :     if (myInfo == NULL)
    1018        7334 :         return;
    1019             : 
    1020             :     /* Also bail out quickly if messages are not for this level. */
    1021         538 :     my_level = GetCurrentTransactionNestLevel();
    1022         538 :     if (myInfo->my_level != my_level)
    1023             :     {
    1024             :         Assert(myInfo->my_level < my_level);
    1025         376 :         return;
    1026             :     }
    1027             : 
    1028         162 :     if (isCommit)
    1029             :     {
    1030             :         /* If CurrentCmdInvalidMsgs still has anything, fix it */
    1031          48 :         CommandEndInvalidationMessages();
    1032             : 
    1033             :         /*
    1034             :          * We create invalidation stack entries lazily, so the parent might
    1035             :          * not have one.  Instead of creating one, moving all the data over,
    1036             :          * and then freeing our own, we can just adjust the level of our own
    1037             :          * entry.
    1038             :          */
    1039          48 :         if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
    1040             :         {
    1041          30 :             myInfo->my_level--;
    1042          30 :             return;
    1043             :         }
    1044             : 
    1045             :         /* Pass up my inval messages to parent */
    1046          18 :         AppendInvalidationMessages(&myInfo->parent->PriorCmdInvalidMsgs,
    1047             :                                    &myInfo->PriorCmdInvalidMsgs);
    1048             : 
    1049             :         /* Pending relcache inval becomes parent's problem too */
    1050          18 :         if (myInfo->RelcacheInitFileInval)
    1051           0 :             myInfo->parent->RelcacheInitFileInval = true;
    1052             : 
    1053             :         /* Pop the transaction state stack */
    1054          18 :         transInvalInfo = myInfo->parent;
    1055             : 
    1056             :         /* Need not free anything else explicitly */
    1057          18 :         pfree(myInfo);
    1058             :     }
    1059             :     else
    1060             :     {
    1061         114 :         ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs,
    1062             :                                     LocalExecuteInvalidationMessage);
    1063             : 
    1064             :         /* Pop the transaction state stack */
    1065         114 :         transInvalInfo = myInfo->parent;
    1066             : 
    1067             :         /* Need not free anything else explicitly */
    1068         114 :         pfree(myInfo);
    1069             :     }
    1070             : }
    1071             : 
    1072             : /*
    1073             :  * CommandEndInvalidationMessages
    1074             :  *      Process queued-up invalidation messages at end of one command
    1075             :  *      in a transaction.
    1076             :  *
    1077             :  * Here, we send no messages to the shared queue, since we don't know yet if
    1078             :  * we will commit.  We do need to locally process the CurrentCmdInvalidMsgs
    1079             :  * list, so as to flush our caches of any entries we have outdated in the
    1080             :  * current command.  We then move the current-cmd list over to become part
    1081             :  * of the prior-cmds list.
    1082             :  *
    1083             :  * Note:
    1084             :  *      This should be called during CommandCounterIncrement(),
    1085             :  *      after we have advanced the command ID.
    1086             :  */
    1087             : void
    1088      779288 : CommandEndInvalidationMessages(void)
    1089             : {
    1090             :     /*
    1091             :      * You might think this shouldn't be called outside any transaction, but
    1092             :      * bootstrap does it, and also ABORT issued when not in a transaction. So
    1093             :      * just quietly return if no state to work on.
    1094             :      */
    1095      779288 :     if (transInvalInfo == NULL)
    1096      140318 :         return;
    1097             : 
    1098      638970 :     ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
    1099             :                                 LocalExecuteInvalidationMessage);
    1100             : 
    1101             :     /* WAL Log per-command invalidation messages for wal_level=logical */
    1102      638966 :     if (XLogLogicalInfoActive())
    1103        5664 :         LogLogicalInvalidations();
    1104             : 
    1105      638966 :     AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
    1106      638966 :                                &transInvalInfo->CurrentCmdInvalidMsgs);
    1107             : }
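
Per the Note above, the intended call site is CommandCounterIncrement(), after the command ID has been advanced. A simplified sketch with an illustrative wrapper name (not the actual xact.c code):

static void
example_command_counter_increment(void)
{
    /* 1. advance the command counter, so this command's own changes
     *    become visible to the commands that follow ... */

    /* 2. ... then flush any cache entries this command outdated, and
     *    move CurrentCmdInvalidMsgs over to PriorCmdInvalidMsgs */
    CommandEndInvalidationMessages();
}
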
    1108             : 
    1109             : 
    1110             : /*
    1111             :  * CacheInvalidateHeapTuple
    1112             :  *      Register the given tuple for invalidation at end of command
    1113             :  *      (ie, current command is creating or outdating this tuple).
    1114             :  *      Also, detect whether a relcache invalidation is implied.
    1115             :  *
    1116             :  * For an insert or delete, tuple is the target tuple and newtuple is NULL.
    1117             :  * For an update, we are called just once, with tuple being the old tuple
    1118             :  * version and newtuple the new version.  This allows avoidance of duplicate
    1119             :  * effort during an update.
    1120             :  */
    1121             : void
    1122    22646634 : CacheInvalidateHeapTuple(Relation relation,
    1123             :                          HeapTuple tuple,
    1124             :                          HeapTuple newtuple)
    1125             : {
    1126             :     Oid         tupleRelId;
    1127             :     Oid         databaseId;
    1128             :     Oid         relationId;
    1129             : 
    1130             :     /* Do nothing during bootstrap */
    1131    22646634 :     if (IsBootstrapProcessingMode())
    1132     4466880 :         return;
    1133             : 
    1134             :     /*
    1135             :      * We only need to worry about invalidation for tuples that are in system
    1136             :      * catalogs; user-relation tuples are never in catcaches and can't affect
    1137             :      * the relcache either.
    1138             :      */
    1139    18179754 :     if (!IsCatalogRelation(relation))
    1140    11332306 :         return;
    1141             : 
    1142             :     /*
    1143             :      * IsCatalogRelation() will return true for TOAST tables of system
    1144             :      * catalogs, but we don't care about those, either.
    1145             :      */
    1146     6847448 :     if (IsToastRelation(relation))
    1147      110168 :         return;
    1148             : 
    1149             :     /*
    1150             :      * If we're not prepared to queue invalidation messages for this
    1151             :      * subtransaction level, get ready now.
    1152             :      */
    1153     6737280 :     PrepareInvalidationState();
    1154             : 
    1155             :     /*
    1156             :      * First let the catcache do its thing
    1157             :      */
    1158     6737280 :     tupleRelId = RelationGetRelid(relation);
    1159     6737280 :     if (RelationInvalidatesSnapshotsOnly(tupleRelId))
    1160             :     {
    1161     3831652 :         databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
    1162     3831652 :         RegisterSnapshotInvalidation(databaseId, tupleRelId);
    1163             :     }
    1164             :     else
    1165     2905628 :         PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
    1166             :                                       RegisterCatcacheInvalidation);
    1167             : 
    1168             :     /*
    1169             :      * Now, is this tuple one of the primary definers of a relcache entry? See
    1170             :      * comments in file header for deeper explanation.
    1171             :      *
    1172             :      * Note we ignore newtuple here; we assume an update cannot move a tuple
    1173             :      * from being part of one relcache entry to being part of another.
    1174             :      */
    1175     6737280 :     if (tupleRelId == RelationRelationId)
    1176             :     {
    1177      433934 :         Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
    1178             : 
    1179      433934 :         relationId = classtup->oid;
    1180      433934 :         if (classtup->relisshared)
    1181       17282 :             databaseId = InvalidOid;
    1182             :         else
    1183      416652 :             databaseId = MyDatabaseId;
    1184             :     }
    1185     6303346 :     else if (tupleRelId == AttributeRelationId)
    1186             :     {
    1187     1061898 :         Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
    1188             : 
    1189     1061898 :         relationId = atttup->attrelid;
    1190             : 
    1191             :         /*
    1192             :          * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
    1193             :          * even if the rel in question is shared (which we can't easily tell).
    1194             :          * This essentially means that only backends in this same database
    1195             :          * will react to the relcache flush request.  This is in fact
    1196             :          * appropriate, since only those backends could see our pg_attribute
    1197             :          * change anyway.  It looks a bit ugly though.  (In practice, shared
    1198             :          * relations can't have schema changes after bootstrap, so we should
    1199             :          * never come here for a shared rel anyway.)
    1200             :          */
    1201     1061898 :         databaseId = MyDatabaseId;
    1202             :     }
    1203     5241448 :     else if (tupleRelId == IndexRelationId)
    1204             :     {
    1205       31938 :         Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
    1206             : 
    1207             :         /*
    1208             :          * When a pg_index row is updated, we should send out a relcache inval
    1209             :          * for the index relation.  As above, we don't know the shared status
    1210             :          * of the index, but in practice it doesn't matter since indexes of
    1211             :          * shared catalogs can't have such updates.
    1212             :          */
    1213       31938 :         relationId = indextup->indexrelid;
    1214       31938 :         databaseId = MyDatabaseId;
    1215             :     }
    1216     5209510 :     else if (tupleRelId == ConstraintRelationId)
    1217             :     {
    1218       17782 :         Form_pg_constraint constrtup = (Form_pg_constraint) GETSTRUCT(tuple);
    1219             : 
    1220             :         /*
    1221             :          * Foreign keys are part of relcache entries, too, so send out an
    1222             :          * inval for the table that the FK applies to.
    1223             :          */
    1224       17782 :         if (constrtup->contype == CONSTRAINT_FOREIGN &&
    1225        3720 :             OidIsValid(constrtup->conrelid))
    1226             :         {
    1227        3720 :             relationId = constrtup->conrelid;
    1228        3720 :             databaseId = MyDatabaseId;
    1229             :         }
    1230             :         else
    1231       14062 :             return;
    1232             :     }
    1233             :     else
    1234     5191728 :         return;
    1235             : 
    1236             :     /*
    1237             :      * Yes.  We need to register a relcache invalidation event.
    1238             :      */
    1239     1531490 :     RegisterRelcacheInvalidation(databaseId, relationId);
    1240             : }
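
The calling convention described in the header comment boils down to three cases; the sketch below is illustrative only (invented names), not code from the heap access layer:

static void
example_register_invalidations(Relation rel,
                               HeapTuple insertedTuple,
                               HeapTuple deletedTuple,
                               HeapTuple oldTuple,
                               HeapTuple newTuple)
{
    /* insert: the target tuple is the new tuple, newtuple is NULL */
    CacheInvalidateHeapTuple(rel, insertedTuple, NULL);

    /* delete: the target tuple is the tuple being removed, newtuple is NULL */
    CacheInvalidateHeapTuple(rel, deletedTuple, NULL);

    /* update: a single call passing both versions, to avoid duplicate work */
    CacheInvalidateHeapTuple(rel, oldTuple, newTuple);
}
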
    1241             : 
    1242             : /*
    1243             :  * CacheInvalidateCatalog
    1244             :  *      Register invalidation of the whole content of a system catalog.
    1245             :  *
    1246             :  * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
    1247             :  * changed any tuples as moved them around.  Some uses of catcache entries
    1248             :  * expect their TIDs to be correct, so we have to blow away the entries.
    1249             :  *
    1250             :  * Note: we expect caller to verify that the rel actually is a system
    1251             :  * catalog.  If it isn't, no great harm is done, just a wasted sinval message.
    1252             :  */
    1253             : void
    1254         176 : CacheInvalidateCatalog(Oid catalogId)
    1255             : {
    1256             :     Oid         databaseId;
    1257             : 
    1258         176 :     PrepareInvalidationState();
    1259             : 
    1260         176 :     if (IsSharedRelation(catalogId))
    1261          30 :         databaseId = InvalidOid;
    1262             :     else
    1263         146 :         databaseId = MyDatabaseId;
    1264             : 
    1265         176 :     RegisterCatalogInvalidation(databaseId, catalogId);
    1266         176 : }
    1267             : 
    1268             : /*
    1269             :  * CacheInvalidateRelcache
    1270             :  *      Register invalidation of the specified relation's relcache entry
    1271             :  *      at end of command.
    1272             :  *
    1273             :  * This is used in places that need to force relcache rebuild but aren't
    1274             :  * changing any of the tuples recognized as contributors to the relcache
    1275             :  * entry by CacheInvalidateHeapTuple.  (An example is dropping an index.)
    1276             :  */
    1277             : void
    1278      110512 : CacheInvalidateRelcache(Relation relation)
    1279             : {
    1280             :     Oid         databaseId;
    1281             :     Oid         relationId;
    1282             : 
    1283      110512 :     PrepareInvalidationState();
    1284             : 
    1285      110512 :     relationId = RelationGetRelid(relation);
    1286      110512 :     if (relation->rd_rel->relisshared)
    1287       10700 :         databaseId = InvalidOid;
    1288             :     else
    1289       99812 :         databaseId = MyDatabaseId;
    1290             : 
    1291      110512 :     RegisterRelcacheInvalidation(databaseId, relationId);
    1292      110512 : }
    1293             : 
    1294             : /*
    1295             :  * CacheInvalidateRelcacheAll
    1296             :  *      Register invalidation of the whole relcache at the end of command.
    1297             :  *
     1298             :  * This is used by ALTER PUBLICATION, since changes in publications may
     1299             :  * affect a large number of tables.
    1300             :  */
    1301             : void
    1302           6 : CacheInvalidateRelcacheAll(void)
    1303             : {
    1304           6 :     PrepareInvalidationState();
    1305             : 
    1306           6 :     RegisterRelcacheInvalidation(InvalidOid, InvalidOid);
    1307           6 : }
    1308             : 
    1309             : /*
    1310             :  * CacheInvalidateRelcacheByTuple
    1311             :  *      As above, but relation is identified by passing its pg_class tuple.
    1312             :  */
    1313             : void
    1314       49090 : CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
    1315             : {
    1316       49090 :     Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
    1317             :     Oid         databaseId;
    1318             :     Oid         relationId;
    1319             : 
    1320       49090 :     PrepareInvalidationState();
    1321             : 
    1322       49090 :     relationId = classtup->oid;
    1323       49090 :     if (classtup->relisshared)
    1324        4318 :         databaseId = InvalidOid;
    1325             :     else
    1326       44772 :         databaseId = MyDatabaseId;
    1327       49090 :     RegisterRelcacheInvalidation(databaseId, relationId);
    1328       49090 : }
    1329             : 
    1330             : /*
    1331             :  * CacheInvalidateRelcacheByRelid
    1332             :  *      As above, but relation is identified by passing its OID.
    1333             :  *      This is the least efficient of the three options; use one of
    1334             :  *      the above routines if you have a Relation or pg_class tuple.
    1335             :  */
    1336             : void
    1337        4676 : CacheInvalidateRelcacheByRelid(Oid relid)
    1338             : {
    1339             :     HeapTuple   tup;
    1340             : 
    1341        4676 :     PrepareInvalidationState();
    1342             : 
    1343        4676 :     tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
    1344        4676 :     if (!HeapTupleIsValid(tup))
    1345           0 :         elog(ERROR, "cache lookup failed for relation %u", relid);
    1346        4676 :     CacheInvalidateRelcacheByTuple(tup);
    1347        4676 :     ReleaseSysCache(tup);
    1348        4676 : }
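
Taken together, CacheInvalidateRelcache, CacheInvalidateRelcacheByTuple and CacheInvalidateRelcacheByRelid form a cost hierarchy; which one to call depends on what the caller already has in hand. An illustrative sketch (names invented here):

static void
example_invalidate_relcache(Relation rel, HeapTuple classTuple, Oid relid)
{
    if (rel != NULL)
        CacheInvalidateRelcache(rel);               /* cheapest: Relation in hand */
    else if (HeapTupleIsValid(classTuple))
        CacheInvalidateRelcacheByTuple(classTuple); /* pg_class tuple in hand */
    else
        CacheInvalidateRelcacheByRelid(relid);      /* needs a syscache lookup */
}
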
    1349             : 
    1350             : 
    1351             : /*
    1352             :  * CacheInvalidateSmgr
    1353             :  *      Register invalidation of smgr references to a physical relation.
    1354             :  *
    1355             :  * Sending this type of invalidation msg forces other backends to close open
    1356             :  * smgr entries for the rel.  This should be done to flush dangling open-file
    1357             :  * references when the physical rel is being dropped or truncated.  Because
    1358             :  * these are nontransactional (i.e., not-rollback-able) operations, we just
    1359             :  * send the inval message immediately without any queuing.
    1360             :  *
    1361             :  * Note: in most cases there will have been a relcache flush issued against
    1362             :  * the rel at the logical level.  We need a separate smgr-level flush because
    1363             :  * it is possible for backends to have open smgr entries for rels they don't
    1364             :  * have a relcache entry for, e.g. because the only thing they ever did with
    1365             :  * the rel is write out dirty shared buffers.
    1366             :  *
    1367             :  * Note: because these messages are nontransactional, they won't be captured
    1368             :  * in commit/abort WAL entries.  Instead, calls to CacheInvalidateSmgr()
    1369             :  * should happen in low-level smgr.c routines, which are executed while
    1370             :  * replaying WAL as well as when creating it.
    1371             :  *
    1372             :  * Note: In order to avoid bloating SharedInvalidationMessage, we store only
    1373             :  * three bytes of the backend ID using what would otherwise be padding space.
    1374             :  * Thus, the maximum possible backend ID is 2^23-1.
    1375             :  */
    1376             : void
    1377       61306 : CacheInvalidateSmgr(RelFileNodeBackend rnode)
    1378             : {
    1379             :     SharedInvalidationMessage msg;
    1380             : 
    1381       61306 :     msg.sm.id = SHAREDINVALSMGR_ID;
    1382       61306 :     msg.sm.backend_hi = rnode.backend >> 16;
    1383       61306 :     msg.sm.backend_lo = rnode.backend & 0xffff;
    1384       61306 :     msg.sm.rnode = rnode.node;
    1385             :     /* check AddCatcacheInvalidationMessage() for an explanation */
    1386             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
    1387             : 
    1388       61306 :     SendSharedInvalidMessages(&msg, 1);
    1389       61306 : }
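
For reference, a receiver reverses this packing when it handles a SHAREDINVALSMGR_ID message (compare LocalExecuteInvalidationMessage earlier in this file). A simplified sketch:

static void
example_unpack_smgr_message(const SharedInvalidationMessage *msg)
{
    RelFileNodeBackend rnode;

    /* reassemble the backend ID from its hi/lo halves */
    rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
    rnode.node = msg->sm.rnode;

    /* close any dangling smgr reference to that physical relation */
    smgrclosenode(rnode);
}
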
    1390             : 
    1391             : /*
    1392             :  * CacheInvalidateRelmap
    1393             :  *      Register invalidation of the relation mapping for a database,
    1394             :  *      or for the shared catalogs if databaseId is zero.
    1395             :  *
    1396             :  * Sending this type of invalidation msg forces other backends to re-read
    1397             :  * the indicated relation mapping file.  It is also necessary to send a
    1398             :  * relcache inval for the specific relations whose mapping has been altered,
    1399             :  * else the relcache won't get updated with the new filenode data.
    1400             :  *
    1401             :  * Note: because these messages are nontransactional, they won't be captured
    1402             :  * in commit/abort WAL entries.  Instead, calls to CacheInvalidateRelmap()
    1403             :  * should happen in low-level relmapper.c routines, which are executed while
    1404             :  * replaying WAL as well as when creating it.
    1405             :  */
    1406             : void
    1407         482 : CacheInvalidateRelmap(Oid databaseId)
    1408             : {
    1409             :     SharedInvalidationMessage msg;
    1410             : 
    1411         482 :     msg.rm.id = SHAREDINVALRELMAP_ID;
    1412         482 :     msg.rm.dbId = databaseId;
    1413             :     /* check AddCatcacheInvalidationMessage() for an explanation */
    1414             :     VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
    1415             : 
    1416         482 :     SendSharedInvalidMessages(&msg, 1);
    1417         482 : }
    1418             : 
    1419             : 
    1420             : /*
    1421             :  * CacheRegisterSyscacheCallback
    1422             :  *      Register the specified function to be called for all future
    1423             :  *      invalidation events in the specified cache.  The cache ID and the
    1424             :  *      hash value of the tuple being invalidated will be passed to the
    1425             :  *      function.
    1426             :  *
    1427             :  * NOTE: Hash value zero will be passed if a cache reset request is received.
    1428             :  * In this case the called routines should flush all cached state.
    1429             :  * Yes, there's a possibility of a false match to zero, but it doesn't seem
    1430             :  * worth troubling over, especially since most of the current callees just
    1431             :  * flush all cached state anyway.
    1432             :  */
    1433             : void
    1434      139704 : CacheRegisterSyscacheCallback(int cacheid,
    1435             :                               SyscacheCallbackFunction func,
    1436             :                               Datum arg)
    1437             : {
    1438      139704 :     if (cacheid < 0 || cacheid >= SysCacheSize)
    1439           0 :         elog(FATAL, "invalid cache ID: %d", cacheid);
    1440      139704 :     if (syscache_callback_count >= MAX_SYSCACHE_CALLBACKS)
    1441           0 :         elog(FATAL, "out of syscache_callback_list slots");
    1442             : 
    1443      139704 :     if (syscache_callback_links[cacheid] == 0)
    1444             :     {
    1445             :         /* first callback for this cache */
    1446      126710 :         syscache_callback_links[cacheid] = syscache_callback_count + 1;
    1447             :     }
    1448             :     else
    1449             :     {
    1450             :         /* add to end of chain, so that older callbacks are called first */
    1451       12994 :         int         i = syscache_callback_links[cacheid] - 1;
    1452             : 
    1453       12996 :         while (syscache_callback_list[i].link > 0)
    1454           2 :             i = syscache_callback_list[i].link - 1;
    1455       12994 :         syscache_callback_list[i].link = syscache_callback_count + 1;
    1456             :     }
    1457             : 
    1458      139704 :     syscache_callback_list[syscache_callback_count].id = cacheid;
    1459      139704 :     syscache_callback_list[syscache_callback_count].link = 0;
    1460      139704 :     syscache_callback_list[syscache_callback_count].function = func;
    1461      139704 :     syscache_callback_list[syscache_callback_count].arg = arg;
    1462             : 
    1463      139704 :     ++syscache_callback_count;
    1464      139704 : }
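
A typical registration, e.g. from an extension's _PG_init() or from a module that keeps a private cache over pg_proc.  PROCOID is a real syscache ID; the callback body here is only a sketch:

static void
example_proc_syscache_callback(Datum arg, int cacheid, uint32 hashvalue)
{
    /*
     * hashvalue == 0 signals a cache reset: drop all cached state.  Otherwise
     * only entries whose lookup key hashes to hashvalue are stale, though
     * simply dropping everything is always a safe response.
     */
}

static void
example_register_callbacks(void)
{
    CacheRegisterSyscacheCallback(PROCOID,
                                  example_proc_syscache_callback,
                                  (Datum) 0);
}
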
    1465             : 
    1466             : /*
    1467             :  * CacheRegisterRelcacheCallback
    1468             :  *      Register the specified function to be called for all future
    1469             :  *      relcache invalidation events.  The OID of the relation being
    1470             :  *      invalidated will be passed to the function.
    1471             :  *
    1472             :  * NOTE: InvalidOid will be passed if a cache reset request is received.
    1473             :  * In this case the called routines should flush all cached state.
    1474             :  */
    1475             : void
    1476       14850 : CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
    1477             :                               Datum arg)
    1478             : {
    1479       14850 :     if (relcache_callback_count >= MAX_RELCACHE_CALLBACKS)
    1480           0 :         elog(FATAL, "out of relcache_callback_list slots");
    1481             : 
    1482       14850 :     relcache_callback_list[relcache_callback_count].function = func;
    1483       14850 :     relcache_callback_list[relcache_callback_count].arg = arg;
    1484             : 
    1485       14850 :     ++relcache_callback_count;
    1486       14850 : }
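
Relcache callbacks follow the same pattern, keyed by relation OID rather than by syscache hash value.  An illustrative sketch:

static void
example_relcache_callback(Datum arg, Oid relid)
{
    /* relid == InvalidOid signals a reset: forget all per-relation state */
}

static void
example_register_relcache_callback(void)
{
    CacheRegisterRelcacheCallback(example_relcache_callback, (Datum) 0);
}
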
    1487             : 
    1488             : /*
    1489             :  * CallSyscacheCallbacks
    1490             :  *
    1491             :  * This is exported so that CatalogCacheFlushCatalog can call it, saving
    1492             :  * this module from knowing which catcache IDs correspond to which catalogs.
    1493             :  */
    1494             : void
    1495    16066604 : CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
    1496             : {
    1497             :     int         i;
    1498             : 
    1499    16066604 :     if (cacheid < 0 || cacheid >= SysCacheSize)
    1500           0 :         elog(ERROR, "invalid cache ID: %d", cacheid);
    1501             : 
    1502    16066604 :     i = syscache_callback_links[cacheid] - 1;
    1503    17643442 :     while (i >= 0)
    1504             :     {
    1505     1576838 :         struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
    1506             : 
    1507             :         Assert(ccitem->id == cacheid);
    1508     1576838 :         ccitem->function(ccitem->arg, cacheid, hashvalue);
    1509     1576838 :         i = ccitem->link - 1;
    1510             :     }
    1511    16066604 : }
    1512             : 
    1513             : /*
    1514             :  * LogLogicalInvalidations
    1515             :  *
    1516             :  * Emit WAL for invalidations.  This is currently only used for logging
    1517             :  * invalidations at the command end or at commit time if any invalidations
    1518             :  * are pending.
    1519             :  */
    1520             : void
     1521       14610 : LogLogicalInvalidations(void)
    1522             : {
    1523             :     xl_xact_invals xlrec;
    1524             :     SharedInvalidationMessage *invalMessages;
    1525       14610 :     int         nmsgs = 0;
    1526             : 
    1527             :     /* Quick exit if we haven't done anything with invalidation messages. */
    1528       14610 :     if (transInvalInfo == NULL)
    1529        7486 :         return;
    1530             : 
    1531        7124 :     ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
    1532             :                                      MakeSharedInvalidMessagesArray);
    1533             : 
    1534             :     Assert(!(numSharedInvalidMessagesArray > 0 &&
    1535             :              SharedInvalidMessagesArray == NULL));
    1536             : 
    1537        7124 :     invalMessages = SharedInvalidMessagesArray;
    1538        7124 :     nmsgs = numSharedInvalidMessagesArray;
    1539        7124 :     SharedInvalidMessagesArray = NULL;
    1540        7124 :     numSharedInvalidMessagesArray = 0;
    1541             : 
    1542        7124 :     if (nmsgs > 0)
    1543             :     {
    1544             :         /* prepare record */
    1545        5934 :         memset(&xlrec, 0, MinSizeOfXactInvals);
    1546        5934 :         xlrec.nmsgs = nmsgs;
    1547             : 
    1548             :         /* perform insertion */
    1549        5934 :         XLogBeginInsert();
    1550        5934 :         XLogRegisterData((char *) (&xlrec), MinSizeOfXactInvals);
    1551        5934 :         XLogRegisterData((char *) invalMessages,
    1552             :                          nmsgs * sizeof(SharedInvalidationMessage));
    1553        5934 :         XLogInsert(RM_XACT_ID, XLOG_XACT_INVALIDATIONS);
    1554             : 
    1555        5934 :         pfree(invalMessages);
    1556             :     }
    1557             : }

Generated by: LCOV version 1.13