Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * inval.c
4 : * POSTGRES cache invalidation dispatcher code.
5 : *
6 : * This is subtle stuff, so pay attention:
7 : *
8 : * When a tuple is updated or deleted, our standard visibility rules
9 : * consider that it is *still valid* so long as we are in the same command,
10 : * ie, until the next CommandCounterIncrement() or transaction commit.
11 : * (See access/heap/heapam_visibility.c, and note that system catalogs are
12 : * generally scanned under the most current snapshot available, rather than
13 : * the transaction snapshot.) At the command boundary, the old tuple stops
14 : * being valid and the new version, if any, becomes valid. Therefore,
15 : * we cannot simply flush a tuple from the system caches during heap_update()
16 : * or heap_delete(). The tuple is still good at that point; what's more,
17 : * even if we did flush it, it might be reloaded into the caches by a later
18 : * request in the same command. So the correct behavior is to keep a list
19 : * of outdated (updated/deleted) tuples and then do the required cache
20 : * flushes at the next command boundary. We must also keep track of
21 : * inserted tuples so that we can flush "negative" cache entries that match
22 : * the new tuples; again, that mustn't happen until end of command.
23 : *
24 : * Once we have finished the command, we still need to remember inserted
25 : * tuples (including new versions of updated tuples), so that we can flush
26 : * them from the caches if we abort the transaction. Similarly, we'd better
27 : * be able to flush "negative" cache entries that may have been loaded in
28 : * place of deleted tuples, so we still need the deleted ones too.
29 : *
30 : * If we successfully complete the transaction, we have to broadcast all
31 : * these invalidation events to other backends (via the SI message queue)
32 : * so that they can flush obsolete entries from their caches. Note we have
33 : * to record the transaction commit before sending SI messages, otherwise
34 : * the other backends won't see our updated tuples as good.
35 : *
36 : * When a subtransaction aborts, we can process and discard any events
37 : * it has queued. When a subtransaction commits, we just add its events
38 : * to the pending lists of the parent transaction.
39 : *
40 : * In short, we need to remember until xact end every insert or delete
41 : * of a tuple that might be in the system caches. Updates are treated as
42 : * two events, delete + insert, for simplicity. (If the update doesn't
43 : * change the tuple hash value, catcache.c optimizes this into one event.)
44 : *
45 : * We do not need to register EVERY tuple operation in this way, just those
46 : * on tuples in relations that have associated catcaches. We do, however,
47 : * have to register every operation on every tuple that *could* be in a
48 : * catcache, whether or not it currently is in our cache. Also, if the
49 : * tuple is in a relation that has multiple catcaches, we need to register
50 : * an invalidation message for each such catcache. catcache.c's
51 : * PrepareToInvalidateCacheTuple() routine provides the knowledge of which
52 : * catcaches may need invalidation for a given tuple.
53 : *
54 : * Also, whenever we see an operation on a pg_class, pg_attribute, or
55 : * pg_index tuple, we register a relcache flush operation for the relation
56 : * described by that tuple (as specified in CacheInvalidateHeapTuple()).
57 : * Likewise for pg_constraint tuples for foreign keys on relations.
58 : *
59 : * We keep the relcache flush requests in lists separate from the catcache
60 : * tuple flush requests. This allows us to issue all the pending catcache
61 : * flushes before we issue relcache flushes, which saves us from loading
62 : * a catcache tuple during relcache load only to flush it again right away.
63 : * Also, we avoid queuing multiple relcache flush requests for the same
64 : * relation, since a relcache flush is relatively expensive to do.
65 : * (XXX is it worth testing likewise for duplicate catcache flush entries?
66 : * Probably not.)
67 : *
68 : * Many subsystems own higher-level caches that depend on relcache and/or
69 : * catcache, and they register callbacks here to invalidate their caches.
70 : * While building a higher-level cache entry, a backend may receive a
71 : * callback for the being-built entry or one of its dependencies. This
72 : * implies the new higher-level entry would be born stale, and it might
73 : * remain stale for the life of the backend. Many caches do not prevent
74 : * that. They rely on DDL for can't-miss catalog changes taking
75 : * AccessExclusiveLock on suitable objects. (For a change made with less
76 : * locking, backends might never read the change.) The relation cache,
77 : * however, needs to reflect changes from CREATE INDEX CONCURRENTLY no later
78 : * than the beginning of the next transaction. Hence, when a relevant
79 : * invalidation callback arrives during a build, relcache.c reattempts that
80 : * build. Caches with similar needs could do likewise.
81 : *
82 : * If a relcache flush is issued for a system relation that we preload
83 : * from the relcache init file, we must also delete the init file so that
84 : * it will be rebuilt during the next backend restart. The actual work of
85 : * manipulating the init file is in relcache.c, but we keep track of the
86 : * need for it here.
87 : *
88 : * Currently, inval messages are sent without regard for the possibility
89 : * that the object described by the catalog tuple might be a session-local
90 : * object such as a temporary table. This is because (1) this code has
91 : * no practical way to tell the difference, and (2) it is not certain that
92 : * other backends don't have catalog cache or even relcache entries for
93 : * such tables, anyway; there is nothing that prevents that. It might be
94 : * worth trying to avoid sending such inval traffic in the future, if those
95 : * problems can be overcome cheaply.
96 : *
97 : * When making a nontransactional change to a cacheable object, we must
98 : * likewise send the invalidation immediately, before ending the change's
99 : * critical section. This includes inplace heap updates, relmap, and smgr.
100 : *
101 : * When wal_level=logical, write invalidations into WAL at each command end to
102 : * support the decoding of the in-progress transactions. See
103 : * CommandEndInvalidationMessages.
104 : *
105 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
106 : * Portions Copyright (c) 1994, Regents of the University of California
107 : *
108 : * IDENTIFICATION
109 : * src/backend/utils/cache/inval.c
110 : *
111 : *-------------------------------------------------------------------------
112 : */
113 : #include "postgres.h"
114 :
115 : #include <limits.h>
116 :
117 : #include "access/htup_details.h"
118 : #include "access/xact.h"
119 : #include "access/xloginsert.h"
120 : #include "catalog/catalog.h"
121 : #include "catalog/pg_constraint.h"
122 : #include "miscadmin.h"
123 : #include "storage/procnumber.h"
124 : #include "storage/sinval.h"
125 : #include "storage/smgr.h"
126 : #include "utils/catcache.h"
127 : #include "utils/injection_point.h"
128 : #include "utils/inval.h"
129 : #include "utils/memdebug.h"
130 : #include "utils/memutils.h"
131 : #include "utils/rel.h"
132 : #include "utils/relmapper.h"
133 : #include "utils/snapmgr.h"
134 : #include "utils/syscache.h"
135 :
136 :
137 : /*
138 : * Pending requests are stored as ready-to-send SharedInvalidationMessages.
139 : * We keep the messages themselves in arrays in TopTransactionContext (there
140 : * are separate arrays for catcache and relcache messages). For transactional
141 : * messages, control information is kept in a chain of TransInvalidationInfo
142 : * structs, also allocated in TopTransactionContext. (We could keep a
143 : * subtransaction's TransInvalidationInfo in its CurTransactionContext; but
144 : * that's more wasteful not less so, since in very many scenarios it'd be the
145 : * only allocation in the subtransaction's CurTransactionContext.) For
146 : * inplace update messages, control information appears in an
147 : * InvalidationInfo, allocated in CurrentMemoryContext.
148 : *
149 : * We can store the message arrays densely, and yet avoid moving data around
150 : * within an array, because within any one subtransaction we need only
151 : * distinguish between messages emitted by prior commands and those emitted
152 : * by the current command. Once a command completes and we've done local
153 : * processing on its messages, we can fold those into the prior-commands
154 : * messages just by changing array indexes in the TransInvalidationInfo
155 : * struct. Similarly, we need distinguish messages of prior subtransactions
156 : * from those of the current subtransaction only until the subtransaction
157 : * completes, after which we adjust the array indexes in the parent's
158 : * TransInvalidationInfo to include the subtransaction's messages. Inplace
159 : * invalidations don't need a concept of command or subtransaction boundaries,
160 : * since we send them during the WAL insertion critical section.
161 : *
162 : * The ordering of the individual messages within a command's or
163 : * subtransaction's output is not considered significant, although this
164 : * implementation happens to preserve the order in which they were queued.
165 : * (Previous versions of this code did not preserve it.)
166 : *
167 : * For notational convenience, control information is kept in two-element
168 : * arrays, the first for catcache messages and the second for relcache
169 : * messages.
170 : */
           : /* Subgroup indexes: slot 0 of each two-element array holds catcache
           :  * messages, slot 1 holds relcache (and relsync/snapshot) messages. */
 171 : #define CatCacheMsgs 0
 172 : #define RelCacheMsgs 1
 173 :
 174 : /* Pointers to main arrays in TopTransactionContext */
 175 : typedef struct InvalMessageArray
 176 : {
 177 : SharedInvalidationMessage *msgs; /* palloc'd array (can be expanded) */
 178 : int maxmsgs; /* current allocated size of array */
 179 : } InvalMessageArray;
 180 :
 181 : static InvalMessageArray InvalMessageArrays[2];
 182 :
 183 : /* Control information for one logical group of messages */
 184 : typedef struct InvalidationMsgsGroup
 185 : {
 186 : int firstmsg[2]; /* first index in relevant array */
 187 : int nextmsg[2]; /* last+1 index */
 188 : } InvalidationMsgsGroup;
 189 :
 190 : /* Macros to help preserve InvalidationMsgsGroup abstraction */
           : /* Make subgroup of targetgroup empty, positioned immediately after
           :  * priorgroup's last message in the shared array. */
 191 : #define SetSubGroupToFollow(targetgroup, priorgroup, subgroup) \
 192 : do { \
 193 : (targetgroup)->firstmsg[subgroup] = \
 194 : (targetgroup)->nextmsg[subgroup] = \
 195 : (priorgroup)->nextmsg[subgroup]; \
 196 : } while (0)
 197 :
           : /* Apply SetSubGroupToFollow to both subgroups. */
 198 : #define SetGroupToFollow(targetgroup, priorgroup) \
 199 : do { \
 200 : SetSubGroupToFollow(targetgroup, priorgroup, CatCacheMsgs); \
 201 : SetSubGroupToFollow(targetgroup, priorgroup, RelCacheMsgs); \
 202 : } while (0)
 203 :
           : /* Number of messages currently in one subgroup / in the whole group. */
 204 : #define NumMessagesInSubGroup(group, subgroup) \
 205 : ((group)->nextmsg[subgroup] - (group)->firstmsg[subgroup])
 206 :
 207 : #define NumMessagesInGroup(group) \
 208 : (NumMessagesInSubGroup(group, CatCacheMsgs) + \
 209 : NumMessagesInSubGroup(group, RelCacheMsgs))
 210 :
211 :
212 : /*----------------
213 : * Transactional invalidation messages are divided into two groups:
214 : * 1) events so far in current command, not yet reflected to caches.
215 : * 2) events in previous commands of current transaction; these have
216 : * been reflected to local caches, and must be either broadcast to
217 : * other backends or rolled back from local cache when we commit
218 : * or abort the transaction.
219 : * Actually, we need such groups for each level of nested transaction,
220 : * so that we can discard events from an aborted subtransaction. When
221 : * a subtransaction commits, we append its events to the parent's groups.
222 : *
223 : * The relcache-file-invalidated flag can just be a simple boolean,
224 : * since we only act on it at transaction commit; we don't care which
225 : * command of the transaction set it.
226 : *----------------
227 : */
 228 :
 229 : /* fields common to both transactional and inplace invalidation */
 230 : typedef struct InvalidationInfo
 231 : {
 232 : /* Events emitted by current command */
 233 : InvalidationMsgsGroup CurrentCmdInvalidMsgs;
 234 :
 235 : /* init file must be invalidated? */
 236 : bool RelcacheInitFileInval;
 237 : } InvalidationInfo;
 238 :
 239 : /* subclass adding fields specific to transactional invalidation */
 240 : typedef struct TransInvalidationInfo
 241 : {
 242 : /* Base class */
 243 : struct InvalidationInfo ii;
 244 :
 245 : /* Events emitted by previous commands of this (sub)transaction */
 246 : InvalidationMsgsGroup PriorCmdInvalidMsgs;
 247 :
 248 : /* Back link to parent transaction's info */
 249 : struct TransInvalidationInfo *parent;
 250 :
 251 : /* Subtransaction nesting depth */
 252 : int my_level;
 253 : } TransInvalidationInfo;
 254 :
           : /* Top of the per-(sub)transaction invalidation-state stack; NULL when no
           :  * transactional inval state has been set up yet. */
 255 : static TransInvalidationInfo *transInvalInfo = NULL;
 256 :
           : /* At most one inplace-update InvalidationInfo may be under assembly at a
           :  * time (asserted in PrepareInplaceInvalidationState). */
 257 : static InvalidationInfo *inplaceInvalInfo = NULL;
 258 :
 259 : /* GUC storage */
 260 : int debug_discard_caches = 0;
 261 :
 262 : /*
 263 : * Dynamically-registered callback functions. Current implementation
 264 : * assumes there won't be enough of these to justify a dynamically resizable
 265 : * array; it'd be easy to improve that if needed.
 266 : *
 267 : * To avoid searching in CallSyscacheCallbacks, all callbacks for a given
 268 : * syscache are linked into a list pointed to by syscache_callback_links[id].
 269 : * The link values are syscache_callback_list[] index plus 1, or 0 for none.
 270 : */
 271 :
 272 : #define MAX_SYSCACHE_CALLBACKS 64
 273 : #define MAX_RELCACHE_CALLBACKS 10
 274 : #define MAX_RELSYNC_CALLBACKS 10
 275 :
 276 : static struct SYSCACHECALLBACK
 277 : {
 278 : int16 id; /* cache number */
 279 : int16 link; /* next callback index+1 for same cache */
 280 : SyscacheCallbackFunction function;
 281 : Datum arg;
 282 : } syscache_callback_list[MAX_SYSCACHE_CALLBACKS];
 283 :
 284 : static int16 syscache_callback_links[SysCacheSize];
 285 :
 286 : static int syscache_callback_count = 0;
 287 :
 288 : static struct RELCACHECALLBACK
 289 : {
 290 : RelcacheCallbackFunction function;
 291 : Datum arg;
 292 : } relcache_callback_list[MAX_RELCACHE_CALLBACKS];
 293 :
 294 : static int relcache_callback_count = 0;
 295 :
 296 : static struct RELSYNCCALLBACK
 297 : {
 298 : RelSyncCallbackFunction function;
 299 : Datum arg;
 300 : } relsync_callback_list[MAX_RELSYNC_CALLBACKS];
 301 :
 302 : static int relsync_callback_count = 0;
303 :
304 :
305 : /* ----------------------------------------------------------------
306 : * Invalidation subgroup support functions
307 : * ----------------------------------------------------------------
308 : */
309 :
310 : /*
311 : * AddInvalidationMessage
312 : * Add an invalidation message to a (sub)group.
313 : *
314 : * The group must be the last active one, since we assume we can add to the
315 : * end of the relevant InvalMessageArray.
316 : *
317 : * subgroup must be CatCacheMsgs or RelCacheMsgs.
318 : */
 319 : static void
 320 7185630 : AddInvalidationMessage(InvalidationMsgsGroup *group, int subgroup,
 321 : const SharedInvalidationMessage *msg)
 322 : {
 323 7185630 : InvalMessageArray *ima = &InvalMessageArrays[subgroup];
 324 7185630 : int nextindex = group->nextmsg[subgroup];
 325 :
           : /* Out of space?  First allocation is 32 slots; thereafter double,
           :  * giving amortized O(1) appends. */
 326 7185630 : if (nextindex >= ima->maxmsgs)
 327 : {
 328 801688 : if (ima->msgs == NULL)
 329 : {
 330 : /* Create new storage array in TopTransactionContext */
 331 744052 : int reqsize = 32; /* arbitrary */
 332 :
 333 744052 : ima->msgs = (SharedInvalidationMessage *)
 334 744052 : MemoryContextAlloc(TopTransactionContext,
 335 : reqsize * sizeof(SharedInvalidationMessage));
 336 744052 : ima->maxmsgs = reqsize;
 337 : Assert(nextindex == 0);
 338 : }
 339 : else
 340 : {
 341 : /* Enlarge storage array */
 342 57636 : int reqsize = 2 * ima->maxmsgs;
 343 :
 344 57636 : ima->msgs = (SharedInvalidationMessage *)
 345 57636 : repalloc(ima->msgs,
 346 : reqsize * sizeof(SharedInvalidationMessage));
 347 57636 : ima->maxmsgs = reqsize;
 348 : }
 349 : }
 350 : /* Okay, add message to current group */
 351 7185630 : ima->msgs[nextindex] = *msg;
 352 7185630 : group->nextmsg[subgroup]++;
 353 7185630 : }
354 :
355 : /*
356 : * Append one subgroup of invalidation messages to another, resetting
357 : * the source subgroup to empty.
358 : */
 359 : static void
 360 2063612 : AppendInvalidationMessageSubGroup(InvalidationMsgsGroup *dest,
 361 : InvalidationMsgsGroup *src,
 362 : int subgroup)
 363 : {
 364 : /* Messages must be adjacent in main array */
 365 : Assert(dest->nextmsg[subgroup] == src->firstmsg[subgroup]);
 366 :
 367 : /* ... which makes this easy: */
           : /* No data movement needed: dest simply absorbs src's range by
           :  * advancing its end index. */
 368 2063612 : dest->nextmsg[subgroup] = src->nextmsg[subgroup];
 369 :
 370 : /*
 371 : * This is handy for some callers and irrelevant for others. But we do it
 372 : * always, reasoning that it's bad to leave different groups pointing at
 373 : * the same fragment of the message array.
 374 : */
 375 2063612 : SetSubGroupToFollow(src, dest, subgroup);
 376 2063612 : }
377 :
378 : /*
379 : * Process a subgroup of invalidation messages.
380 : *
381 : * This is a macro that executes the given code fragment for each message in
382 : * a message subgroup. The fragment should refer to the message as *msg.
383 : */
           : /* NB: codeFragment may execute "return" to exit the *calling* function
           :  * early; the Add*InvalidationMessage duplicate checks rely on that. */
 384 : #define ProcessMessageSubGroup(group, subgroup, codeFragment) \
 385 : do { \
 386 : int _msgindex = (group)->firstmsg[subgroup]; \
 387 : int _endmsg = (group)->nextmsg[subgroup]; \
 388 : for (; _msgindex < _endmsg; _msgindex++) \
 389 : { \
 390 : SharedInvalidationMessage *msg = \
 391 : &InvalMessageArrays[subgroup].msgs[_msgindex]; \
 392 : codeFragment; \
 393 : } \
 394 : } while (0)
 395 :
 396 : /*
 397 : * Process a subgroup of invalidation messages as an array.
 398 : *
 399 : * As above, but the code fragment can handle an array of messages.
 400 : * The fragment should refer to the messages as msgs[], with n entries.
 401 : */
 402 : #define ProcessMessageSubGroupMulti(group, subgroup, codeFragment) \
 403 : do { \
 404 : int n = NumMessagesInSubGroup(group, subgroup); \
 405 : if (n > 0) { \
 406 : SharedInvalidationMessage *msgs = \
 407 : &InvalMessageArrays[subgroup].msgs[(group)->firstmsg[subgroup]]; \
 408 : codeFragment; \
 409 : } \
 410 : } while (0)
411 :
412 :
413 : /* ----------------------------------------------------------------
414 : * Invalidation group support functions
415 : *
416 : * These routines understand about the division of a logical invalidation
417 : * group into separate physical arrays for catcache and relcache entries.
418 : * ----------------------------------------------------------------
419 : */
420 :
421 : /*
422 : * Add a catcache inval entry
423 : */
 424 : static void
 425 5727216 : AddCatcacheInvalidationMessage(InvalidationMsgsGroup *group,
 426 : int id, uint32 hashValue, Oid dbId)
 427 : {
 428 : SharedInvalidationMessage msg;
 429 :
           : /* The cache id is stored in an int8 field, hence the range check. */
 430 : Assert(id < CHAR_MAX);
 431 5727216 : msg.cc.id = (int8) id;
 432 5727216 : msg.cc.dbId = dbId;
 433 5727216 : msg.cc.hashValue = hashValue;
 434 :
 435 : /*
 436 : * Define padding bytes in SharedInvalidationMessage structs to be
 437 : * defined. Otherwise the sinvaladt.c ringbuffer, which is accessed by
 438 : * multiple processes, will cause spurious valgrind warnings about
 439 : * undefined memory being used. That's because valgrind remembers the
 440 : * undefined bytes from the last local process's store, not realizing that
 441 : * another process has written since, filling the previously uninitialized
 442 : * bytes
 443 : */
 444 : VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
 445 :
 446 5727216 : AddInvalidationMessage(group, CatCacheMsgs, &msg);
 447 5727216 : }
448 :
449 : /*
450 : * Add a whole-catalog inval entry
451 : */
 452 : static void
 453 222 : AddCatalogInvalidationMessage(InvalidationMsgsGroup *group,
 454 : Oid dbId, Oid catId)
 455 : {
 456 : SharedInvalidationMessage msg;
 457 :
           : /* Flushes every catcache entry sourced from catalog catId in dbId. */
 458 222 : msg.cat.id = SHAREDINVALCATALOG_ID;
 459 222 : msg.cat.dbId = dbId;
 460 222 : msg.cat.catId = catId;
 461 : /* check AddCatcacheInvalidationMessage() for an explanation */
 462 : VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
 463 :
 464 222 : AddInvalidationMessage(group, CatCacheMsgs, &msg);
 465 222 : }
466 :
467 : /*
468 : * Add a relcache inval entry
469 : */
 470 : static void
 471 2127276 : AddRelcacheInvalidationMessage(InvalidationMsgsGroup *group,
 472 : Oid dbId, Oid relId)
 473 : {
 474 : SharedInvalidationMessage msg;
 475 :
 476 : /*
 477 : * Don't add a duplicate item. We assume dbId need not be checked because
 478 : * it will never change. InvalidOid for relId means all relations so we
 479 : * don't need to add individual ones when it is present.
 480 : */
           : /* Note: the "return" inside exits this whole function (see the
           :  * ProcessMessageSubGroup macro), skipping the append below. */
 481 6471962 : ProcessMessageSubGroup(group, RelCacheMsgs,
 482 : if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
 483 : (msg->rc.relId == relId ||
 484 : msg->rc.relId == InvalidOid))
 485 : return);
 486 :
 487 : /* OK, add the item */
 488 926626 : msg.rc.id = SHAREDINVALRELCACHE_ID;
 489 926626 : msg.rc.dbId = dbId;
 490 926626 : msg.rc.relId = relId;
 491 : /* check AddCatcacheInvalidationMessage() for an explanation */
 492 : VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
 493 :
 494 926626 : AddInvalidationMessage(group, RelCacheMsgs, &msg);
 495 : }
496 :
497 : /*
498 : * Add a relsync inval entry
499 : *
500 : * We put these into the relcache subgroup for simplicity. This message is the
501 : * same as AddRelcacheInvalidationMessage() except that it is for
502 : * RelationSyncCache maintained by decoding plugin pgoutput.
503 : */
 504 : static void
 505 12 : AddRelsyncInvalidationMessage(InvalidationMsgsGroup *group,
 506 : Oid dbId, Oid relId)
 507 : {
 508 : SharedInvalidationMessage msg;
 509 :
 510 : /* Don't add a duplicate item. */
           : /* "return" here exits the whole function (ProcessMessageSubGroup). */
 511 12 : ProcessMessageSubGroup(group, RelCacheMsgs,
 512 : if (msg->rc.id == SHAREDINVALRELSYNC_ID &&
 513 : (msg->rc.relId == relId ||
 514 : msg->rc.relId == InvalidOid))
 515 : return);
 516 :
 517 : /* OK, add the item */
 518 12 : msg.rc.id = SHAREDINVALRELSYNC_ID;
 519 12 : msg.rc.dbId = dbId;
 520 12 : msg.rc.relId = relId;
 521 : /* check AddCatcacheInvalidationMessage() for an explanation */
 522 : VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
 523 :
 524 12 : AddInvalidationMessage(group, RelCacheMsgs, &msg);
 525 : }
526 :
527 : /*
528 : * Add a snapshot inval entry
529 : *
530 : * We put these into the relcache subgroup for simplicity.
531 : */
 532 : static void
 533 1057930 : AddSnapshotInvalidationMessage(InvalidationMsgsGroup *group,
 534 : Oid dbId, Oid relId)
 535 : {
 536 : SharedInvalidationMessage msg;
 537 :
 538 : /* Don't add a duplicate item */
 539 : /* We assume dbId need not be checked because it will never change */
           : /* "return" here exits the whole function (ProcessMessageSubGroup). */
 540 1533460 : ProcessMessageSubGroup(group, RelCacheMsgs,
 541 : if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
 542 : msg->sn.relId == relId)
 543 : return);
 544 :
 545 : /* OK, add the item */
 546 531554 : msg.sn.id = SHAREDINVALSNAPSHOT_ID;
 547 531554 : msg.sn.dbId = dbId;
 548 531554 : msg.sn.relId = relId;
 549 : /* check AddCatcacheInvalidationMessage() for an explanation */
 550 : VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
 551 :
 552 531554 : AddInvalidationMessage(group, RelCacheMsgs, &msg);
 553 : }
554 :
555 : /*
556 : * Append one group of invalidation messages to another, resetting
557 : * the source group to empty.
558 : */
 559 : static void
 560 1031806 : AppendInvalidationMessages(InvalidationMsgsGroup *dest,
 561 : InvalidationMsgsGroup *src)
 562 : {
           : /* Absorb both subgroups of src into dest; src is left empty. */
 563 1031806 : AppendInvalidationMessageSubGroup(dest, src, CatCacheMsgs);
 564 1031806 : AppendInvalidationMessageSubGroup(dest, src, RelCacheMsgs);
 565 1031806 : }
566 :
567 : /*
568 : * Execute the given function for all the messages in an invalidation group.
569 : * The group is not altered.
570 : *
571 : * catcache entries are processed first, for reasons mentioned above.
572 : */
 573 : static void
 574 781218 : ProcessInvalidationMessages(InvalidationMsgsGroup *group,
 575 : void (*func) (SharedInvalidationMessage *msg))
 576 : {
           : /* Catcache flushes first, then relcache: relcache rebuilds may load
           :  * catcache tuples, so flushing catcaches first avoids reloading tuples
           :  * only to flush them again (see file header comment). */
 577 5957434 : ProcessMessageSubGroup(group, CatCacheMsgs, func(msg));
 578 1939092 : ProcessMessageSubGroup(group, RelCacheMsgs, func(msg));
 579 781212 : }
580 :
581 : /*
582 : * As above, but the function is able to process an array of messages
583 : * rather than just one at a time.
584 : */
 585 : static void
 586 389590 : ProcessInvalidationMessagesMulti(InvalidationMsgsGroup *group,
 587 : void (*func) (const SharedInvalidationMessage *msgs, int n))
 588 : {
           : /* Same catcache-before-relcache ordering as
           :  * ProcessInvalidationMessages, but func gets each subgroup's
           :  * messages as one contiguous array. */
 589 389590 : ProcessMessageSubGroupMulti(group, CatCacheMsgs, func(msgs, n));
 590 389590 : ProcessMessageSubGroupMulti(group, RelCacheMsgs, func(msgs, n));
 591 389590 : }
592 :
593 : /* ----------------------------------------------------------------
594 : * private support functions
595 : * ----------------------------------------------------------------
596 : */
597 :
598 : /*
599 : * RegisterCatcacheInvalidation
600 : *
601 : * Register an invalidation event for a catcache tuple entry.
602 : */
 603 : static void
 604 5727216 : RegisterCatcacheInvalidation(int cacheId,
 605 : uint32 hashValue,
 606 : Oid dbId,
 607 : void *context)
 608 : {
           : /* context is the InvalidationInfo to accumulate into; the void *
           :  * signature matches the callback interface of
           :  * PrepareToInvalidateCacheTuple() — TODO(review): confirm caller. */
 609 5727216 : InvalidationInfo *info = (InvalidationInfo *) context;
 610 :
 611 5727216 : AddCatcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs,
 612 : cacheId, hashValue, dbId);
 613 5727216 : }
614 :
615 : /*
616 : * RegisterCatalogInvalidation
617 : *
618 : * Register an invalidation event for all catcache entries from a catalog.
619 : */
 620 : static void
 621 222 : RegisterCatalogInvalidation(InvalidationInfo *info, Oid dbId, Oid catId)
 622 : {
           : /* Queue a whole-catalog flush into the current command's group. */
 623 222 : AddCatalogInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, catId);
 624 222 : }
625 :
626 : /*
627 : * RegisterRelcacheInvalidation
628 : *
629 : * As above, but register a relcache invalidation event.
630 : */
 631 : static void
 632 2127276 : RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
 633 : {
           : /* relId == InvalidOid means "all relations" (see
           :  * AddRelcacheInvalidationMessage's duplicate-suppression rule). */
 634 2127276 : AddRelcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
 635 :
 636 : /*
 637 : * Most of the time, relcache invalidation is associated with system
 638 : * catalog updates, but there are a few cases where it isn't. Quick hack
 639 : * to ensure that the next CommandCounterIncrement() will think that we
 640 : * need to do CommandEndInvalidationMessages().
 641 : */
 642 2127276 : (void) GetCurrentCommandId(true);
 643 :
 644 : /*
 645 : * If the relation being invalidated is one of those cached in a relcache
 646 : * init file, mark that we need to zap that file at commit. For simplicity
 647 : * invalidations for a specific database always invalidate the shared file
 648 : * as well. Also zap when we are invalidating whole relcache.
 649 : */
 650 2127276 : if (relId == InvalidOid || RelationIdIsInInitFile(relId))
 651 170208 : info->RelcacheInitFileInval = true;
 652 2127276 : }
653 :
654 : /*
655 : * RegisterRelsyncInvalidation
656 : *
657 : * As above, but register a relsynccache invalidation event.
658 : */
 659 : static void
 660 12 : RegisterRelsyncInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
 661 : {
           : /* Queue a RelationSyncCache flush into the current command's group. */
 662 12 : AddRelsyncInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
 663 12 : }
664 :
665 : /*
666 : * RegisterSnapshotInvalidation
667 : *
668 : * Register an invalidation event for MVCC scans against a given catalog.
669 : * Only needed for catalogs that don't have catcaches.
670 : */
 671 : static void
 672 1057930 : RegisterSnapshotInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
 673 : {
           : /* Queue a catalog-snapshot flush into the current command's group. */
 674 1057930 : AddSnapshotInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
 675 1057930 : }
676 :
677 : /*
678 : * PrepareInvalidationState
679 : * Initialize inval data for the current (sub)transaction.
680 : */
           : /* Returns the InvalidationInfo for the current transaction nesting
           :  * level, pushing a new TransInvalidationInfo stack entry when entering
           :  * a deeper (sub)transaction.  The fast path (same level as the top of
           :  * stack) just returns the existing entry. */
 681 : static InvalidationInfo *
 682 4177334 : PrepareInvalidationState(void)
 683 : {
 684 : TransInvalidationInfo *myInfo;
 685 :
 686 : /* PrepareToInvalidateCacheTuple() needs relcache */
 687 4177334 : AssertCouldGetRelation();
 688 : /* Can't queue transactional message while collecting inplace messages. */
 689 : Assert(inplaceInvalInfo == NULL);
 690 :
 691 8094576 : if (transInvalInfo != NULL &&
 692 3917242 : transInvalInfo->my_level == GetCurrentTransactionNestLevel())
 693 3917100 : return (InvalidationInfo *) transInvalInfo;
 694 :
 695 : myInfo = (TransInvalidationInfo *)
 696 260234 : MemoryContextAllocZero(TopTransactionContext,
 697 : sizeof(TransInvalidationInfo));
 698 260234 : myInfo->parent = transInvalInfo;
 699 260234 : myInfo->my_level = GetCurrentTransactionNestLevel();
 700 :
 701 : /* Now, do we have a previous stack entry? */
 702 260234 : if (transInvalInfo != NULL)
 703 : {
 704 : /* Yes; this one should be for a deeper nesting level. */
 705 : Assert(myInfo->my_level > transInvalInfo->my_level);
 706 :
 707 : /*
 708 : * The parent (sub)transaction must not have any current (i.e.,
 709 : * not-yet-locally-processed) messages. If it did, we'd have a
 710 : * semantic problem: the new subtransaction presumably ought not be
 711 : * able to see those events yet, but since the CommandCounter is
 712 : * linear, that can't work once the subtransaction advances the
 713 : * counter. This is a convenient place to check for that, as well as
 714 : * being important to keep management of the message arrays simple.
 715 : */
 716 142 : if (NumMessagesInGroup(&transInvalInfo->ii.CurrentCmdInvalidMsgs) != 0)
 717 0 : elog(ERROR, "cannot start a subtransaction when there are unprocessed inval messages");
 718 :
 719 : /*
 720 : * MemoryContextAllocZero set firstmsg = nextmsg = 0 in each group,
 721 : * which is fine for the first (sub)transaction, but otherwise we need
 722 : * to update them to follow whatever is already in the arrays.
 723 : */
 724 142 : SetGroupToFollow(&myInfo->PriorCmdInvalidMsgs,
 725 : &transInvalInfo->ii.CurrentCmdInvalidMsgs);
 726 142 : SetGroupToFollow(&myInfo->ii.CurrentCmdInvalidMsgs,
 727 : &myInfo->PriorCmdInvalidMsgs);
 728 : }
 729 : else
 730 : {
 731 : /*
 732 : * Here, we need only clear any array pointers left over from a prior
 733 : * transaction.
 734 : */
 735 260092 : InvalMessageArrays[CatCacheMsgs].msgs = NULL;
 736 260092 : InvalMessageArrays[CatCacheMsgs].maxmsgs = 0;
 737 260092 : InvalMessageArrays[RelCacheMsgs].msgs = NULL;
 738 260092 : InvalMessageArrays[RelCacheMsgs].maxmsgs = 0;
 739 : }
 740 :
 741 260234 : transInvalInfo = myInfo;
 742 260234 : return (InvalidationInfo *) myInfo;
 743 : }
744 :
745 : /*
746 : * PrepareInplaceInvalidationState
747 : * Initialize inval data for an inplace update.
748 : *
749 : * See previous function for more background.
750 : */
 751 : static InvalidationInfo *
 752 246982 : PrepareInplaceInvalidationState(void)
 753 : {
 754 : InvalidationInfo *myInfo;
 755 :
 756 246982 : AssertCouldGetRelation();
 757 : /* limit of one inplace update under assembly */
 758 : Assert(inplaceInvalInfo == NULL);
 759 :
 760 : /* gone after WAL insertion CritSection ends, so use current context */
 761 246982 : myInfo = (InvalidationInfo *) palloc0(sizeof(InvalidationInfo));
 762 :
 763 : /* Stash our messages past end of the transactional messages, if any. */
 764 246982 : if (transInvalInfo != NULL)
 765 109686 : SetGroupToFollow(&myInfo->CurrentCmdInvalidMsgs,
 766 : &transInvalInfo->ii.CurrentCmdInvalidMsgs);
 767 : else
 768 : {
           : /* No transactional state exists, so the shared arrays may hold
           :  * stale pointers from a prior transaction; reset them, as
           :  * PrepareInvalidationState does in the same situation. */
 769 137296 : InvalMessageArrays[CatCacheMsgs].msgs = NULL;
 770 137296 : InvalMessageArrays[CatCacheMsgs].maxmsgs = 0;
 771 137296 : InvalMessageArrays[RelCacheMsgs].msgs = NULL;
 772 137296 : InvalMessageArrays[RelCacheMsgs].maxmsgs = 0;
 773 : }
 774 :
 775 246982 : inplaceInvalInfo = myInfo;
 776 246982 : return myInfo;
 777 : }
778 :
779 : /* ----------------------------------------------------------------
780 : * public functions
781 : * ----------------------------------------------------------------
782 : */
783 :
           : /*
           :  * InvalidateSystemCachesExtended
           :  *		Blow away the catalog snapshot, all catcaches, and the relcache,
           :  *		then notify every registered syscache, relcache, and relsync
           :  *		callback.  debug_discard is forwarded to the cache-reset routines.
           :  *
           :  * NOTE(review): callbacks receive hashvalue 0 / InvalidOid, which
           :  * presumably signals "invalidate everything" — confirm against the
           :  * callback contract in CacheRegisterSyscacheCallback et al.
           :  */
 784 : void
 785 4134 : InvalidateSystemCachesExtended(bool debug_discard)
 786 : {
 787 : int i;
 788 :
 789 4134 : InvalidateCatalogSnapshot();
 790 4134 : ResetCatalogCachesExt(debug_discard);
 791 4134 : RelationCacheInvalidate(debug_discard); /* gets smgr and relmap too */
 792 :
 793 71092 : for (i = 0; i < syscache_callback_count; i++)
 794 : {
 795 66958 : struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
 796 :
 797 66958 : ccitem->function(ccitem->arg, ccitem->id, 0);
 798 : }
 799 :
 800 9454 : for (i = 0; i < relcache_callback_count; i++)
 801 : {
 802 5320 : struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
 803 :
 804 5320 : ccitem->function(ccitem->arg, InvalidOid);
 805 : }
 806 :
 807 4170 : for (i = 0; i < relsync_callback_count; i++)
 808 : {
 809 36 : struct RELSYNCCALLBACK *ccitem = relsync_callback_list + i;
 810 :
 811 36 : ccitem->function(ccitem->arg, InvalidOid);
 812 : }
 813 : }
814 :
815 : /*
816 : * LocalExecuteInvalidationMessage
817 : *
818 : * Process a single invalidation message (which could be of any type).
819 : * Only the local caches are flushed; this does not transmit the message
820 : * to other backends.
821 : */
822 : void
823 35667240 : LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
824 : {
825 35667240 : if (msg->id >= 0)
826 : {
827 28542288 : if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
828 : {
829 20449100 : InvalidateCatalogSnapshot();
830 :
831 20449100 : SysCacheInvalidate(msg->cc.id, msg->cc.hashValue);
832 :
833 20449100 : CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
834 : }
835 : }
836 7124952 : else if (msg->id == SHAREDINVALCATALOG_ID)
837 : {
838 922 : if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
839 : {
840 772 : InvalidateCatalogSnapshot();
841 :
842 772 : CatalogCacheFlushCatalog(msg->cat.catId);
843 :
844 : /* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
845 : }
846 : }
847 7124030 : else if (msg->id == SHAREDINVALRELCACHE_ID)
848 : {
849 3910838 : if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
850 : {
851 : int i;
852 :
853 2791014 : if (msg->rc.relId == InvalidOid)
854 448 : RelationCacheInvalidate(false);
855 : else
856 2790566 : RelationCacheInvalidateEntry(msg->rc.relId);
857 :
858 7591808 : for (i = 0; i < relcache_callback_count; i++)
859 : {
860 4800800 : struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;
861 :
862 4800800 : ccitem->function(ccitem->arg, msg->rc.relId);
863 : }
864 : }
865 : }
866 3213192 : else if (msg->id == SHAREDINVALSMGR_ID)
867 : {
868 : /*
869 : * We could have smgr entries for relations of other databases, so no
870 : * short-circuit test is possible here.
871 : */
872 : RelFileLocatorBackend rlocator;
873 :
874 430566 : rlocator.locator = msg->sm.rlocator;
875 430566 : rlocator.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
876 430566 : smgrreleaserellocator(rlocator);
877 : }
878 2782626 : else if (msg->id == SHAREDINVALRELMAP_ID)
879 : {
880 : /* We only care about our own database and shared catalogs */
881 680 : if (msg->rm.dbId == InvalidOid)
882 268 : RelationMapInvalidate(true);
883 412 : else if (msg->rm.dbId == MyDatabaseId)
884 274 : RelationMapInvalidate(false);
885 : }
886 2781946 : else if (msg->id == SHAREDINVALSNAPSHOT_ID)
887 : {
888 : /* We only care about our own database and shared catalogs */
889 2781884 : if (msg->sn.dbId == InvalidOid)
890 85658 : InvalidateCatalogSnapshot();
891 2696226 : else if (msg->sn.dbId == MyDatabaseId)
892 1999122 : InvalidateCatalogSnapshot();
893 : }
894 62 : else if (msg->id == SHAREDINVALRELSYNC_ID)
895 : {
896 : /* We only care about our own database */
897 62 : if (msg->rs.dbId == MyDatabaseId)
898 62 : CallRelSyncCallbacks(msg->rs.relid);
899 : }
900 : else
901 0 : elog(FATAL, "unrecognized SI message ID: %d", msg->id);
902 35667234 : }
903 :
904 : /*
905 : * InvalidateSystemCaches
906 : *
907 : * This blows away all tuples in the system catalog caches and
908 : * all the cached relation descriptors and smgr cache entries.
909 : * Relation descriptors that have positive refcounts are then rebuilt.
910 : *
911 : * We call this when we see a shared-inval-queue overflow signal,
912 : * since that tells us we've lost some shared-inval messages and hence
913 : * don't know what needs to be invalidated.
914 : */
915 : void
916 4134 : InvalidateSystemCaches(void)
917 : {
918 4134 : InvalidateSystemCachesExtended(false);
919 4134 : }
920 :
921 : /*
922 : * AcceptInvalidationMessages
923 : * Read and process invalidation messages from the shared invalidation
924 : * message queue.
925 : *
926 : * Note:
927 : * This should be called as the first step in processing a transaction.
928 : */
929 : void
930 36585830 : AcceptInvalidationMessages(void)
931 : {
932 : #ifdef USE_ASSERT_CHECKING
933 : /* message handlers shall access catalogs only during transactions */
934 : if (IsTransactionState())
935 : AssertCouldGetRelation();
936 : #endif
937 :
938 36585830 : ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
939 : InvalidateSystemCaches);
940 :
941 : /*----------
942 : * Test code to force cache flushes anytime a flush could happen.
943 : *
944 : * This helps detect intermittent faults caused by code that reads a cache
945 : * entry and then performs an action that could invalidate the entry, but
946 : * rarely actually does so. This can spot issues that would otherwise
947 : * only arise with badly timed concurrent DDL, for example.
948 : *
949 : * The default debug_discard_caches = 0 does no forced cache flushes.
950 : *
951 : * If used with CLOBBER_FREED_MEMORY,
952 : * debug_discard_caches = 1 (formerly known as CLOBBER_CACHE_ALWAYS)
953 : * provides a fairly thorough test that the system contains no cache-flush
954 : * hazards. However, it also makes the system unbelievably slow --- the
955 : * regression tests take about 100 times longer than normal.
956 : *
957 : * If you're a glutton for punishment, try
958 : * debug_discard_caches = 3 (formerly known as CLOBBER_CACHE_RECURSIVELY).
959 : * This slows things by at least a factor of 10000, so I wouldn't suggest
960 : * trying to run the entire regression tests that way. It's useful to try
961 : * a few simple tests, to make sure that cache reload isn't subject to
962 : * internal cache-flush hazards, but after you've done a few thousand
963 : * recursive reloads it's unlikely you'll learn more.
964 : *----------
965 : */
966 : #ifdef DISCARD_CACHES_ENABLED
967 : {
968 : static int recursion_depth = 0;
969 :
970 : if (recursion_depth < debug_discard_caches)
971 : {
972 : recursion_depth++;
973 : InvalidateSystemCachesExtended(true);
974 : recursion_depth--;
975 : }
976 : }
977 : #endif
978 36585830 : }
979 :
980 : /*
981 : * PostPrepare_Inval
982 : * Clean up after successful PREPARE.
983 : *
984 : * Here, we want to act as though the transaction aborted, so that we will
985 : * undo any syscache changes it made, thereby bringing us into sync with the
986 : * outside world, which doesn't believe the transaction committed yet.
987 : *
988 : * If the prepared transaction is later aborted, there is nothing more to
989 : * do; if it commits, we will receive the consequent inval messages just
990 : * like everyone else.
991 : */
992 : void
993 594 : PostPrepare_Inval(void)
994 : {
995 594 : AtEOXact_Inval(false);
996 594 : }
997 :
998 : /*
999 : * xactGetCommittedInvalidationMessages() is called by
1000 : * RecordTransactionCommit() to collect invalidation messages to add to the
1001 : * commit record. This applies only to commit message types, never to
1002 : * abort records. Must always run before AtEOXact_Inval(), since that
1003 : * removes the data we need to see.
1004 : *
1005 : * Remember that this runs before we have officially committed, so we
1006 : * must not do anything here to change what might occur *if* we should
1007 : * fail between here and the actual commit.
1008 : *
1009 : * see also xact_redo_commit() and xact_desc_commit()
1010 : */
1011 : int
1012 393588 : xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
1013 : bool *RelcacheInitFileInval)
1014 : {
1015 : SharedInvalidationMessage *msgarray;
1016 : int nummsgs;
1017 : int nmsgs;
1018 :
1019 : /* Quick exit if we haven't done anything with invalidation messages. */
1020 393588 : if (transInvalInfo == NULL)
1021 : {
1022 230524 : *RelcacheInitFileInval = false;
1023 230524 : *msgs = NULL;
1024 230524 : return 0;
1025 : }
1026 :
1027 : /* Must be at top of stack */
1028 : Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
1029 :
1030 : /*
1031 : * Relcache init file invalidation requires processing both before and
1032 : * after we send the SI messages. However, we need not do anything unless
1033 : * we committed.
1034 : */
1035 163064 : *RelcacheInitFileInval = transInvalInfo->ii.RelcacheInitFileInval;
1036 :
1037 : /*
1038 : * Collect all the pending messages into a single contiguous array of
1039 : * invalidation messages, to simplify what needs to happen while building
1040 : * the commit WAL message. Maintain the order that they would be
1041 : * processed in by AtEOXact_Inval(), to ensure emulated behaviour in redo
1042 : * is as similar as possible to original. We want the same bugs, if any,
1043 : * not new ones.
1044 : */
1045 163064 : nummsgs = NumMessagesInGroup(&transInvalInfo->PriorCmdInvalidMsgs) +
1046 163064 : NumMessagesInGroup(&transInvalInfo->ii.CurrentCmdInvalidMsgs);
1047 :
1048 163064 : *msgs = msgarray = (SharedInvalidationMessage *)
1049 163064 : MemoryContextAlloc(CurTransactionContext,
1050 : nummsgs * sizeof(SharedInvalidationMessage));
1051 :
1052 163064 : nmsgs = 0;
1053 163064 : ProcessMessageSubGroupMulti(&transInvalInfo->PriorCmdInvalidMsgs,
1054 : CatCacheMsgs,
1055 : (memcpy(msgarray + nmsgs,
1056 : msgs,
1057 : n * sizeof(SharedInvalidationMessage)),
1058 : nmsgs += n));
1059 163064 : ProcessMessageSubGroupMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
1060 : CatCacheMsgs,
1061 : (memcpy(msgarray + nmsgs,
1062 : msgs,
1063 : n * sizeof(SharedInvalidationMessage)),
1064 : nmsgs += n));
1065 163064 : ProcessMessageSubGroupMulti(&transInvalInfo->PriorCmdInvalidMsgs,
1066 : RelCacheMsgs,
1067 : (memcpy(msgarray + nmsgs,
1068 : msgs,
1069 : n * sizeof(SharedInvalidationMessage)),
1070 : nmsgs += n));
1071 163064 : ProcessMessageSubGroupMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
1072 : RelCacheMsgs,
1073 : (memcpy(msgarray + nmsgs,
1074 : msgs,
1075 : n * sizeof(SharedInvalidationMessage)),
1076 : nmsgs += n));
1077 : Assert(nmsgs == nummsgs);
1078 :
1079 163064 : return nmsgs;
1080 : }
1081 :
1082 : /*
1083 : * inplaceGetInvalidationMessages() is called by the inplace update to collect
1084 : * invalidation messages to add to its WAL record. Like the previous
1085 : * function, we might still fail.
1086 : */
1087 : int
1088 96894 : inplaceGetInvalidationMessages(SharedInvalidationMessage **msgs,
1089 : bool *RelcacheInitFileInval)
1090 : {
1091 : SharedInvalidationMessage *msgarray;
1092 : int nummsgs;
1093 : int nmsgs;
1094 :
1095 : /* Quick exit if we haven't done anything with invalidation messages. */
1096 96894 : if (inplaceInvalInfo == NULL)
1097 : {
1098 29008 : *RelcacheInitFileInval = false;
1099 29008 : *msgs = NULL;
1100 29008 : return 0;
1101 : }
1102 :
1103 67886 : *RelcacheInitFileInval = inplaceInvalInfo->RelcacheInitFileInval;
1104 67886 : nummsgs = NumMessagesInGroup(&inplaceInvalInfo->CurrentCmdInvalidMsgs);
1105 67886 : *msgs = msgarray = (SharedInvalidationMessage *)
1106 67886 : palloc(nummsgs * sizeof(SharedInvalidationMessage));
1107 :
1108 67886 : nmsgs = 0;
1109 67886 : ProcessMessageSubGroupMulti(&inplaceInvalInfo->CurrentCmdInvalidMsgs,
1110 : CatCacheMsgs,
1111 : (memcpy(msgarray + nmsgs,
1112 : msgs,
1113 : n * sizeof(SharedInvalidationMessage)),
1114 : nmsgs += n));
1115 67886 : ProcessMessageSubGroupMulti(&inplaceInvalInfo->CurrentCmdInvalidMsgs,
1116 : RelCacheMsgs,
1117 : (memcpy(msgarray + nmsgs,
1118 : msgs,
1119 : n * sizeof(SharedInvalidationMessage)),
1120 : nmsgs += n));
1121 : Assert(nmsgs == nummsgs);
1122 :
1123 67886 : return nmsgs;
1124 : }
1125 :
1126 : /*
1127 : * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
1128 : * standby_redo() to process invalidation messages. Currently that happens
1129 : * only at end-of-xact.
1130 : *
1131 : * Relcache init file invalidation requires processing both
1132 : * before and after we send the SI messages. See AtEOXact_Inval()
1133 : */
1134 : void
1135 54244 : ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
1136 : int nmsgs, bool RelcacheInitFileInval,
1137 : Oid dbid, Oid tsid)
1138 : {
1139 54244 : if (nmsgs <= 0)
1140 10140 : return;
1141 :
1142 44104 : elog(DEBUG4, "replaying commit with %d messages%s", nmsgs,
1143 : (RelcacheInitFileInval ? " and relcache file invalidation" : ""));
1144 :
1145 44104 : if (RelcacheInitFileInval)
1146 : {
1147 686 : elog(DEBUG4, "removing relcache init files for database %u", dbid);
1148 :
1149 : /*
1150 : * RelationCacheInitFilePreInvalidate, when the invalidation message
1151 : * is for a specific database, requires DatabasePath to be set, but we
1152 : * should not use SetDatabasePath during recovery, since it is
1153 : * intended to be used only once by normal backends. Hence, a quick
1154 : * hack: set DatabasePath directly then unset after use.
1155 : */
1156 686 : if (OidIsValid(dbid))
1157 686 : DatabasePath = GetDatabasePath(dbid, tsid);
1158 :
1159 686 : RelationCacheInitFilePreInvalidate();
1160 :
1161 686 : if (OidIsValid(dbid))
1162 : {
1163 686 : pfree(DatabasePath);
1164 686 : DatabasePath = NULL;
1165 : }
1166 : }
1167 :
1168 44104 : SendSharedInvalidMessages(msgs, nmsgs);
1169 :
1170 44104 : if (RelcacheInitFileInval)
1171 686 : RelationCacheInitFilePostInvalidate();
1172 : }
1173 :
1174 : /*
1175 : * AtEOXact_Inval
1176 : * Process queued-up invalidation messages at end of main transaction.
1177 : *
1178 : * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
1179 : * to the shared invalidation message queue. Note that these will be read
1180 : * not only by other backends, but also by our own backend at the next
1181 : * transaction start (via AcceptInvalidationMessages). This means that
1182 : * we can skip immediate local processing of anything that's still in
1183 : * CurrentCmdInvalidMsgs, and just send that list out too.
1184 : *
1185 : * If not isCommit, we are aborting, and must locally process the messages
1186 : * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
1187 : * since they'll not have seen our changed tuples anyway. We can forget
1188 : * about CurrentCmdInvalidMsgs too, since those changes haven't touched
1189 : * the caches yet.
1190 : *
1191 : * In any case, reset our state to empty. We need not physically
1192 : * free memory here, since TopTransactionContext is about to be emptied
1193 : * anyway.
1194 : *
1195 : * Note:
1196 : * This should be called as the last step in processing a transaction.
1197 : */
1198 : void
1199 855496 : AtEOXact_Inval(bool isCommit)
1200 : {
1201 855496 : inplaceInvalInfo = NULL;
1202 :
1203 : /* Quick exit if no transactional messages */
1204 855496 : if (transInvalInfo == NULL)
1205 595468 : return;
1206 :
1207 : /* Must be at top of stack */
1208 : Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);
1209 :
1210 260028 : INJECTION_POINT("transaction-end-process-inval");
1211 :
1212 260028 : if (isCommit)
1213 : {
1214 : /*
1215 : * Relcache init file invalidation requires processing both before and
1216 : * after we send the SI messages. However, we need not do anything
1217 : * unless we committed.
1218 : */
1219 255390 : if (transInvalInfo->ii.RelcacheInitFileInval)
1220 37188 : RelationCacheInitFilePreInvalidate();
1221 :
1222 255390 : AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
1223 255390 : &transInvalInfo->ii.CurrentCmdInvalidMsgs);
1224 :
1225 255390 : ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
1226 : SendSharedInvalidMessages);
1227 :
1228 255390 : if (transInvalInfo->ii.RelcacheInitFileInval)
1229 37188 : RelationCacheInitFilePostInvalidate();
1230 : }
1231 : else
1232 : {
1233 4638 : ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
1234 : LocalExecuteInvalidationMessage);
1235 : }
1236 :
1237 : /* Need not free anything explicitly */
1238 260028 : transInvalInfo = NULL;
1239 : }
1240 :
1241 : /*
1242 : * PreInplace_Inval
1243 : * Process queued-up invalidation before inplace update critical section.
1244 : *
1245 : * Tasks belong here if they are safe even if the inplace update does not
1246 : * complete. Currently, this just unlinks a cache file, which can fail. The
1247 : * sum of this and AtInplace_Inval() mirrors AtEOXact_Inval(isCommit=true).
1248 : */
1249 : void
1250 163208 : PreInplace_Inval(void)
1251 : {
1252 : Assert(CritSectionCount == 0);
1253 :
1254 163208 : if (inplaceInvalInfo && inplaceInvalInfo->RelcacheInitFileInval)
1255 34756 : RelationCacheInitFilePreInvalidate();
1256 163208 : }
1257 :
1258 : /*
1259 : * AtInplace_Inval
1260 : * Process queued-up invalidations after inplace update buffer mutation.
1261 : */
1262 : void
1263 163208 : AtInplace_Inval(void)
1264 : {
1265 : Assert(CritSectionCount > 0);
1266 :
1267 163208 : if (inplaceInvalInfo == NULL)
1268 29008 : return;
1269 :
1270 134200 : ProcessInvalidationMessagesMulti(&inplaceInvalInfo->CurrentCmdInvalidMsgs,
1271 : SendSharedInvalidMessages);
1272 :
1273 134200 : if (inplaceInvalInfo->RelcacheInitFileInval)
1274 34756 : RelationCacheInitFilePostInvalidate();
1275 :
1276 134200 : inplaceInvalInfo = NULL;
1277 : }
1278 :
1279 : /*
1280 : * ForgetInplace_Inval
1281 : * Alternative to PreInplace_Inval()+AtInplace_Inval(): discard queued-up
1282 : * invalidations. This lets inplace update enumerate invalidations
1283 : * optimistically, before locking the buffer.
1284 : */
1285 : void
1286 118662 : ForgetInplace_Inval(void)
1287 : {
1288 118662 : inplaceInvalInfo = NULL;
1289 118662 : }
1290 :
1291 : /*
1292 : * AtEOSubXact_Inval
1293 : * Process queued-up invalidation messages at end of subtransaction.
1294 : *
1295 : * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
1296 : * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
1297 : * parent's PriorCmdInvalidMsgs list.
1298 : *
1299 : * If not isCommit, we are aborting, and must locally process the messages
1300 : * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
1301 : * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
1302 : * touched the caches yet.
1303 : *
1304 : * In any case, pop the transaction stack. We need not physically free memory
1305 : * here, since CurTransactionContext is about to be emptied anyway
1306 : * (if aborting). Beware of the possibility of aborting the same nesting
1307 : * level twice, though.
1308 : */
1309 : void
1310 20072 : AtEOSubXact_Inval(bool isCommit)
1311 : {
1312 : int my_level;
1313 : TransInvalidationInfo *myInfo;
1314 :
1315 : /*
1316 : * Successful inplace update must clear this, but we clear it on abort.
1317 : * Inplace updates allocate this in CurrentMemoryContext, which has
1318 : * lifespan <= subtransaction lifespan. Hence, don't free it explicitly.
1319 : */
1320 20072 : if (isCommit)
1321 : Assert(inplaceInvalInfo == NULL);
1322 : else
1323 9340 : inplaceInvalInfo = NULL;
1324 :
1325 : /* Quick exit if no transactional messages. */
1326 20072 : myInfo = transInvalInfo;
1327 20072 : if (myInfo == NULL)
1328 18422 : return;
1329 :
1330 : /* Also bail out quickly if messages are not for this level. */
1331 1650 : my_level = GetCurrentTransactionNestLevel();
1332 1650 : if (myInfo->my_level != my_level)
1333 : {
1334 : Assert(myInfo->my_level < my_level);
1335 1370 : return;
1336 : }
1337 :
1338 280 : if (isCommit)
1339 : {
1340 : /* If CurrentCmdInvalidMsgs still has anything, fix it */
1341 98 : CommandEndInvalidationMessages();
1342 :
1343 : /*
1344 : * We create invalidation stack entries lazily, so the parent might
1345 : * not have one. Instead of creating one, moving all the data over,
1346 : * and then freeing our own, we can just adjust the level of our own
1347 : * entry.
1348 : */
1349 98 : if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
1350 : {
1351 74 : myInfo->my_level--;
1352 74 : return;
1353 : }
1354 :
1355 : /*
1356 : * Pass up my inval messages to parent. Notice that we stick them in
1357 : * PriorCmdInvalidMsgs, not CurrentCmdInvalidMsgs, since they've
1358 : * already been locally processed. (This would trigger the Assert in
1359 : * AppendInvalidationMessageSubGroup if the parent's
1360 : * CurrentCmdInvalidMsgs isn't empty; but we already checked that in
1361 : * PrepareInvalidationState.)
1362 : */
1363 24 : AppendInvalidationMessages(&myInfo->parent->PriorCmdInvalidMsgs,
1364 : &myInfo->PriorCmdInvalidMsgs);
1365 :
1366 : /* Must readjust parent's CurrentCmdInvalidMsgs indexes now */
1367 24 : SetGroupToFollow(&myInfo->parent->ii.CurrentCmdInvalidMsgs,
1368 : &myInfo->parent->PriorCmdInvalidMsgs);
1369 :
1370 : /* Pending relcache inval becomes parent's problem too */
1371 24 : if (myInfo->ii.RelcacheInitFileInval)
1372 0 : myInfo->parent->ii.RelcacheInitFileInval = true;
1373 :
1374 : /* Pop the transaction state stack */
1375 24 : transInvalInfo = myInfo->parent;
1376 :
1377 : /* Need not free anything else explicitly */
1378 24 : pfree(myInfo);
1379 : }
1380 : else
1381 : {
1382 182 : ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs,
1383 : LocalExecuteInvalidationMessage);
1384 :
1385 : /* Pop the transaction state stack */
1386 182 : transInvalInfo = myInfo->parent;
1387 :
1388 : /* Need not free anything else explicitly */
1389 182 : pfree(myInfo);
1390 : }
1391 : }
1392 :
1393 : /*
1394 : * CommandEndInvalidationMessages
1395 : * Process queued-up invalidation messages at end of one command
1396 : * in a transaction.
1397 : *
1398 : * Here, we send no messages to the shared queue, since we don't know yet if
1399 : * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
1400 : * list, so as to flush our caches of any entries we have outdated in the
1401 : * current command. We then move the current-cmd list over to become part
1402 : * of the prior-cmds list.
1403 : *
1404 : * Note:
1405 : * This should be called during CommandCounterIncrement(),
1406 : * after we have advanced the command ID.
1407 : */
1408 : void
1409 1142886 : CommandEndInvalidationMessages(void)
1410 : {
1411 : /*
1412 : * You might think this shouldn't be called outside any transaction, but
1413 : * bootstrap does it, and also ABORT issued when not in a transaction. So
1414 : * just quietly return if no state to work on.
1415 : */
1416 1142886 : if (transInvalInfo == NULL)
1417 366488 : return;
1418 :
1419 776398 : ProcessInvalidationMessages(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
1420 : LocalExecuteInvalidationMessage);
1421 :
1422 : /* WAL Log per-command invalidation messages for wal_level=logical */
1423 776392 : if (XLogLogicalInfoActive())
1424 8866 : LogLogicalInvalidations();
1425 :
1426 776392 : AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
1427 776392 : &transInvalInfo->ii.CurrentCmdInvalidMsgs);
1428 : }
1429 :
1430 :
1431 : /*
1432 : * CacheInvalidateHeapTupleCommon
1433 : * Common logic for end-of-command and inplace variants.
1434 : */
1435 : static void
1436 22560582 : CacheInvalidateHeapTupleCommon(Relation relation,
1437 : HeapTuple tuple,
1438 : HeapTuple newtuple,
1439 : InvalidationInfo *(*prepare_callback) (void))
1440 : {
1441 : InvalidationInfo *info;
1442 : Oid tupleRelId;
1443 : Oid databaseId;
1444 : Oid relationId;
1445 :
1446 : /* PrepareToInvalidateCacheTuple() needs relcache */
1447 22560582 : AssertCouldGetRelation();
1448 :
1449 : /* Do nothing during bootstrap */
1450 22560582 : if (IsBootstrapProcessingMode())
1451 1286446 : return;
1452 :
1453 : /*
1454 : * We only need to worry about invalidation for tuples that are in system
1455 : * catalogs; user-relation tuples are never in catcaches and can't affect
1456 : * the relcache either.
1457 : */
1458 21274136 : if (!IsCatalogRelation(relation))
1459 17042168 : return;
1460 :
1461 : /*
1462 : * IsCatalogRelation() will return true for TOAST tables of system
1463 : * catalogs, but we don't care about those, either.
1464 : */
1465 4231968 : if (IsToastRelation(relation))
1466 33866 : return;
1467 :
1468 : /* Allocate any required resources. */
1469 4198102 : info = prepare_callback();
1470 :
1471 : /*
1472 : * First let the catcache do its thing
1473 : */
1474 4198102 : tupleRelId = RelationGetRelid(relation);
1475 4198102 : if (RelationInvalidatesSnapshotsOnly(tupleRelId))
1476 : {
1477 1057930 : databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
1478 1057930 : RegisterSnapshotInvalidation(info, databaseId, tupleRelId);
1479 : }
1480 : else
1481 3140172 : PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
1482 : RegisterCatcacheInvalidation,
1483 : (void *) info);
1484 :
1485 : /*
1486 : * Now, is this tuple one of the primary definers of a relcache entry? See
1487 : * comments in file header for deeper explanation.
1488 : *
1489 : * Note we ignore newtuple here; we assume an update cannot move a tuple
1490 : * from being part of one relcache entry to being part of another.
1491 : */
1492 4198102 : if (tupleRelId == RelationRelationId)
1493 : {
1494 695136 : Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
1495 :
1496 695136 : relationId = classtup->oid;
1497 695136 : if (classtup->relisshared)
1498 41886 : databaseId = InvalidOid;
1499 : else
1500 653250 : databaseId = MyDatabaseId;
1501 : }
1502 3502966 : else if (tupleRelId == AttributeRelationId)
1503 : {
1504 1132412 : Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
1505 :
1506 1132412 : relationId = atttup->attrelid;
1507 :
1508 : /*
1509 : * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
1510 : * even if the rel in question is shared (which we can't easily tell).
1511 : * This essentially means that only backends in this same database
1512 : * will react to the relcache flush request. This is in fact
1513 : * appropriate, since only those backends could see our pg_attribute
1514 : * change anyway. It looks a bit ugly though. (In practice, shared
1515 : * relations can't have schema changes after bootstrap, so we should
1516 : * never come here for a shared rel anyway.)
1517 : */
1518 1132412 : databaseId = MyDatabaseId;
1519 : }
1520 2370554 : else if (tupleRelId == IndexRelationId)
1521 : {
1522 65544 : Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
1523 :
1524 : /*
1525 : * When a pg_index row is updated, we should send out a relcache inval
1526 : * for the index relation. As above, we don't know the shared status
1527 : * of the index, but in practice it doesn't matter since indexes of
1528 : * shared catalogs can't have such updates.
1529 : */
1530 65544 : relationId = indextup->indexrelid;
1531 65544 : databaseId = MyDatabaseId;
1532 : }
1533 2305010 : else if (tupleRelId == ConstraintRelationId)
1534 : {
1535 84258 : Form_pg_constraint constrtup = (Form_pg_constraint) GETSTRUCT(tuple);
1536 :
1537 : /*
1538 : * Foreign keys are part of relcache entries, too, so send out an
1539 : * inval for the table that the FK applies to.
1540 : */
1541 84258 : if (constrtup->contype == CONSTRAINT_FOREIGN &&
1542 8204 : OidIsValid(constrtup->conrelid))
1543 : {
1544 8204 : relationId = constrtup->conrelid;
1545 8204 : databaseId = MyDatabaseId;
1546 : }
1547 : else
1548 76054 : return;
1549 : }
1550 : else
1551 2220752 : return;
1552 :
1553 : /*
1554 : * Yes. We need to register a relcache invalidation event.
1555 : */
1556 1901296 : RegisterRelcacheInvalidation(info, databaseId, relationId);
1557 : }
1558 :
1559 : /*
1560 : * CacheInvalidateHeapTuple
1561 : * Register the given tuple for invalidation at end of command
1562 : * (ie, current command is creating or outdating this tuple) and end of
1563 : * transaction. Also, detect whether a relcache invalidation is implied.
1564 : *
1565 : * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1566 : * For an update, we are called just once, with tuple being the old tuple
1567 : * version and newtuple the new version. This allows avoidance of duplicate
1568 : * effort during an update.
1569 : */
1570 : void
1571 22278712 : CacheInvalidateHeapTuple(Relation relation,
1572 : HeapTuple tuple,
1573 : HeapTuple newtuple)
1574 : {
1575 22278712 : CacheInvalidateHeapTupleCommon(relation, tuple, newtuple,
1576 : PrepareInvalidationState);
1577 22278712 : }
1578 :
1579 : /*
1580 : * CacheInvalidateHeapTupleInplace
1581 : * Register the given tuple for nontransactional invalidation pertaining
1582 : * to an inplace update. Also, detect whether a relcache invalidation is
1583 : * implied.
1584 : *
1585 : * Like CacheInvalidateHeapTuple(), but for inplace updates.
1586 : */
1587 : void
1588 281870 : CacheInvalidateHeapTupleInplace(Relation relation,
1589 : HeapTuple tuple,
1590 : HeapTuple newtuple)
1591 : {
1592 281870 : CacheInvalidateHeapTupleCommon(relation, tuple, newtuple,
1593 : PrepareInplaceInvalidationState);
1594 281870 : }
1595 :
1596 : /*
1597 : * CacheInvalidateCatalog
1598 : * Register invalidation of the whole content of a system catalog.
1599 : *
1600 : * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
1601 : * changed any tuples as moved them around. Some uses of catcache entries
1602 : * expect their TIDs to be correct, so we have to blow away the entries.
1603 : *
1604 : * Note: we expect caller to verify that the rel actually is a system
1605 : * catalog. If it isn't, no great harm is done, just a wasted sinval message.
1606 : */
1607 : void
1608 222 : CacheInvalidateCatalog(Oid catalogId)
1609 : {
1610 : Oid databaseId;
1611 :
1612 222 : if (IsSharedRelation(catalogId))
1613 36 : databaseId = InvalidOid;
1614 : else
1615 186 : databaseId = MyDatabaseId;
1616 :
1617 222 : RegisterCatalogInvalidation(PrepareInvalidationState(),
1618 : databaseId, catalogId);
1619 222 : }
1620 :
1621 : /*
1622 : * CacheInvalidateRelcache
1623 : * Register invalidation of the specified relation's relcache entry
1624 : * at end of command.
1625 : *
1626 : * This is used in places that need to force relcache rebuild but aren't
1627 : * changing any of the tuples recognized as contributors to the relcache
1628 : * entry by CacheInvalidateHeapTuple. (An example is dropping an index.)
1629 : */
1630 : void
1631 152756 : CacheInvalidateRelcache(Relation relation)
1632 : {
1633 : Oid databaseId;
1634 : Oid relationId;
1635 :
1636 152756 : relationId = RelationGetRelid(relation);
1637 152756 : if (relation->rd_rel->relisshared)
1638 6950 : databaseId = InvalidOid;
1639 : else
1640 145806 : databaseId = MyDatabaseId;
1641 :
1642 152756 : RegisterRelcacheInvalidation(PrepareInvalidationState(),
1643 : databaseId, relationId);
1644 152756 : }
1645 :
1646 : /*
1647 : * CacheInvalidateRelcacheAll
1648 : * Register invalidation of the whole relcache at the end of command.
1649 : *
1650 : * This is used by alter publication as changes in publications may affect
1651 : * large number of tables.
1652 : */
1653 : void
1654 162 : CacheInvalidateRelcacheAll(void)
1655 : {
1656 162 : RegisterRelcacheInvalidation(PrepareInvalidationState(),
1657 : InvalidOid, InvalidOid);
1658 162 : }
1659 :
1660 : /*
1661 : * CacheInvalidateRelcacheByTuple
1662 : * As above, but relation is identified by passing its pg_class tuple.
1663 : */
1664 : void
1665 73062 : CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
1666 : {
1667 73062 : Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
1668 : Oid databaseId;
1669 : Oid relationId;
1670 :
1671 73062 : relationId = classtup->oid;
1672 73062 : if (classtup->relisshared)
1673 1970 : databaseId = InvalidOid;
1674 : else
1675 71092 : databaseId = MyDatabaseId;
1676 73062 : RegisterRelcacheInvalidation(PrepareInvalidationState(),
1677 : databaseId, relationId);
1678 73062 : }
1679 :
1680 : /*
1681 : * CacheInvalidateRelcacheByRelid
1682 : * As above, but relation is identified by passing its OID.
1683 : * This is the least efficient of the three options; use one of
1684 : * the above routines if you have a Relation or pg_class tuple.
1685 : */
1686 : void
1687 28586 : CacheInvalidateRelcacheByRelid(Oid relid)
1688 : {
1689 : HeapTuple tup;
1690 :
1691 28586 : tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1692 28586 : if (!HeapTupleIsValid(tup))
1693 0 : elog(ERROR, "cache lookup failed for relation %u", relid);
1694 28586 : CacheInvalidateRelcacheByTuple(tup);
1695 28586 : ReleaseSysCache(tup);
1696 28586 : }
1697 :
1698 : /*
1699 : * CacheInvalidateRelSync
1700 : * Register invalidation of the cache in logical decoding output plugin
1701 : * for a database.
1702 : *
1703 : * This type of invalidation message is used for the specific purpose of output
1704 : * plugins. Processes which do not decode WALs would do nothing even when it
1705 : * receives the message.
1706 : */
1707 : void
1708 12 : CacheInvalidateRelSync(Oid relid)
1709 : {
1710 12 : RegisterRelsyncInvalidation(PrepareInvalidationState(),
1711 : MyDatabaseId, relid);
1712 12 : }
1713 :
1714 : /*
1715 : * CacheInvalidateRelSyncAll
1716 : * Register invalidation of the whole cache in logical decoding output
1717 : * plugin.
1718 : */
1719 : void
1720 6 : CacheInvalidateRelSyncAll(void)
1721 : {
1722 6 : CacheInvalidateRelSync(InvalidOid);
1723 6 : }
1724 :
1725 : /*
1726 : * CacheInvalidateSmgr
1727 : * Register invalidation of smgr references to a physical relation.
1728 : *
1729 : * Sending this type of invalidation msg forces other backends to close open
1730 : * smgr entries for the rel. This should be done to flush dangling open-file
1731 : * references when the physical rel is being dropped or truncated. Because
1732 : * these are nontransactional (i.e., not-rollback-able) operations, we just
1733 : * send the inval message immediately without any queuing.
1734 : *
1735 : * Note: in most cases there will have been a relcache flush issued against
1736 : * the rel at the logical level. We need a separate smgr-level flush because
1737 : * it is possible for backends to have open smgr entries for rels they don't
1738 : * have a relcache entry for, e.g. because the only thing they ever did with
1739 : * the rel is write out dirty shared buffers.
1740 : *
1741 : * Note: because these messages are nontransactional, they won't be captured
1742 : * in commit/abort WAL entries. Instead, calls to CacheInvalidateSmgr()
1743 : * should happen in low-level smgr.c routines, which are executed while
1744 : * replaying WAL as well as when creating it.
1745 : *
1746 : * Note: In order to avoid bloating SharedInvalidationMessage, we store only
1747 : * three bytes of the ProcNumber using what would otherwise be padding space.
1748 : * Thus, the maximum possible ProcNumber is 2^23-1.
1749 : */
1750 : void
1751 99604 : CacheInvalidateSmgr(RelFileLocatorBackend rlocator)
1752 : {
1753 : SharedInvalidationMessage msg;
1754 :
1755 : /* verify optimization stated above stays valid */
1756 : StaticAssertStmt(MAX_BACKENDS_BITS <= 23,
1757 : "MAX_BACKENDS_BITS is too big for inval.c");
1758 :
1759 99604 : msg.sm.id = SHAREDINVALSMGR_ID;
1760 99604 : msg.sm.backend_hi = rlocator.backend >> 16;
1761 99604 : msg.sm.backend_lo = rlocator.backend & 0xffff;
1762 99604 : msg.sm.rlocator = rlocator.locator;
1763 : /* check AddCatcacheInvalidationMessage() for an explanation */
1764 : VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1765 :
1766 99604 : SendSharedInvalidMessages(&msg, 1);
1767 99604 : }
1768 :
1769 : /*
1770 : * CacheInvalidateRelmap
1771 : * Register invalidation of the relation mapping for a database,
1772 : * or for the shared catalogs if databaseId is zero.
1773 : *
1774 : * Sending this type of invalidation msg forces other backends to re-read
1775 : * the indicated relation mapping file. It is also necessary to send a
1776 : * relcache inval for the specific relations whose mapping has been altered,
1777 : * else the relcache won't get updated with the new filenode data.
1778 : *
1779 : * Note: because these messages are nontransactional, they won't be captured
1780 : * in commit/abort WAL entries. Instead, calls to CacheInvalidateRelmap()
1781 : * should happen in low-level relmapper.c routines, which are executed while
1782 : * replaying WAL as well as when creating it.
1783 : */
1784 : void
1785 402 : CacheInvalidateRelmap(Oid databaseId)
1786 : {
1787 : SharedInvalidationMessage msg;
1788 :
1789 402 : msg.rm.id = SHAREDINVALRELMAP_ID;
1790 402 : msg.rm.dbId = databaseId;
1791 : /* check AddCatcacheInvalidationMessage() for an explanation */
1792 : VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
1793 :
1794 402 : SendSharedInvalidMessages(&msg, 1);
1795 402 : }
1796 :
1797 :
1798 : /*
1799 : * CacheRegisterSyscacheCallback
1800 : * Register the specified function to be called for all future
1801 : * invalidation events in the specified cache. The cache ID and the
1802 : * hash value of the tuple being invalidated will be passed to the
1803 : * function.
1804 : *
1805 : * NOTE: Hash value zero will be passed if a cache reset request is received.
1806 : * In this case the called routines should flush all cached state.
1807 : * Yes, there's a possibility of a false match to zero, but it doesn't seem
1808 : * worth troubling over, especially since most of the current callees just
1809 : * flush all cached state anyway.
1810 : */
1811 : void
1812 594058 : CacheRegisterSyscacheCallback(int cacheid,
1813 : SyscacheCallbackFunction func,
1814 : Datum arg)
1815 : {
1816 594058 : if (cacheid < 0 || cacheid >= SysCacheSize)
1817 0 : elog(FATAL, "invalid cache ID: %d", cacheid);
1818 594058 : if (syscache_callback_count >= MAX_SYSCACHE_CALLBACKS)
1819 0 : elog(FATAL, "out of syscache_callback_list slots");
1820 :
1821 594058 : if (syscache_callback_links[cacheid] == 0)
1822 : {
1823 : /* first callback for this cache */
1824 420108 : syscache_callback_links[cacheid] = syscache_callback_count + 1;
1825 : }
1826 : else
1827 : {
1828 : /* add to end of chain, so that older callbacks are called first */
1829 173950 : int i = syscache_callback_links[cacheid] - 1;
1830 :
1831 207520 : while (syscache_callback_list[i].link > 0)
1832 33570 : i = syscache_callback_list[i].link - 1;
1833 173950 : syscache_callback_list[i].link = syscache_callback_count + 1;
1834 : }
1835 :
1836 594058 : syscache_callback_list[syscache_callback_count].id = cacheid;
1837 594058 : syscache_callback_list[syscache_callback_count].link = 0;
1838 594058 : syscache_callback_list[syscache_callback_count].function = func;
1839 594058 : syscache_callback_list[syscache_callback_count].arg = arg;
1840 :
1841 594058 : ++syscache_callback_count;
1842 594058 : }
1843 :
1844 : /*
1845 : * CacheRegisterRelcacheCallback
1846 : * Register the specified function to be called for all future
1847 : * relcache invalidation events. The OID of the relation being
1848 : * invalidated will be passed to the function.
1849 : *
1850 : * NOTE: InvalidOid will be passed if a cache reset request is received.
1851 : * In this case the called routines should flush all cached state.
1852 : */
1853 : void
1854 47574 : CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
1855 : Datum arg)
1856 : {
1857 47574 : if (relcache_callback_count >= MAX_RELCACHE_CALLBACKS)
1858 0 : elog(FATAL, "out of relcache_callback_list slots");
1859 :
1860 47574 : relcache_callback_list[relcache_callback_count].function = func;
1861 47574 : relcache_callback_list[relcache_callback_count].arg = arg;
1862 :
1863 47574 : ++relcache_callback_count;
1864 47574 : }
1865 :
1866 : /*
1867 : * CacheRegisterRelSyncCallback
1868 : * Register the specified function to be called for all future
1869 : * relsynccache invalidation events.
1870 : *
1871 : * This function is intended to be call from the logical decoding output
1872 : * plugins.
1873 : */
1874 : void
1875 736 : CacheRegisterRelSyncCallback(RelSyncCallbackFunction func,
1876 : Datum arg)
1877 : {
1878 736 : if (relsync_callback_count >= MAX_RELSYNC_CALLBACKS)
1879 0 : elog(FATAL, "out of relsync_callback_list slots");
1880 :
1881 736 : relsync_callback_list[relsync_callback_count].function = func;
1882 736 : relsync_callback_list[relsync_callback_count].arg = arg;
1883 :
1884 736 : ++relsync_callback_count;
1885 736 : }
1886 :
1887 : /*
1888 : * CallSyscacheCallbacks
1889 : *
1890 : * This is exported so that CatalogCacheFlushCatalog can call it, saving
1891 : * this module from knowing which catcache IDs correspond to which catalogs.
1892 : */
1893 : void
1894 20450134 : CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
1895 : {
1896 : int i;
1897 :
1898 20450134 : if (cacheid < 0 || cacheid >= SysCacheSize)
1899 0 : elog(ERROR, "invalid cache ID: %d", cacheid);
1900 :
1901 20450134 : i = syscache_callback_links[cacheid] - 1;
1902 23330716 : while (i >= 0)
1903 : {
1904 2880582 : struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
1905 :
1906 : Assert(ccitem->id == cacheid);
1907 2880582 : ccitem->function(ccitem->arg, cacheid, hashvalue);
1908 2880582 : i = ccitem->link - 1;
1909 : }
1910 20450134 : }
1911 :
1912 : /*
1913 : * CallSyscacheCallbacks
1914 : */
1915 : void
1916 62 : CallRelSyncCallbacks(Oid relid)
1917 : {
1918 104 : for (int i = 0; i < relsync_callback_count; i++)
1919 : {
1920 42 : struct RELSYNCCALLBACK *ccitem = relsync_callback_list + i;
1921 :
1922 42 : ccitem->function(ccitem->arg, relid);
1923 : }
1924 62 : }
1925 :
1926 : /*
1927 : * LogLogicalInvalidations
1928 : *
1929 : * Emit WAL for invalidations caused by the current command.
1930 : *
1931 : * This is currently only used for logging invalidations at the command end
1932 : * or at commit time if any invalidations are pending.
1933 : */
1934 : void
1935 33220 : LogLogicalInvalidations(void)
1936 : {
1937 : xl_xact_invals xlrec;
1938 : InvalidationMsgsGroup *group;
1939 : int nmsgs;
1940 :
1941 : /* Quick exit if we haven't done anything with invalidation messages. */
1942 33220 : if (transInvalInfo == NULL)
1943 20916 : return;
1944 :
1945 12304 : group = &transInvalInfo->ii.CurrentCmdInvalidMsgs;
1946 12304 : nmsgs = NumMessagesInGroup(group);
1947 :
1948 12304 : if (nmsgs > 0)
1949 : {
1950 : /* prepare record */
1951 9718 : memset(&xlrec, 0, MinSizeOfXactInvals);
1952 9718 : xlrec.nmsgs = nmsgs;
1953 :
1954 : /* perform insertion */
1955 9718 : XLogBeginInsert();
1956 9718 : XLogRegisterData(&xlrec, MinSizeOfXactInvals);
1957 9718 : ProcessMessageSubGroupMulti(group, CatCacheMsgs,
1958 : XLogRegisterData(msgs,
1959 : n * sizeof(SharedInvalidationMessage)));
1960 9718 : ProcessMessageSubGroupMulti(group, RelCacheMsgs,
1961 : XLogRegisterData(msgs,
1962 : n * sizeof(SharedInvalidationMessage)));
1963 9718 : XLogInsert(RM_XACT_ID, XLOG_XACT_INVALIDATIONS);
1964 : }
1965 : }
|