Line data Source code
1 : /*-------------------------------------------------------------------------
2 : * relation.c
3 : * PostgreSQL logical replication relation mapping cache
4 : *
5 : * Copyright (c) 2016-2025, PostgreSQL Global Development Group
6 : *
7 : * IDENTIFICATION
8 : * src/backend/replication/logical/relation.c
9 : *
10 : * NOTES
11 : * Routines in this file mainly have to do with mapping the properties
12 : * of local replication target relations to the properties of their
13 : * remote counterparts.
14 : *
15 : *-------------------------------------------------------------------------
16 : */
17 :
18 : #include "postgres.h"
19 :
20 : #include "access/amapi.h"
21 : #include "access/genam.h"
22 : #include "access/table.h"
23 : #include "catalog/namespace.h"
24 : #include "catalog/pg_subscription_rel.h"
25 : #include "executor/executor.h"
26 : #include "nodes/makefuncs.h"
27 : #include "replication/logicalrelation.h"
28 : #include "replication/worker_internal.h"
29 : #include "utils/inval.h"
30 : #include "utils/lsyscache.h"
31 : #include "utils/syscache.h"
32 :
33 :
34 : static MemoryContext LogicalRepRelMapContext = NULL;
35 :
36 : static HTAB *LogicalRepRelMap = NULL;
37 :
38 : /*
39 : * Partition map (LogicalRepPartMap)
40 : *
41 : * When a partitioned table is used as a replication target, replicated
42 : * operations are actually performed on its leaf partitions, which requires
43 : * the partitions to also be mapped to the remote relation. The parent's
44 : * entry (LogicalRepRelMapEntry) cannot be used as-is for all partitions,
45 : * because individual partitions may have different attribute numbers, which
46 : * means attribute mappings to the remote relation's attributes must be
47 : * maintained separately for each partition.
48 : */
49 : static MemoryContext LogicalRepPartMapContext = NULL;
50 : static HTAB *LogicalRepPartMap = NULL;
51 : typedef struct LogicalRepPartMapEntry
52 : {
53 : Oid partoid; /* LogicalRepPartMap's key */
54 : LogicalRepRelMapEntry relmapentry;
55 : } LogicalRepPartMapEntry;
56 :
57 : static Oid FindLogicalRepLocalIndex(Relation localrel, LogicalRepRelation *remoterel,
58 : AttrMap *attrMap);
59 :
60 : /*
61 : * Relcache invalidation callback for our relation map cache.
62 : */
63 : static void
64 1420 : logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
65 : {
66 : LogicalRepRelMapEntry *entry;
67 :
68 : /* Just to be sure. */
69 1420 : if (LogicalRepRelMap == NULL)
70 0 : return;
71 :
72 1420 : if (reloid != InvalidOid)
73 : {
74 : HASH_SEQ_STATUS status;
75 :
76 1420 : hash_seq_init(&status, LogicalRepRelMap);
77 :
78 : /* TODO, use inverse lookup hashtable? */
79 6164 : while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
80 : {
81 5012 : if (entry->localreloid == reloid)
82 : {
83 268 : entry->localrelvalid = false;
84 268 : hash_seq_term(&status);
85 268 : break;
86 : }
87 : }
88 : }
89 : else
90 : {
91 : /* invalidate all cache entries */
92 : HASH_SEQ_STATUS status;
93 :
94 0 : hash_seq_init(&status, LogicalRepRelMap);
95 :
96 0 : while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL)
97 0 : entry->localrelvalid = false;
98 : }
99 : }
100 :
101 : /*
102 : * Initialize the relation map cache.
103 : */
104 : static void
105 736 : logicalrep_relmap_init(void)
106 : {
107 : HASHCTL ctl;
108 :
109 736 : if (!LogicalRepRelMapContext)
110 736 : LogicalRepRelMapContext =
111 736 : AllocSetContextCreate(CacheMemoryContext,
112 : "LogicalRepRelMapContext",
113 : ALLOCSET_DEFAULT_SIZES);
114 :
115 : /* Initialize the relation hash table. */
116 736 : ctl.keysize = sizeof(LogicalRepRelId);
117 736 : ctl.entrysize = sizeof(LogicalRepRelMapEntry);
118 736 : ctl.hcxt = LogicalRepRelMapContext;
119 :
120 736 : LogicalRepRelMap = hash_create("logicalrep relation map cache", 128, &ctl,
121 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
122 :
123 : /* Watch for invalidation events. */
124 736 : CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb,
125 : (Datum) 0);
126 736 : }
127 :
128 : /*
129 : * Free the entry of a relation map cache.
130 : */
131 : static void
132 282 : logicalrep_relmap_free_entry(LogicalRepRelMapEntry *entry)
133 : {
134 : LogicalRepRelation *remoterel;
135 :
136 282 : remoterel = &entry->remoterel;
137 :
138 282 : pfree(remoterel->nspname);
139 282 : pfree(remoterel->relname);
140 :
141 282 : if (remoterel->natts > 0)
142 : {
143 : int i;
144 :
145 848 : for (i = 0; i < remoterel->natts; i++)
146 566 : pfree(remoterel->attnames[i]);
147 :
148 282 : pfree(remoterel->attnames);
149 282 : pfree(remoterel->atttyps);
150 : }
151 282 : bms_free(remoterel->attkeys);
152 :
153 282 : if (entry->attrmap)
154 238 : free_attrmap(entry->attrmap);
155 282 : }
156 :
157 : /*
158 : * Add new entry or update existing entry in the relation map cache.
159 : *
160 : * Called when new relation mapping is sent by the publisher to update
161 : * our expected view of incoming data from said publisher.
162 : */
163 : void
164 1232 : logicalrep_relmap_update(LogicalRepRelation *remoterel)
165 : {
166 : MemoryContext oldctx;
167 : LogicalRepRelMapEntry *entry;
168 : bool found;
169 : int i;
170 :
171 1232 : if (LogicalRepRelMap == NULL)
172 736 : logicalrep_relmap_init();
173 :
174 : /*
175 : * HASH_ENTER returns the existing entry if present or creates a new one.
176 : */
177 1232 : entry = hash_search(LogicalRepRelMap, &remoterel->remoteid,
178 : HASH_ENTER, &found);
179 :
180 1232 : if (found)
181 266 : logicalrep_relmap_free_entry(entry);
182 :
183 1232 : memset(entry, 0, sizeof(LogicalRepRelMapEntry));
184 :
185 : /* Make cached copy of the data */
186 1232 : oldctx = MemoryContextSwitchTo(LogicalRepRelMapContext);
187 1232 : entry->remoterel.remoteid = remoterel->remoteid;
188 1232 : entry->remoterel.nspname = pstrdup(remoterel->nspname);
189 1232 : entry->remoterel.relname = pstrdup(remoterel->relname);
190 1232 : entry->remoterel.natts = remoterel->natts;
191 1232 : entry->remoterel.attnames = palloc(remoterel->natts * sizeof(char *));
192 1232 : entry->remoterel.atttyps = palloc(remoterel->natts * sizeof(Oid));
193 3474 : for (i = 0; i < remoterel->natts; i++)
194 : {
195 2242 : entry->remoterel.attnames[i] = pstrdup(remoterel->attnames[i]);
196 2242 : entry->remoterel.atttyps[i] = remoterel->atttyps[i];
197 : }
198 1232 : entry->remoterel.replident = remoterel->replident;
199 :
200 : /*
201 : * XXX The walsender currently does not transmit the relkind of the remote
202 : * relation when replicating changes. Since we support replicating only
203 : * table changes at present, we default to initializing relkind as
204 : * RELKIND_RELATION. This is needed in CheckSubscriptionRelkind() to check
205 : * if the publisher and subscriber relation kinds are compatible.
206 : */
207 1232 : entry->remoterel.relkind =
208 1232 : (remoterel->relkind == 0) ? RELKIND_RELATION : remoterel->relkind;
209 :
210 1232 : entry->remoterel.attkeys = bms_copy(remoterel->attkeys);
211 1232 : MemoryContextSwitchTo(oldctx);
212 1232 : }
213 :
214 : /*
215 : * Find the attribute index in the remote relation by attribute name.
216 : *
217 : * Returns -1 if not found.
218 : */
219 : static int
220 2580 : logicalrep_rel_att_by_name(LogicalRepRelation *remoterel, const char *attname)
221 : {
222 : int i;
223 :
224 4890 : for (i = 0; i < remoterel->natts; i++)
225 : {
226 4332 : if (strcmp(remoterel->attnames[i], attname) == 0)
227 2022 : return i;
228 : }
229 :
230 558 : return -1;
231 : }
232 :
233 : /*
234 : * Returns a comma-separated string of attribute names based on the provided
235 : * relation and bitmap indicating which attributes to include.
236 : */
237 : static char *
238 4 : logicalrep_get_attrs_str(LogicalRepRelation *remoterel, Bitmapset *atts)
239 : {
240 : StringInfoData attsbuf;
241 4 : int attcnt = 0;
242 4 : int i = -1;
243 :
244 : Assert(!bms_is_empty(atts));
245 :
246 4 : initStringInfo(&attsbuf);
247 :
248 12 : while ((i = bms_next_member(atts, i)) >= 0)
249 : {
250 8 : attcnt++;
251 8 : if (attcnt > 1)
252 : /* translator: This is a separator in a list of entity names. */
253 4 : appendStringInfoString(&attsbuf, _(", "));
254 :
255 8 : appendStringInfo(&attsbuf, _("\"%s\""), remoterel->attnames[i]);
256 : }
257 :
258 4 : return attsbuf.data;
259 : }
260 :
261 : /*
262 : * If attempting to replicate missing or generated columns, report an error.
263 : * Prioritize 'missing' errors if both occur, though the prioritization is
264 : * arbitrary.
265 : */
266 : static void
267 1114 : logicalrep_report_missing_or_gen_attrs(LogicalRepRelation *remoterel,
268 : Bitmapset *missingatts,
269 : Bitmapset *generatedatts)
270 : {
271 1114 : if (!bms_is_empty(missingatts))
272 2 : ereport(ERROR,
273 : errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
274 : errmsg_plural("logical replication target relation \"%s.%s\" is missing replicated column: %s",
275 : "logical replication target relation \"%s.%s\" is missing replicated columns: %s",
276 : bms_num_members(missingatts),
277 : remoterel->nspname,
278 : remoterel->relname,
279 : logicalrep_get_attrs_str(remoterel,
280 : missingatts)));
281 :
282 1112 : if (!bms_is_empty(generatedatts))
283 2 : ereport(ERROR,
284 : errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
285 : errmsg_plural("logical replication target relation \"%s.%s\" has incompatible generated column: %s",
286 : "logical replication target relation \"%s.%s\" has incompatible generated columns: %s",
287 : bms_num_members(generatedatts),
288 : remoterel->nspname,
289 : remoterel->relname,
290 : logicalrep_get_attrs_str(remoterel,
291 : generatedatts)));
292 1110 : }
293 :
294 : /*
295 : * Check if replica identity matches and mark the updatable flag.
296 : *
297 : * We allow for stricter replica identity (fewer columns) on subscriber as
298 : * that will not stop us from finding unique tuple. IE, if publisher has
299 : * identity (id,timestamp) and subscriber just (id) this will not be a
300 : * problem, but in the opposite scenario it will.
301 : *
302 : * We just mark the relation entry as not updatable here if the local
303 : * replica identity is found to be insufficient for applying
304 : * updates/deletes (inserts don't care!) and leave it to
305 : * check_relation_updatable() to throw the actual error if needed.
306 : */
307 : static void
308 1140 : logicalrep_rel_mark_updatable(LogicalRepRelMapEntry *entry)
309 : {
310 : Bitmapset *idkey;
311 1140 : LogicalRepRelation *remoterel = &entry->remoterel;
312 : int i;
313 :
314 1140 : entry->updatable = true;
315 :
316 1140 : idkey = RelationGetIndexAttrBitmap(entry->localrel,
317 : INDEX_ATTR_BITMAP_IDENTITY_KEY);
318 : /* fallback to PK if no replica identity */
319 1140 : if (idkey == NULL)
320 : {
321 406 : idkey = RelationGetIndexAttrBitmap(entry->localrel,
322 : INDEX_ATTR_BITMAP_PRIMARY_KEY);
323 :
324 : /*
325 : * If no replica identity index and no PK, the published table must
326 : * have replica identity FULL.
327 : */
328 406 : if (idkey == NULL && remoterel->replident != REPLICA_IDENTITY_FULL)
329 256 : entry->updatable = false;
330 : }
331 :
332 1140 : i = -1;
333 1876 : while ((i = bms_next_member(idkey, i)) >= 0)
334 : {
335 768 : int attnum = i + FirstLowInvalidHeapAttributeNumber;
336 :
337 768 : if (!AttrNumberIsForUserDefinedAttr(attnum))
338 0 : ereport(ERROR,
339 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
340 : errmsg("logical replication target relation \"%s.%s\" uses "
341 : "system columns in REPLICA IDENTITY index",
342 : remoterel->nspname, remoterel->relname)));
343 :
344 768 : attnum = AttrNumberGetAttrOffset(attnum);
345 :
346 768 : if (entry->attrmap->attnums[attnum] < 0 ||
347 766 : !bms_is_member(entry->attrmap->attnums[attnum], remoterel->attkeys))
348 : {
349 32 : entry->updatable = false;
350 32 : break;
351 : }
352 : }
353 1140 : }
354 :
355 : /*
356 : * Open the local relation associated with the remote one.
357 : *
358 : * Rebuilds the Relcache mapping if it was invalidated by local DDL.
359 : */
360 : LogicalRepRelMapEntry *
361 296646 : logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
362 : {
363 : LogicalRepRelMapEntry *entry;
364 : bool found;
365 : LogicalRepRelation *remoterel;
366 :
367 296646 : if (LogicalRepRelMap == NULL)
368 0 : logicalrep_relmap_init();
369 :
370 : /* Search for existing entry. */
371 296646 : entry = hash_search(LogicalRepRelMap, &remoteid,
372 : HASH_FIND, &found);
373 :
374 296646 : if (!found)
375 0 : elog(ERROR, "no relation map entry for remote relation ID %u",
376 : remoteid);
377 :
378 296646 : remoterel = &entry->remoterel;
379 :
380 : /* Ensure we don't leak a relcache refcount. */
381 296646 : if (entry->localrel)
382 0 : elog(ERROR, "remote relation ID %u is already open", remoteid);
383 :
384 : /*
385 : * When opening and locking a relation, pending invalidation messages are
386 : * processed which can invalidate the relation. Hence, if the entry is
387 : * currently considered valid, try to open the local relation by OID and
388 : * see if invalidation ensues.
389 : */
390 296646 : if (entry->localrelvalid)
391 : {
392 295520 : entry->localrel = try_table_open(entry->localreloid, lockmode);
393 295520 : if (!entry->localrel)
394 : {
395 : /* Table was renamed or dropped. */
396 0 : entry->localrelvalid = false;
397 : }
398 295520 : else if (!entry->localrelvalid)
399 : {
400 : /* Note we release the no-longer-useful lock here. */
401 0 : table_close(entry->localrel, lockmode);
402 0 : entry->localrel = NULL;
403 : }
404 : }
405 :
406 : /*
407 : * If the entry has been marked invalid since we last had lock on it,
408 : * re-open the local relation by name and rebuild all derived data.
409 : */
410 296646 : if (!entry->localrelvalid)
411 : {
412 : Oid relid;
413 : TupleDesc desc;
414 : MemoryContext oldctx;
415 : int i;
416 : Bitmapset *missingatts;
417 1126 : Bitmapset *generatedattrs = NULL;
418 :
419 : /* Release the no-longer-useful attrmap, if any. */
420 1126 : if (entry->attrmap)
421 : {
422 26 : free_attrmap(entry->attrmap);
423 26 : entry->attrmap = NULL;
424 : }
425 :
426 : /* Try to find and lock the relation by name. */
427 1126 : relid = RangeVarGetRelid(makeRangeVar(remoterel->nspname,
428 : remoterel->relname, -1),
429 : lockmode, true);
430 1126 : if (!OidIsValid(relid))
431 12 : ereport(ERROR,
432 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
433 : errmsg("logical replication target relation \"%s.%s\" does not exist",
434 : remoterel->nspname, remoterel->relname)));
435 1114 : entry->localrel = table_open(relid, NoLock);
436 1114 : entry->localreloid = relid;
437 :
438 : /* Check for supported relkind. */
439 1114 : CheckSubscriptionRelkind(entry->localrel->rd_rel->relkind,
440 1114 : remoterel->relkind,
441 1114 : remoterel->nspname, remoterel->relname);
442 :
443 : /*
444 : * Build the mapping of local attribute numbers to remote attribute
445 : * numbers and validate that we don't miss any replicated columns as
446 : * that would result in potentially unwanted data loss.
447 : */
448 1114 : desc = RelationGetDescr(entry->localrel);
449 1114 : oldctx = MemoryContextSwitchTo(LogicalRepRelMapContext);
450 1114 : entry->attrmap = make_attrmap(desc->natts);
451 1114 : MemoryContextSwitchTo(oldctx);
452 :
453 : /* check and report missing attrs, if any */
454 1114 : missingatts = bms_add_range(NULL, 0, remoterel->natts - 1);
455 3698 : for (i = 0; i < desc->natts; i++)
456 : {
457 : int attnum;
458 2584 : Form_pg_attribute attr = TupleDescAttr(desc, i);
459 :
460 2584 : if (attr->attisdropped)
461 : {
462 4 : entry->attrmap->attnums[i] = -1;
463 4 : continue;
464 : }
465 :
466 2580 : attnum = logicalrep_rel_att_by_name(remoterel,
467 2580 : NameStr(attr->attname));
468 :
469 2580 : entry->attrmap->attnums[i] = attnum;
470 2580 : if (attnum >= 0)
471 : {
472 : /* Remember which subscriber columns are generated. */
473 2022 : if (attr->attgenerated)
474 4 : generatedattrs = bms_add_member(generatedattrs, attnum);
475 :
476 2022 : missingatts = bms_del_member(missingatts, attnum);
477 : }
478 : }
479 :
480 1114 : logicalrep_report_missing_or_gen_attrs(remoterel, missingatts,
481 : generatedattrs);
482 :
483 : /* be tidy */
484 1110 : bms_free(generatedattrs);
485 1110 : bms_free(missingatts);
486 :
487 : /*
488 : * Set if the table's replica identity is enough to apply
489 : * update/delete.
490 : */
491 1110 : logicalrep_rel_mark_updatable(entry);
492 :
493 : /*
494 : * Finding a usable index is an infrequent task. It occurs when an
495 : * operation is first performed on the relation, or after invalidation
496 : * of the relation cache entry (such as ANALYZE or CREATE/DROP index
497 : * on the relation).
498 : */
499 1110 : entry->localindexoid = FindLogicalRepLocalIndex(entry->localrel, remoterel,
500 : entry->attrmap);
501 :
502 1110 : entry->localrelvalid = true;
503 : }
504 :
505 296630 : if (entry->state != SUBREL_STATE_READY)
506 1210 : entry->state = GetSubscriptionRelState(MySubscription->oid,
507 : entry->localreloid,
508 : &entry->statelsn);
509 :
510 296630 : return entry;
511 : }
512 :
513 : /*
514 : * Close the previously opened logical relation.
515 : */
516 : void
517 296536 : logicalrep_rel_close(LogicalRepRelMapEntry *rel, LOCKMODE lockmode)
518 : {
519 296536 : table_close(rel->localrel, lockmode);
520 296536 : rel->localrel = NULL;
521 296536 : }
522 :
523 : /*
524 : * Partition cache: look up partition LogicalRepRelMapEntry's
525 : *
526 : * Unlike the relation map cache, this is keyed by partition OID, not remote
527 : * relation OID, because we only have to use this cache in the case where
528 : * partitions are not directly mapped to any remote relation, such as when
529 : * replication is occurring with one of their ancestors as the target.
530 : */
531 :
532 : /*
533 : * Relcache invalidation callback
534 : */
535 : static void
536 576 : logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid)
537 : {
538 : LogicalRepPartMapEntry *entry;
539 :
540 : /* Just to be sure. */
541 576 : if (LogicalRepPartMap == NULL)
542 0 : return;
543 :
544 576 : if (reloid != InvalidOid)
545 : {
546 : HASH_SEQ_STATUS status;
547 :
548 576 : hash_seq_init(&status, LogicalRepPartMap);
549 :
550 : /* TODO, use inverse lookup hashtable? */
551 1644 : while ((entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
552 : {
553 1080 : if (entry->relmapentry.localreloid == reloid)
554 : {
555 12 : entry->relmapentry.localrelvalid = false;
556 12 : hash_seq_term(&status);
557 12 : break;
558 : }
559 : }
560 : }
561 : else
562 : {
563 : /* invalidate all cache entries */
564 : HASH_SEQ_STATUS status;
565 :
566 0 : hash_seq_init(&status, LogicalRepPartMap);
567 :
568 0 : while ((entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
569 0 : entry->relmapentry.localrelvalid = false;
570 : }
571 : }
572 :
573 : /*
574 : * Reset the entries in the partition map that refer to remoterel.
575 : *
576 : * Called when new relation mapping is sent by the publisher to update our
577 : * expected view of incoming data from said publisher.
578 : *
579 : * Note that we don't update the remoterel information in the entry here,
580 : * we will update the information in logicalrep_partition_open to avoid
581 : * unnecessary work.
582 : */
583 : void
584 840 : logicalrep_partmap_reset_relmap(LogicalRepRelation *remoterel)
585 : {
586 : HASH_SEQ_STATUS status;
587 : LogicalRepPartMapEntry *part_entry;
588 : LogicalRepRelMapEntry *entry;
589 :
590 840 : if (LogicalRepPartMap == NULL)
591 772 : return;
592 :
593 68 : hash_seq_init(&status, LogicalRepPartMap);
594 174 : while ((part_entry = (LogicalRepPartMapEntry *) hash_seq_search(&status)) != NULL)
595 : {
596 106 : entry = &part_entry->relmapentry;
597 :
598 106 : if (entry->remoterel.remoteid != remoterel->remoteid)
599 90 : continue;
600 :
601 16 : logicalrep_relmap_free_entry(entry);
602 :
603 16 : memset(entry, 0, sizeof(LogicalRepRelMapEntry));
604 : }
605 : }
606 :
607 : /*
608 : * Initialize the partition map cache.
609 : */
610 : static void
611 12 : logicalrep_partmap_init(void)
612 : {
613 : HASHCTL ctl;
614 :
615 12 : if (!LogicalRepPartMapContext)
616 12 : LogicalRepPartMapContext =
617 12 : AllocSetContextCreate(CacheMemoryContext,
618 : "LogicalRepPartMapContext",
619 : ALLOCSET_DEFAULT_SIZES);
620 :
621 : /* Initialize the relation hash table. */
622 12 : ctl.keysize = sizeof(Oid); /* partition OID */
623 12 : ctl.entrysize = sizeof(LogicalRepPartMapEntry);
624 12 : ctl.hcxt = LogicalRepPartMapContext;
625 :
626 12 : LogicalRepPartMap = hash_create("logicalrep partition map cache", 64, &ctl,
627 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
628 :
629 : /* Watch for invalidation events. */
630 12 : CacheRegisterRelcacheCallback(logicalrep_partmap_invalidate_cb,
631 : (Datum) 0);
632 12 : }
633 :
634 : /*
635 : * logicalrep_partition_open
636 : *
637 : * Returned entry reuses most of the values of the root table's entry, save
638 : * the attribute map, which can be different for the partition. However,
639 : * we must physically copy all the data, in case the root table's entry
640 : * gets freed/rebuilt.
641 : *
642 : * Note there's no logicalrep_partition_close, because the caller closes the
643 : * component relation.
644 : */
645 : LogicalRepRelMapEntry *
646 60 : logicalrep_partition_open(LogicalRepRelMapEntry *root,
647 : Relation partrel, AttrMap *map)
648 : {
649 : LogicalRepRelMapEntry *entry;
650 : LogicalRepPartMapEntry *part_entry;
651 60 : LogicalRepRelation *remoterel = &root->remoterel;
652 60 : Oid partOid = RelationGetRelid(partrel);
653 60 : AttrMap *attrmap = root->attrmap;
654 : bool found;
655 : MemoryContext oldctx;
656 :
657 60 : if (LogicalRepPartMap == NULL)
658 12 : logicalrep_partmap_init();
659 :
660 : /* Search for existing entry. */
661 60 : part_entry = (LogicalRepPartMapEntry *) hash_search(LogicalRepPartMap,
662 : &partOid,
663 : HASH_ENTER, &found);
664 :
665 60 : entry = &part_entry->relmapentry;
666 :
667 : /*
668 : * We must always overwrite entry->localrel with the latest partition
669 : * Relation pointer, because the Relation pointed to by the old value may
670 : * have been cleared after the caller would have closed the partition
671 : * relation after the last use of this entry. Note that localrelvalid is
672 : * only updated by the relcache invalidation callback, so it may still be
673 : * true irrespective of whether the Relation pointed to by localrel has
674 : * been cleared or not.
675 : */
676 60 : if (found && entry->localrelvalid)
677 : {
678 30 : entry->localrel = partrel;
679 30 : return entry;
680 : }
681 :
682 : /* Switch to longer-lived context. */
683 30 : oldctx = MemoryContextSwitchTo(LogicalRepPartMapContext);
684 :
685 30 : if (!found)
686 : {
687 18 : memset(part_entry, 0, sizeof(LogicalRepPartMapEntry));
688 18 : part_entry->partoid = partOid;
689 : }
690 :
691 : /* Release the no-longer-useful attrmap, if any. */
692 30 : if (entry->attrmap)
693 : {
694 2 : free_attrmap(entry->attrmap);
695 2 : entry->attrmap = NULL;
696 : }
697 :
698 30 : if (!entry->remoterel.remoteid)
699 : {
700 : int i;
701 :
702 : /* Remote relation is copied as-is from the root entry. */
703 28 : entry->remoterel.remoteid = remoterel->remoteid;
704 28 : entry->remoterel.nspname = pstrdup(remoterel->nspname);
705 28 : entry->remoterel.relname = pstrdup(remoterel->relname);
706 28 : entry->remoterel.natts = remoterel->natts;
707 28 : entry->remoterel.attnames = palloc(remoterel->natts * sizeof(char *));
708 28 : entry->remoterel.atttyps = palloc(remoterel->natts * sizeof(Oid));
709 88 : for (i = 0; i < remoterel->natts; i++)
710 : {
711 60 : entry->remoterel.attnames[i] = pstrdup(remoterel->attnames[i]);
712 60 : entry->remoterel.atttyps[i] = remoterel->atttyps[i];
713 : }
714 28 : entry->remoterel.replident = remoterel->replident;
715 28 : entry->remoterel.attkeys = bms_copy(remoterel->attkeys);
716 : }
717 :
718 30 : entry->localrel = partrel;
719 30 : entry->localreloid = partOid;
720 :
721 : /*
722 : * If the partition's attributes don't match the root relation's, we'll
723 : * need to make a new attrmap which maps partition attribute numbers to
724 : * remoterel's, instead of the original which maps root relation's
725 : * attribute numbers to remoterel's.
726 : *
727 : * Note that 'map' which comes from the tuple routing data structure
728 : * contains 1-based attribute numbers (of the parent relation). However,
729 : * the map in 'entry', a logical replication data structure, contains
730 : * 0-based attribute numbers (of the remote relation).
731 : */
732 30 : if (map)
733 : {
734 : AttrNumber attno;
735 :
736 16 : entry->attrmap = make_attrmap(map->maplen);
737 68 : for (attno = 0; attno < entry->attrmap->maplen; attno++)
738 : {
739 52 : AttrNumber root_attno = map->attnums[attno];
740 :
741 : /* 0 means it's a dropped attribute. See comments atop AttrMap. */
742 52 : if (root_attno == 0)
743 4 : entry->attrmap->attnums[attno] = -1;
744 : else
745 48 : entry->attrmap->attnums[attno] = attrmap->attnums[root_attno - 1];
746 : }
747 : }
748 : else
749 : {
750 : /* Lacking copy_attmap, do this the hard way. */
751 14 : entry->attrmap = make_attrmap(attrmap->maplen);
752 14 : memcpy(entry->attrmap->attnums, attrmap->attnums,
753 14 : attrmap->maplen * sizeof(AttrNumber));
754 : }
755 :
756 : /* Set if the table's replica identity is enough to apply update/delete. */
757 30 : logicalrep_rel_mark_updatable(entry);
758 :
759 : /* state and statelsn are left set to 0. */
760 30 : MemoryContextSwitchTo(oldctx);
761 :
762 : /*
763 : * Finding a usable index is an infrequent task. It occurs when an
764 : * operation is first performed on the relation, or after invalidation of
765 : * the relation cache entry (such as ANALYZE or CREATE/DROP index on the
766 : * relation).
767 : *
768 : * We also prefer to run this code on the oldctx so that we do not leak
769 : * anything in the LogicalRepPartMapContext (hence CacheMemoryContext).
770 : */
771 30 : entry->localindexoid = FindLogicalRepLocalIndex(partrel, remoterel,
772 : entry->attrmap);
773 :
774 30 : entry->localrelvalid = true;
775 :
776 30 : return entry;
777 : }
778 :
779 : /*
780 : * Returns the oid of an index that can be used by the apply worker to scan
781 : * the relation.
782 : *
783 : * We expect to call this function when REPLICA IDENTITY FULL is defined for
784 : * the remote relation.
785 : *
786 : * If no suitable index is found, returns InvalidOid.
787 : */
788 : static Oid
789 126 : FindUsableIndexForReplicaIdentityFull(Relation localrel, AttrMap *attrmap)
790 : {
791 126 : List *idxlist = RelationGetIndexList(localrel);
792 :
793 228 : foreach_oid(idxoid, idxlist)
794 : {
795 : bool isUsableIdx;
796 : Relation idxRel;
797 :
798 40 : idxRel = index_open(idxoid, AccessShareLock);
799 40 : isUsableIdx = IsIndexUsableForReplicaIdentityFull(idxRel, attrmap);
800 40 : index_close(idxRel, AccessShareLock);
801 :
802 : /* Return the first eligible index found */
803 40 : if (isUsableIdx)
804 32 : return idxoid;
805 : }
806 :
807 94 : return InvalidOid;
808 : }
809 :
810 : /*
811 : * Returns true if the index is usable for replica identity full.
812 : *
813 : * The index must have an equal strategy for each key column, be non-partial,
814 : * and the leftmost field must be a column (not an expression) that references
815 : * the remote relation column. These limitations help to keep the index scan
816 : * similar to PK/RI index scans.
817 : *
818 : * attrmap is a map of local attributes to remote ones. We can consult this
819 : * map to check whether the local index attribute has a corresponding remote
820 : * attribute.
821 : *
822 : * Note that the limitations of index scans for replica identity full only
823 : * adhere to a subset of the limitations of PK/RI. For example, we support
824 : * columns that are marked as [NULL] or we are not interested in the [NOT
825 : * DEFERRABLE] aspect of constraints here. It works for us because we always
826 : * compare the tuples for non-PK/RI index scans. See
827 : * RelationFindReplTupleByIndex().
828 : *
829 : * XXX: To support partial indexes, the required changes are likely to be larger.
830 : * If none of the tuples satisfy the expression for the index scan, we fall back
831 : * to sequential execution, which might not be a good idea in some cases.
832 : */
833 : bool
834 40 : IsIndexUsableForReplicaIdentityFull(Relation idxrel, AttrMap *attrmap)
835 : {
836 : AttrNumber keycol;
837 : oidvector *indclass;
838 :
839 : /* The index must not be a partial index */
840 40 : if (!heap_attisnull(idxrel->rd_indextuple, Anum_pg_index_indpred, NULL))
841 4 : return false;
842 :
843 : Assert(idxrel->rd_index->indnatts >= 1);
844 :
845 36 : indclass = (oidvector *) DatumGetPointer(SysCacheGetAttrNotNull(INDEXRELID,
846 36 : idxrel->rd_indextuple,
847 : Anum_pg_index_indclass));
848 :
849 : /* Ensure that the index has a valid equal strategy for each key column */
850 104 : for (int i = 0; i < idxrel->rd_index->indnkeyatts; i++)
851 : {
852 : Oid opfamily;
853 :
854 68 : opfamily = get_opclass_family(indclass->values[i]);
855 68 : if (IndexAmTranslateCompareType(COMPARE_EQ, idxrel->rd_rel->relam, opfamily, true) == InvalidStrategy)
856 0 : return false;
857 : }
858 :
859 : /*
860 : * For indexes other than PK and REPLICA IDENTITY, we need to match the
861 : * local and remote tuples. The equality routine tuples_equal() cannot
862 : * accept a data type where the type cache cannot provide an equality
863 : * operator.
864 : */
865 104 : for (int i = 0; i < idxrel->rd_att->natts; i++)
866 : {
867 : TypeCacheEntry *typentry;
868 :
869 68 : typentry = lookup_type_cache(TupleDescAttr(idxrel->rd_att, i)->atttypid, TYPECACHE_EQ_OPR_FINFO);
870 68 : if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
871 0 : return false;
872 : }
873 :
874 : /* The leftmost index field must not be an expression */
875 36 : keycol = idxrel->rd_index->indkey.values[0];
876 36 : if (!AttributeNumberIsValid(keycol))
877 4 : return false;
878 :
879 : /*
880 : * And the leftmost index field must reference the remote relation column.
881 : * This is because if it doesn't, the sequential scan is favorable over
882 : * index scan in most cases.
883 : */
884 32 : if (attrmap->maplen <= AttrNumberGetAttrOffset(keycol) ||
885 32 : attrmap->attnums[AttrNumberGetAttrOffset(keycol)] < 0)
886 0 : return false;
887 :
888 : /*
889 : * The given index access method must implement "amgettuple", which will
890 : * be used later to fetch the tuples. See RelationFindReplTupleByIndex().
891 : */
892 32 : if (GetIndexAmRoutineByAmId(idxrel->rd_rel->relam, false)->amgettuple == NULL)
893 0 : return false;
894 :
895 32 : return true;
896 : }
897 :
898 : /*
899 : * Return the OID of the replica identity index if one is defined;
900 : * the OID of the PK if one exists and is not deferrable;
901 : * otherwise, InvalidOid.
902 : */
903 : Oid
904 145284 : GetRelationIdentityOrPK(Relation rel)
905 : {
906 : Oid idxoid;
907 :
908 145284 : idxoid = RelationGetReplicaIndex(rel);
909 :
910 145284 : if (!OidIsValid(idxoid))
911 434 : idxoid = RelationGetPrimaryKeyIndex(rel, false);
912 :
913 145284 : return idxoid;
914 : }
915 :
916 : /*
917 : * Returns the index OID if we can use an index for the subscriber. Otherwise,
918 : * returns InvalidOid.
919 : */
920 : static Oid
921 1140 : FindLogicalRepLocalIndex(Relation localrel, LogicalRepRelation *remoterel,
922 : AttrMap *attrMap)
923 : {
924 : Oid idxoid;
925 :
926 : /*
927 : * We never need an index OID for partitioned tables; we always rely on the
928 : * leaf partition's index.
929 : */
930 1140 : if (localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
931 132 : return InvalidOid;
932 :
933 : /*
934 : * Simple case, we already have a primary key or a replica identity index.
935 : */
936 1008 : idxoid = GetRelationIdentityOrPK(localrel);
937 1008 : if (OidIsValid(idxoid))
938 654 : return idxoid;
939 :
940 354 : if (remoterel->replident == REPLICA_IDENTITY_FULL)
941 : {
942 : /*
943 : * We are looking for one more opportunity to use an index. If
944 : * there are any indexes defined on the local relation, try to pick a
945 : * suitable index.
946 : *
947 : * The index selection safely assumes that all the columns are going
948 : * to be available for the index scan given that the remote relation has
949 : * replica identity full.
950 : *
951 : * Note that we are not using the planner to find the cheapest method
952 : * to scan the relation as that would require us to either use lower
953 : * level planner functions which would be a maintenance burden in the
954 : * long run or use the full-fledged planner which could cause
955 : * overhead.
956 : */
957 126 : return FindUsableIndexForReplicaIdentityFull(localrel, attrMap);
958 : }
959 :
960 228 : return InvalidOid;
961 : }