LCOV - code coverage report
Current view: top level - src/backend/replication/logical - tablesync.c (source / functions)
Test:         PostgreSQL 19devel
Date:         2025-11-08 11:17:53
Coverage:     Lines:     444 hit / 487 total  (91.2 %)
              Functions:  16 hit /  16 total (100.0 %)
Legend:       Lines: hit / not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  * tablesync.c
       3             :  *    PostgreSQL logical replication: initial table data synchronization
       4             :  *
       5             :  * Copyright (c) 2012-2025, PostgreSQL Global Development Group
       6             :  *
       7             :  * IDENTIFICATION
       8             :  *    src/backend/replication/logical/tablesync.c
       9             :  *
      10             :  * NOTES
      11             :  *    This file contains code for initial table data synchronization for
      12             :  *    logical replication.
      13             :  *
      14             :  *    The initial data synchronization is done separately for each table,
      15             :  *    in a separate apply worker that only fetches the initial snapshot data
      16             :  *    from the publisher and then synchronizes the position in the stream with
      17             :  *    the leader apply worker.
      18             :  *
      19             :  *    There are several reasons for doing the synchronization this way:
      20             :  *     - It allows us to parallelize the initial data synchronization
      21             :  *       which lowers the time needed for it to happen.
       22             :  *     - The initial synchronization does not have to hold the xid and LSN
       23             :  *       for the time it takes to copy the data of all tables, which causes
       24             :  *       less bloat and lower disk consumption than doing the
       25             :  *       synchronization in a single process for the whole database.
      26             :  *     - It allows us to synchronize any tables added after the initial
      27             :  *       synchronization has finished.
      28             :  *
      29             :  *    The stream position synchronization works in multiple steps:
      30             :  *     - Apply worker requests a tablesync worker to start, setting the new
      31             :  *       table state to INIT.
      32             :  *     - Tablesync worker starts; changes table state from INIT to DATASYNC while
      33             :  *       copying.
      34             :  *     - Tablesync worker does initial table copy; there is a FINISHEDCOPY (sync
      35             :  *       worker specific) state to indicate when the copy phase has completed, so
      36             :  *       if the worker crashes with this (non-memory) state then the copy will not
      37             :  *       be re-attempted.
      38             :  *     - Tablesync worker then sets table state to SYNCWAIT; waits for state change.
      39             :  *     - Apply worker periodically checks for tables in SYNCWAIT state.  When
      40             :  *       any appear, it sets the table state to CATCHUP and starts loop-waiting
      41             :  *       until either the table state is set to SYNCDONE or the sync worker
      42             :  *       exits.
      43             :  *     - After the sync worker has seen the state change to CATCHUP, it will
      44             :  *       read the stream and apply changes (acting like an apply worker) until
      45             :  *       it catches up to the specified stream position.  Then it sets the
      46             :  *       state to SYNCDONE.  There might be zero changes applied between
      47             :  *       CATCHUP and SYNCDONE, because the sync worker might be ahead of the
      48             :  *       apply worker.
      49             :  *     - Once the state is set to SYNCDONE, the apply will continue tracking
      50             :  *       the table until it reaches the SYNCDONE stream position, at which
      51             :  *       point it sets state to READY and stops tracking.  Again, there might
      52             :  *       be zero changes in between.
      53             :  *
      54             :  *    So the state progression is always: INIT -> DATASYNC -> FINISHEDCOPY
      55             :  *    -> SYNCWAIT -> CATCHUP -> SYNCDONE -> READY.
      56             :  *
      57             :  *    The catalog pg_subscription_rel is used to keep information about
      58             :  *    subscribed tables and their state.  The catalog holds all states
      59             :  *    except SYNCWAIT and CATCHUP which are only in shared memory.
      60             :  *
      61             :  *    Example flows look like this:
      62             :  *     - Apply is in front:
      63             :  *        sync:8
      64             :  *          -> set in catalog FINISHEDCOPY
      65             :  *          -> set in memory SYNCWAIT
      66             :  *        apply:10
      67             :  *          -> set in memory CATCHUP
      68             :  *          -> enter wait-loop
      69             :  *        sync:10
      70             :  *          -> set in catalog SYNCDONE
      71             :  *          -> exit
      72             :  *        apply:10
      73             :  *          -> exit wait-loop
      74             :  *          -> continue rep
      75             :  *        apply:11
      76             :  *          -> set in catalog READY
      77             :  *
      78             :  *     - Sync is in front:
      79             :  *        sync:10
      80             :  *          -> set in catalog FINISHEDCOPY
      81             :  *          -> set in memory SYNCWAIT
      82             :  *        apply:8
      83             :  *          -> set in memory CATCHUP
      84             :  *          -> continue per-table filtering
      85             :  *        sync:10
      86             :  *          -> set in catalog SYNCDONE
      87             :  *          -> exit
      88             :  *        apply:10
      89             :  *          -> set in catalog READY
      90             :  *          -> stop per-table filtering
      91             :  *          -> continue rep
      92             :  *-------------------------------------------------------------------------
      93             :  */
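
The state progression described above corresponds to the single-character states kept in pg_subscription_rel (with SYNCWAIT and CATCHUP living only in the worker's shared memory). As a rough illustration only -- this helper is not part of tablesync.c, and it assumes the SUBREL_STATE_* constants from catalog/pg_subscription_rel.h -- the progression could be named like this:

    #include "postgres.h"
    #include "catalog/pg_subscription_rel.h"

    /*
     * Illustrative sketch: translate a tablesync state character into the
     * progression described in the header comment above.  SYNCWAIT and
     * CATCHUP never appear in the catalog; they exist only in shared memory.
     */
    static const char *
    tablesync_state_name(char state)
    {
        switch (state)
        {
            case SUBREL_STATE_INIT:         return "INIT";
            case SUBREL_STATE_DATASYNC:     return "DATASYNC";
            case SUBREL_STATE_FINISHEDCOPY: return "FINISHEDCOPY";
            case SUBREL_STATE_SYNCWAIT:     return "SYNCWAIT";   /* in-memory only */
            case SUBREL_STATE_CATCHUP:      return "CATCHUP";    /* in-memory only */
            case SUBREL_STATE_SYNCDONE:     return "SYNCDONE";
            case SUBREL_STATE_READY:        return "READY";
            default:                        return "UNKNOWN";
        }
    }
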
      94             : 
      95             : #include "postgres.h"
      96             : 
      97             : #include "access/table.h"
      98             : #include "access/xact.h"
      99             : #include "catalog/indexing.h"
     100             : #include "catalog/pg_subscription_rel.h"
     101             : #include "catalog/pg_type.h"
     102             : #include "commands/copy.h"
     103             : #include "miscadmin.h"
     104             : #include "nodes/makefuncs.h"
     105             : #include "parser/parse_relation.h"
     106             : #include "pgstat.h"
     107             : #include "replication/logicallauncher.h"
     108             : #include "replication/logicalrelation.h"
     109             : #include "replication/logicalworker.h"
     110             : #include "replication/origin.h"
     111             : #include "replication/slot.h"
     112             : #include "replication/walreceiver.h"
     113             : #include "replication/worker_internal.h"
     114             : #include "storage/ipc.h"
     115             : #include "storage/lmgr.h"
     116             : #include "utils/acl.h"
     117             : #include "utils/array.h"
     118             : #include "utils/builtins.h"
     119             : #include "utils/lsyscache.h"
     120             : #include "utils/rls.h"
     121             : #include "utils/snapmgr.h"
     122             : #include "utils/syscache.h"
     123             : #include "utils/usercontext.h"
     124             : 
     125             : List       *table_states_not_ready = NIL;
     126             : 
     127             : static StringInfo copybuf = NULL;
     128             : 
     129             : /*
     130             :  * Wait until the relation sync state is set in the catalog to the expected
     131             :  * one; return true when it happens.
     132             :  *
      133             :  * Returns false if the table sync worker or the table itself has
      134             :  * disappeared, or if the table state has been reset.
     135             :  *
     136             :  * Currently, this is used in the apply worker when transitioning from
     137             :  * CATCHUP state to SYNCDONE.
     138             :  */
     139             : static bool
     140         364 : wait_for_table_state_change(Oid relid, char expected_state)
     141             : {
     142             :     char        state;
     143             : 
     144             :     for (;;)
     145         442 :     {
     146             :         LogicalRepWorker *worker;
     147             :         XLogRecPtr  statelsn;
     148             : 
     149         806 :         CHECK_FOR_INTERRUPTS();
     150             : 
     151         806 :         InvalidateCatalogSnapshot();
     152         806 :         state = GetSubscriptionRelState(MyLogicalRepWorker->subid,
     153             :                                         relid, &statelsn);
     154             : 
     155         806 :         if (state == SUBREL_STATE_UNKNOWN)
     156           0 :             break;
     157             : 
     158         806 :         if (state == expected_state)
     159         364 :             return true;
     160             : 
     161             :         /* Check if the sync worker is still running and bail if not. */
     162         442 :         LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
     163         442 :         worker = logicalrep_worker_find(WORKERTYPE_TABLESYNC,
     164         442 :                                         MyLogicalRepWorker->subid, relid,
     165             :                                         false);
     166         442 :         LWLockRelease(LogicalRepWorkerLock);
     167         442 :         if (!worker)
     168           0 :             break;
     169             : 
     170         442 :         (void) WaitLatch(MyLatch,
     171             :                          WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
     172             :                          1000L, WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE);
     173             : 
     174         442 :         ResetLatch(MyLatch);
     175             :     }
     176             : 
     177           0 :     return false;
     178             : }
     179             : 
     180             : /*
     181             :  * Wait until the apply worker changes the state of our synchronization
     182             :  * worker to the expected one.
     183             :  *
     184             :  * Used when transitioning from SYNCWAIT state to CATCHUP.
     185             :  *
     186             :  * Returns false if the apply worker has disappeared.
     187             :  */
     188             : static bool
     189         368 : wait_for_worker_state_change(char expected_state)
     190             : {
     191             :     int         rc;
     192             : 
     193             :     for (;;)
     194         370 :     {
     195             :         LogicalRepWorker *worker;
     196             : 
     197         738 :         CHECK_FOR_INTERRUPTS();
     198             : 
     199             :         /*
     200             :          * Done if already in correct state.  (We assume this fetch is atomic
     201             :          * enough to not give a misleading answer if we do it with no lock.)
     202             :          */
     203         738 :         if (MyLogicalRepWorker->relstate == expected_state)
     204         368 :             return true;
     205             : 
     206             :         /*
     207             :          * Bail out if the apply worker has died, else signal it we're
     208             :          * waiting.
     209             :          */
     210         370 :         LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
     211         370 :         worker = logicalrep_worker_find(WORKERTYPE_APPLY,
     212         370 :                                         MyLogicalRepWorker->subid, InvalidOid,
     213             :                                         false);
     214         370 :         if (worker && worker->proc)
     215         370 :             logicalrep_worker_wakeup_ptr(worker);
     216         370 :         LWLockRelease(LogicalRepWorkerLock);
     217         370 :         if (!worker)
     218           0 :             break;
     219             : 
     220             :         /*
     221             :          * Wait.  We expect to get a latch signal back from the apply worker,
     222             :          * but use a timeout in case it dies without sending one.
     223             :          */
     224         370 :         rc = WaitLatch(MyLatch,
     225             :                        WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
     226             :                        1000L, WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE);
     227             : 
     228         370 :         if (rc & WL_LATCH_SET)
     229         370 :             ResetLatch(MyLatch);
     230             :     }
     231             : 
     232           0 :     return false;
     233             : }
     234             : 
     235             : /*
     236             :  * Handle table synchronization cooperation from the synchronization
     237             :  * worker.
     238             :  *
      239             :  * If the sync worker is in CATCHUP state and has reached (or passed) the
     240             :  * predetermined synchronization point in the WAL stream, mark the table as
     241             :  * SYNCDONE and finish.
     242             :  */
     243             : void
     244         452 : ProcessSyncingTablesForSync(XLogRecPtr current_lsn)
     245             : {
     246         452 :     SpinLockAcquire(&MyLogicalRepWorker->relmutex);
     247             : 
     248         452 :     if (MyLogicalRepWorker->relstate == SUBREL_STATE_CATCHUP &&
     249         452 :         current_lsn >= MyLogicalRepWorker->relstate_lsn)
     250             :     {
     251             :         TimeLineID  tli;
     252         368 :         char        syncslotname[NAMEDATALEN] = {0};
     253         368 :         char        originname[NAMEDATALEN] = {0};
     254             : 
     255         368 :         MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCDONE;
     256         368 :         MyLogicalRepWorker->relstate_lsn = current_lsn;
     257             : 
     258         368 :         SpinLockRelease(&MyLogicalRepWorker->relmutex);
     259             : 
     260             :         /*
     261             :          * UpdateSubscriptionRelState must be called within a transaction.
     262             :          */
     263         368 :         if (!IsTransactionState())
     264         368 :             StartTransactionCommand();
     265             : 
     266         368 :         UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
     267         368 :                                    MyLogicalRepWorker->relid,
     268         368 :                                    MyLogicalRepWorker->relstate,
     269         368 :                                    MyLogicalRepWorker->relstate_lsn,
     270             :                                    false);
     271             : 
     272             :         /*
     273             :          * End streaming so that LogRepWorkerWalRcvConn can be used to drop
     274             :          * the slot.
     275             :          */
     276         368 :         walrcv_endstreaming(LogRepWorkerWalRcvConn, &tli);
     277             : 
     278             :         /*
     279             :          * Cleanup the tablesync slot.
     280             :          *
      281             :          * This has to be done after updating the state because otherwise, if
      282             :          * there is an error while doing the database operations, we won't be
      283             :          * able to roll back the dropped slot.
     284             :          */
     285         368 :         ReplicationSlotNameForTablesync(MyLogicalRepWorker->subid,
     286         368 :                                         MyLogicalRepWorker->relid,
     287             :                                         syncslotname,
     288             :                                         sizeof(syncslotname));
     289             : 
     290             :         /*
      291             :          * It is important to raise an error if we are unable to drop the slot;
      292             :          * otherwise, it won't be dropped until the corresponding subscription
      293             :          * is dropped.  So we pass missing_ok = false.
     294             :          */
     295         368 :         ReplicationSlotDropAtPubNode(LogRepWorkerWalRcvConn, syncslotname, false);
     296             : 
     297         368 :         CommitTransactionCommand();
     298         368 :         pgstat_report_stat(false);
     299             : 
     300             :         /*
      301             :          * Start a new transaction to clean up the tablesync origin tracking.
      302             :          * This transaction will be ended within FinishSyncWorker().  Even if
      303             :          * we fail to remove the origin here, the apply worker will clean it
      304             :          * up afterward.
      305             :          *
      306             :          * We need to do this after the table state is set to SYNCDONE.
      307             :          * Otherwise, if an error occurs while performing the database
      308             :          * operation, the worker is restarted but the in-memory replication
      309             :          * progress (remote_lsn), which would have been cleared before the
      310             :          * restart, is not rolled back.  The restarted worker would then use
      311             :          * an invalid replication progress state, resulting in replay of
      312             :          * transactions that have already been applied.
     313             :          */
     314         368 :         StartTransactionCommand();
     315             : 
     316         368 :         ReplicationOriginNameForLogicalRep(MyLogicalRepWorker->subid,
     317         368 :                                            MyLogicalRepWorker->relid,
     318             :                                            originname,
     319             :                                            sizeof(originname));
     320             : 
     321             :         /*
      322             :          * Resetting the origin session releases this worker's ownership of
      323             :          * the origin, which is needed to allow the origin to be dropped.
     324             :          */
     325         368 :         replorigin_session_reset();
     326         368 :         replorigin_session_origin = InvalidRepOriginId;
     327         368 :         replorigin_session_origin_lsn = InvalidXLogRecPtr;
     328         368 :         replorigin_session_origin_timestamp = 0;
     329             : 
     330             :         /*
      331             :          * Drop the tablesync's origin tracking if it exists.
     332             :          *
      333             :          * There is a chance that the user is concurrently refreshing the
      334             :          * subscription (which removes the table state and its origin), or
      335             :          * that the apply worker has already removed this origin.  So we pass
      336             :          * missing_ok = true.
     337             :          */
     338         368 :         replorigin_drop_by_name(originname, true, false);
     339             : 
     340         368 :         FinishSyncWorker();
     341             :     }
     342             :     else
     343          84 :         SpinLockRelease(&MyLogicalRepWorker->relmutex);
     344          84 : }
     345             : 
     346             : /*
     347             :  * Handle table synchronization cooperation from the apply worker.
     348             :  *
     349             :  * Walk over all subscription tables that are individually tracked by the
     350             :  * apply process (currently, all that have state other than
     351             :  * SUBREL_STATE_READY) and manage synchronization for them.
     352             :  *
     353             :  * If there are tables that need synchronizing and are not being synchronized
     354             :  * yet, start sync workers for them (if there are free slots for sync
     355             :  * workers).  To prevent starting the sync worker for the same relation at a
     356             :  * high frequency after a failure, we store its last start time with each sync
     357             :  * state info.  We start the sync worker for the same relation after waiting
     358             :  * at least wal_retrieve_retry_interval.
     359             :  *
     360             :  * For tables that are being synchronized already, check if sync workers
     361             :  * either need action from the apply worker or have finished.  This is the
     362             :  * SYNCWAIT to CATCHUP transition.
     363             :  *
     364             :  * If the synchronization position is reached (SYNCDONE), then the table can
     365             :  * be marked as READY and is no longer tracked.
     366             :  */
     367             : void
     368       21638 : ProcessSyncingTablesForApply(XLogRecPtr current_lsn)
     369             : {
     370             :     struct tablesync_start_time_mapping
     371             :     {
     372             :         Oid         relid;
     373             :         TimestampTz last_start_time;
     374             :     };
     375             :     static HTAB *last_start_times = NULL;
     376             :     ListCell   *lc;
     377             :     bool        started_tx;
     378       21638 :     bool        should_exit = false;
     379       21638 :     Relation    rel = NULL;
     380             : 
     381             :     Assert(!IsTransactionState());
     382             : 
     383             :     /* We need up-to-date sync state info for subscription tables here. */
     384       21638 :     FetchRelationStates(NULL, NULL, &started_tx);
     385             : 
     386             :     /*
     387             :      * Prepare a hash table for tracking last start times of workers, to avoid
     388             :      * immediate restarts.  We don't need it if there are no tables that need
     389             :      * syncing.
     390             :      */
     391       21638 :     if (table_states_not_ready != NIL && !last_start_times)
     392         242 :     {
     393             :         HASHCTL     ctl;
     394             : 
     395         242 :         ctl.keysize = sizeof(Oid);
     396         242 :         ctl.entrysize = sizeof(struct tablesync_start_time_mapping);
     397         242 :         last_start_times = hash_create("Logical replication table sync worker start times",
     398             :                                        256, &ctl, HASH_ELEM | HASH_BLOBS);
     399             :     }
     400             : 
     401             :     /*
     402             :      * Clean up the hash table when we're done with all tables (just to
     403             :      * release the bit of memory).
     404             :      */
     405       21396 :     else if (table_states_not_ready == NIL && last_start_times)
     406             :     {
     407         180 :         hash_destroy(last_start_times);
     408         180 :         last_start_times = NULL;
     409             :     }
     410             : 
     411             :     /*
     412             :      * Process all tables that are being synchronized.
     413             :      */
     414       24974 :     foreach(lc, table_states_not_ready)
     415             :     {
     416        3340 :         SubscriptionRelState *rstate = (SubscriptionRelState *) lfirst(lc);
     417             : 
     418        3340 :         if (!started_tx)
     419             :         {
     420         566 :             StartTransactionCommand();
     421         566 :             started_tx = true;
     422             :         }
     423             : 
     424             :         Assert(get_rel_relkind(rstate->relid) != RELKIND_SEQUENCE);
     425             : 
     426        3340 :         if (rstate->state == SUBREL_STATE_SYNCDONE)
     427             :         {
     428             :             /*
     429             :              * Apply has caught up to the position where the table sync has
     430             :              * finished.  Mark the table as ready so that the apply will just
     431             :              * continue to replicate it normally.
     432             :              */
     433         362 :             if (current_lsn >= rstate->lsn)
     434             :             {
     435             :                 char        originname[NAMEDATALEN];
     436             : 
     437         360 :                 rstate->state = SUBREL_STATE_READY;
     438         360 :                 rstate->lsn = current_lsn;
     439             : 
     440             :                 /*
     441             :                  * Remove the tablesync origin tracking if exists.
     442             :                  *
     443             :                  * There is a chance that the user is concurrently performing
     444             :                  * refresh for the subscription where we remove the table
     445             :                  * state and its origin or the tablesync worker would have
     446             :                  * already removed this origin. We can't rely on tablesync
     447             :                  * worker to remove the origin tracking as if there is any
     448             :                  * error while dropping we won't restart it to drop the
     449             :                  * origin. So passing missing_ok = true.
     450             :                  *
     451             :                  * Lock the subscription and origin in the same order as we
     452             :                  * are doing during DDL commands to avoid deadlocks. See
     453             :                  * AlterSubscription_refresh.
     454             :                  */
     455         360 :                 LockSharedObject(SubscriptionRelationId, MyLogicalRepWorker->subid,
     456             :                                  0, AccessShareLock);
     457             : 
     458         360 :                 if (!rel)
     459         358 :                     rel = table_open(SubscriptionRelRelationId, RowExclusiveLock);
     460             : 
     461         360 :                 ReplicationOriginNameForLogicalRep(MyLogicalRepWorker->subid,
     462             :                                                    rstate->relid,
     463             :                                                    originname,
     464             :                                                    sizeof(originname));
     465         360 :                 replorigin_drop_by_name(originname, true, false);
     466             : 
     467             :                 /*
     468             :                  * Update the state to READY only after the origin cleanup.
     469             :                  */
     470         360 :                 UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
     471         360 :                                            rstate->relid, rstate->state,
     472             :                                            rstate->lsn, true);
     473             :             }
     474             :         }
     475             :         else
     476             :         {
     477             :             LogicalRepWorker *syncworker;
     478             : 
     479             :             /*
     480             :              * Look for a sync worker for this relation.
     481             :              */
     482        2978 :             LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
     483             : 
     484        2978 :             syncworker = logicalrep_worker_find(WORKERTYPE_TABLESYNC,
     485        2978 :                                                 MyLogicalRepWorker->subid,
     486             :                                                 rstate->relid, false);
     487             : 
     488        2978 :             if (syncworker)
     489             :             {
     490             :                 /* Found one, update our copy of its state */
     491        1366 :                 SpinLockAcquire(&syncworker->relmutex);
     492        1366 :                 rstate->state = syncworker->relstate;
     493        1366 :                 rstate->lsn = syncworker->relstate_lsn;
     494        1366 :                 if (rstate->state == SUBREL_STATE_SYNCWAIT)
     495             :                 {
     496             :                     /*
     497             :                      * Sync worker is waiting for apply.  Tell sync worker it
     498             :                      * can catchup now.
     499             :                      */
     500         364 :                     syncworker->relstate = SUBREL_STATE_CATCHUP;
     501         364 :                     syncworker->relstate_lsn =
     502         364 :                         Max(syncworker->relstate_lsn, current_lsn);
     503             :                 }
     504        1366 :                 SpinLockRelease(&syncworker->relmutex);
     505             : 
     506             :                 /* If we told worker to catch up, wait for it. */
     507        1366 :                 if (rstate->state == SUBREL_STATE_SYNCWAIT)
     508             :                 {
     509             :                     /* Signal the sync worker, as it may be waiting for us. */
     510         364 :                     if (syncworker->proc)
     511         364 :                         logicalrep_worker_wakeup_ptr(syncworker);
     512             : 
     513             :                     /* Now safe to release the LWLock */
     514         364 :                     LWLockRelease(LogicalRepWorkerLock);
     515             : 
     516         364 :                     if (started_tx)
     517             :                     {
     518             :                         /*
     519             :                          * We must commit the existing transaction to release
     520             :                          * the existing locks before entering a busy loop.
      521             :                          * This is required to avoid undetected deadlocks:
      522             :                          * the deadlock detector cannot see a worker that is
      523             :                          * waiting on a latch while still holding locks.
     524             :                          *
     525             :                          * Also close any tables prior to the commit.
     526             :                          */
     527         364 :                         if (rel)
     528             :                         {
     529          56 :                             table_close(rel, NoLock);
     530          56 :                             rel = NULL;
     531             :                         }
     532         364 :                         CommitTransactionCommand();
     533         364 :                         pgstat_report_stat(false);
     534             :                     }
     535             : 
     536             :                     /*
     537             :                      * Enter busy loop and wait for synchronization worker to
     538             :                      * reach expected state (or die trying).
     539             :                      */
     540         364 :                     StartTransactionCommand();
     541         364 :                     started_tx = true;
     542             : 
     543         364 :                     wait_for_table_state_change(rstate->relid,
     544             :                                                 SUBREL_STATE_SYNCDONE);
     545             :                 }
     546             :                 else
     547        1002 :                     LWLockRelease(LogicalRepWorkerLock);
     548             :             }
     549             :             else
     550             :             {
     551             :                 /*
     552             :                  * If there is no sync worker for this table yet, count
     553             :                  * running sync workers for this subscription, while we have
     554             :                  * the lock.
     555             :                  */
     556             :                 int         nsyncworkers =
     557        1612 :                     logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
     558             :                 struct tablesync_start_time_mapping *hentry;
     559             :                 bool        found;
     560             : 
     561             :                 /* Now safe to release the LWLock */
     562        1612 :                 LWLockRelease(LogicalRepWorkerLock);
     563             : 
     564        1612 :                 hentry = hash_search(last_start_times, &rstate->relid,
     565             :                                      HASH_ENTER, &found);
     566        1612 :                 if (!found)
     567         382 :                     hentry->last_start_time = 0;
     568             : 
     569        1612 :                 launch_sync_worker(WORKERTYPE_TABLESYNC, nsyncworkers,
     570             :                                    rstate->relid, &hentry->last_start_time);
     571             :             }
     572             :         }
     573             :     }
     574             : 
     575             :     /* Close table if opened */
     576       21634 :     if (rel)
     577         302 :         table_close(rel, NoLock);
     578             : 
     579             : 
     580       21634 :     if (started_tx)
     581             :     {
     582             :         /*
     583             :          * Even when the two_phase mode is requested by the user, it remains
     584             :          * as 'pending' until all tablesyncs have reached READY state.
     585             :          *
      586             :          * Once that happens, we restart the apply worker and, if the
      587             :          * conditions still hold, the two_phase tri-state will become
      588             :          * 'enabled' at that time.
     589             :          *
     590             :          * Note: If the subscription has no tables then leave the state as
     591             :          * PENDING, which allows ALTER SUBSCRIPTION ... REFRESH PUBLICATION to
     592             :          * work.
     593             :          */
     594        2054 :         if (MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING)
     595             :         {
     596          74 :             CommandCounterIncrement();  /* make updates visible */
     597          74 :             if (AllTablesyncsReady())
     598             :             {
     599          12 :                 ereport(LOG,
     600             :                         (errmsg("logical replication apply worker for subscription \"%s\" will restart so that two_phase can be enabled",
     601             :                                 MySubscription->name)));
     602          12 :                 should_exit = true;
     603             :             }
     604             :         }
     605             : 
     606        2054 :         CommitTransactionCommand();
     607        2054 :         pgstat_report_stat(true);
     608             :     }
     609             : 
     610       21634 :     if (should_exit)
     611             :     {
     612             :         /*
     613             :          * Reset the last-start time for this worker so that the launcher will
     614             :          * restart it without waiting for wal_retrieve_retry_interval.
     615             :          */
     616          12 :         ApplyLauncherForgetWorkerStartTime(MySubscription->oid);
     617             : 
     618          12 :         proc_exit(0);
     619             :     }
     620       21622 : }
     621             : 
     622             : /*
     623             :  * Create list of columns for COPY based on logical relation mapping.
     624             :  */
     625             : static List *
     626         388 : make_copy_attnamelist(LogicalRepRelMapEntry *rel)
     627             : {
     628         388 :     List       *attnamelist = NIL;
     629             :     int         i;
     630             : 
     631        1040 :     for (i = 0; i < rel->remoterel.natts; i++)
     632             :     {
     633         652 :         attnamelist = lappend(attnamelist,
     634         652 :                               makeString(rel->remoterel.attnames[i]));
     635             :     }
     636             : 
     637             : 
     638         388 :     return attnamelist;
     639             : }
     640             : 
     641             : /*
      642             :  * Data source callback for COPY FROM, which reads from the remote
     643             :  * connection and passes the data back to our local COPY.
     644             :  */
     645             : static int
     646       27978 : copy_read_data(void *outbuf, int minread, int maxread)
     647             : {
     648       27978 :     int         bytesread = 0;
     649             :     int         avail;
     650             : 
      651             :     /* If there is leftover data from the previous read, use it. */
     652       27978 :     avail = copybuf->len - copybuf->cursor;
     653       27978 :     if (avail)
     654             :     {
     655           0 :         if (avail > maxread)
     656           0 :             avail = maxread;
     657           0 :         memcpy(outbuf, &copybuf->data[copybuf->cursor], avail);
     658           0 :         copybuf->cursor += avail;
     659           0 :         maxread -= avail;
     660           0 :         bytesread += avail;
     661             :     }
     662             : 
     663       27992 :     while (maxread > 0 && bytesread < minread)
     664             :     {
     665       27992 :         pgsocket    fd = PGINVALID_SOCKET;
     666             :         int         len;
     667       27992 :         char       *buf = NULL;
     668             : 
     669             :         for (;;)
     670             :         {
      671             :             /* Try to read the data. */
     672       27992 :             len = walrcv_receive(LogRepWorkerWalRcvConn, &buf, &fd);
     673             : 
     674       27992 :             CHECK_FOR_INTERRUPTS();
     675             : 
     676       27992 :             if (len == 0)
     677          14 :                 break;
     678       27978 :             else if (len < 0)
     679       27978 :                 return bytesread;
     680             :             else
     681             :             {
     682             :                 /* Process the data */
     683       27594 :                 copybuf->data = buf;
     684       27594 :                 copybuf->len = len;
     685       27594 :                 copybuf->cursor = 0;
     686             : 
     687       27594 :                 avail = copybuf->len - copybuf->cursor;
     688       27594 :                 if (avail > maxread)
     689           0 :                     avail = maxread;
     690       27594 :                 memcpy(outbuf, &copybuf->data[copybuf->cursor], avail);
     691       27594 :                 outbuf = (char *) outbuf + avail;
     692       27594 :                 copybuf->cursor += avail;
     693       27594 :                 maxread -= avail;
     694       27594 :                 bytesread += avail;
     695             :             }
     696             : 
     697       27594 :             if (maxread <= 0 || bytesread >= minread)
     698       27594 :                 return bytesread;
     699             :         }
     700             : 
     701             :         /*
     702             :          * Wait for more data or latch.
     703             :          */
     704          14 :         (void) WaitLatchOrSocket(MyLatch,
     705             :                                  WL_SOCKET_READABLE | WL_LATCH_SET |
     706             :                                  WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
     707             :                                  fd, 1000L, WAIT_EVENT_LOGICAL_SYNC_DATA);
     708             : 
     709          14 :         ResetLatch(MyLatch);
     710             :     }
     711             : 
     712           0 :     return bytesread;
     713             : }
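
copy_read_data() is not called directly; it is handed to the local COPY machinery as a data-source callback, so that COPY FROM pulls its input from the walreceiver connection instead of a file. The real call sits in the table-copy code later in this file, outside this excerpt; the following is only a compressed sketch of that wiring, assuming the BeginCopyFrom()/CopyFrom() interface declared in commands/copy.h:

    #include "postgres.h"
    #include "commands/copy.h"
    #include "nodes/pg_list.h"
    #include "parser/parse_node.h"
    #include "utils/rel.h"

    /*
     * Sketch only: feed COPY FROM from copy_read_data() instead of a file.
     * 'rel' is the already-opened local target relation and 'attnamelist'
     * the column list built by make_copy_attnamelist() above.  (The real
     * code also registers the relation in the ParseState for error context.)
     */
    static void
    copy_from_remote_sketch(Relation rel, List *attnamelist, List *options)
    {
        ParseState *pstate = make_parsestate(NULL);
        CopyFromState cstate;

        copybuf = makeStringInfo();     /* buffer used by copy_read_data() */

        /* NULL filename plus the callback means "read rows from copy_read_data" */
        cstate = BeginCopyFrom(pstate, rel, NULL, NULL, false,
                               copy_read_data, attnamelist, options);
        (void) CopyFrom(cstate);
        EndCopyFrom(cstate);
    }
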
     714             : 
     715             : 
     716             : /*
      717             :  * Get information about the remote relation, in a similar fashion to what
      718             :  * the RELATION message provides during replication.
     719             :  *
     720             :  * This function also returns (a) the relation qualifications to be used in
     721             :  * the COPY command, and (b) whether the remote relation has published any
     722             :  * generated column.
     723             :  */
     724             : static void
     725         392 : fetch_remote_table_info(char *nspname, char *relname, LogicalRepRelation *lrel,
     726             :                         List **qual, bool *gencol_published)
     727             : {
     728             :     WalRcvExecResult *res;
     729             :     StringInfoData cmd;
     730             :     TupleTableSlot *slot;
     731         392 :     Oid         tableRow[] = {OIDOID, CHAROID, CHAROID};
     732         392 :     Oid         attrRow[] = {INT2OID, TEXTOID, OIDOID, BOOLOID, BOOLOID};
     733         392 :     Oid         qualRow[] = {TEXTOID};
     734             :     bool        isnull;
     735             :     int         natt;
     736         392 :     StringInfo  pub_names = NULL;
     737         392 :     Bitmapset  *included_cols = NULL;
     738         392 :     int         server_version = walrcv_server_version(LogRepWorkerWalRcvConn);
     739             : 
     740         392 :     lrel->nspname = nspname;
     741         392 :     lrel->relname = relname;
     742             : 
     743             :     /* First fetch Oid and replica identity. */
     744         392 :     initStringInfo(&cmd);
     745         392 :     appendStringInfo(&cmd, "SELECT c.oid, c.relreplident, c.relkind"
     746             :                      "  FROM pg_catalog.pg_class c"
     747             :                      "  INNER JOIN pg_catalog.pg_namespace n"
     748             :                      "        ON (c.relnamespace = n.oid)"
     749             :                      " WHERE n.nspname = %s"
     750             :                      "   AND c.relname = %s",
     751             :                      quote_literal_cstr(nspname),
     752             :                      quote_literal_cstr(relname));
     753         392 :     res = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data,
     754             :                       lengthof(tableRow), tableRow);
     755             : 
     756         392 :     if (res->status != WALRCV_OK_TUPLES)
     757           0 :         ereport(ERROR,
     758             :                 (errcode(ERRCODE_CONNECTION_FAILURE),
     759             :                  errmsg("could not fetch table info for table \"%s.%s\" from publisher: %s",
     760             :                         nspname, relname, res->err)));
     761             : 
     762         392 :     slot = MakeSingleTupleTableSlot(res->tupledesc, &TTSOpsMinimalTuple);
     763         392 :     if (!tuplestore_gettupleslot(res->tuplestore, true, false, slot))
     764           0 :         ereport(ERROR,
     765             :                 (errcode(ERRCODE_UNDEFINED_OBJECT),
     766             :                  errmsg("table \"%s.%s\" not found on publisher",
     767             :                         nspname, relname)));
     768             : 
     769         392 :     lrel->remoteid = DatumGetObjectId(slot_getattr(slot, 1, &isnull));
     770             :     Assert(!isnull);
     771         392 :     lrel->replident = DatumGetChar(slot_getattr(slot, 2, &isnull));
     772             :     Assert(!isnull);
     773         392 :     lrel->relkind = DatumGetChar(slot_getattr(slot, 3, &isnull));
     774             :     Assert(!isnull);
     775             : 
     776         392 :     ExecDropSingleTupleTableSlot(slot);
     777         392 :     walrcv_clear_result(res);
     778             : 
     779             : 
     780             :     /*
     781             :      * Get column lists for each relation.
     782             :      *
     783             :      * We need to do this before fetching info about column names and types,
     784             :      * so that we can skip columns that should not be replicated.
     785             :      */
     786         392 :     if (server_version >= 150000)
     787             :     {
     788             :         WalRcvExecResult *pubres;
     789             :         TupleTableSlot *tslot;
     790         392 :         Oid         attrsRow[] = {INT2VECTOROID};
     791             : 
     792             :         /* Build the pub_names comma-separated string. */
     793         392 :         pub_names = makeStringInfo();
     794         392 :         GetPublicationsStr(MySubscription->publications, pub_names, true);
     795             : 
     796             :         /*
     797             :          * Fetch info about column lists for the relation (from all the
     798             :          * publications).
     799             :          */
     800         392 :         resetStringInfo(&cmd);
     801         392 :         appendStringInfo(&cmd,
     802             :                          "SELECT DISTINCT"
     803             :                          "  (CASE WHEN (array_length(gpt.attrs, 1) = c.relnatts)"
     804             :                          "   THEN NULL ELSE gpt.attrs END)"
     805             :                          "  FROM pg_publication p,"
     806             :                          "  LATERAL pg_get_publication_tables(p.pubname) gpt,"
     807             :                          "  pg_class c"
     808             :                          " WHERE gpt.relid = %u AND c.oid = gpt.relid"
     809             :                          "   AND p.pubname IN ( %s )",
     810             :                          lrel->remoteid,
     811             :                          pub_names->data);
     812             : 
     813         392 :         pubres = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data,
     814             :                              lengthof(attrsRow), attrsRow);
     815             : 
     816         392 :         if (pubres->status != WALRCV_OK_TUPLES)
     817           0 :             ereport(ERROR,
     818             :                     (errcode(ERRCODE_CONNECTION_FAILURE),
     819             :                      errmsg("could not fetch column list info for table \"%s.%s\" from publisher: %s",
     820             :                             nspname, relname, pubres->err)));
     821             : 
     822             :         /*
     823             :          * We don't support the case where the column list is different for
     824             :          * the same table when combining publications. See comments atop
     825             :          * fetch_relation_list. So there should be only one row returned.
     826             :          * Although we already checked this when creating the subscription, we
     827             :          * still need to check here in case the column list was changed after
     828             :          * creating the subscription and before the sync worker is started.
     829             :          */
     830         392 :         if (tuplestore_tuple_count(pubres->tuplestore) > 1)
     831           0 :             ereport(ERROR,
     832             :                     errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
     833             :                     errmsg("cannot use different column lists for table \"%s.%s\" in different publications",
     834             :                            nspname, relname));
     835             : 
     836             :         /*
     837             :          * Get the column list and build a single bitmap with the attnums.
     838             :          *
     839             :          * If we find a NULL value, it means all the columns should be
     840             :          * replicated.
     841             :          */
     842         392 :         tslot = MakeSingleTupleTableSlot(pubres->tupledesc, &TTSOpsMinimalTuple);
     843         392 :         if (tuplestore_gettupleslot(pubres->tuplestore, true, false, tslot))
     844             :         {
     845         392 :             Datum       cfval = slot_getattr(tslot, 1, &isnull);
     846             : 
     847         392 :             if (!isnull)
     848             :             {
     849             :                 ArrayType  *arr;
     850             :                 int         nelems;
     851             :                 int16      *elems;
     852             : 
     853          44 :                 arr = DatumGetArrayTypeP(cfval);
     854          44 :                 nelems = ARR_DIMS(arr)[0];
     855          44 :                 elems = (int16 *) ARR_DATA_PTR(arr);
     856             : 
     857         118 :                 for (natt = 0; natt < nelems; natt++)
     858          74 :                     included_cols = bms_add_member(included_cols, elems[natt]);
     859             :             }
     860             : 
     861         392 :             ExecClearTuple(tslot);
     862             :         }
     863         392 :         ExecDropSingleTupleTableSlot(tslot);
     864             : 
     865         392 :         walrcv_clear_result(pubres);
     866             :     }
     867             : 
     868             :     /*
     869             :      * Now fetch column names and types.
     870             :      */
     871         392 :     resetStringInfo(&cmd);
     872         392 :     appendStringInfoString(&cmd,
     873             :                            "SELECT a.attnum,"
     874             :                            "       a.attname,"
     875             :                            "       a.atttypid,"
     876             :                            "       a.attnum = ANY(i.indkey)");
     877             : 
     878             :     /* Generated columns can be replicated since version 18. */
     879         392 :     if (server_version >= 180000)
     880         392 :         appendStringInfoString(&cmd, ", a.attgenerated != ''");
     881             : 
     882         784 :     appendStringInfo(&cmd,
     883             :                      "  FROM pg_catalog.pg_attribute a"
     884             :                      "  LEFT JOIN pg_catalog.pg_index i"
     885             :                      "       ON (i.indexrelid = pg_get_replica_identity_index(%u))"
     886             :                      " WHERE a.attnum > 0::pg_catalog.int2"
     887             :                      "   AND NOT a.attisdropped %s"
     888             :                      "   AND a.attrelid = %u"
     889             :                      " ORDER BY a.attnum",
     890             :                      lrel->remoteid,
     891         392 :                      (server_version >= 120000 && server_version < 180000 ?
     892             :                       "AND a.attgenerated = ''" : ""),
     893             :                      lrel->remoteid);
     894         392 :     res = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data,
     895             :                       server_version >= 180000 ? lengthof(attrRow) : lengthof(attrRow) - 1, attrRow);
     896             : 
     897         392 :     if (res->status != WALRCV_OK_TUPLES)
     898           0 :         ereport(ERROR,
     899             :                 (errcode(ERRCODE_CONNECTION_FAILURE),
     900             :                  errmsg("could not fetch table info for table \"%s.%s\" from publisher: %s",
     901             :                         nspname, relname, res->err)));
     902             : 
     903             :     /* We don't know the number of rows coming, so allocate enough space. */
     904         392 :     lrel->attnames = palloc0(MaxTupleAttributeNumber * sizeof(char *));
     905         392 :     lrel->atttyps = palloc0(MaxTupleAttributeNumber * sizeof(Oid));
     906         392 :     lrel->attkeys = NULL;
     907             : 
     908             :     /*
     909             :      * Store the columns as a list of names.  Ignore those that are not
     910             :      * present in the column list, if there is one.
     911             :      */
     912         392 :     natt = 0;
     913         392 :     slot = MakeSingleTupleTableSlot(res->tupledesc, &TTSOpsMinimalTuple);
     914        1118 :     while (tuplestore_gettupleslot(res->tuplestore, true, false, slot))
     915             :     {
     916             :         char       *rel_colname;
     917             :         AttrNumber  attnum;
     918             : 
     919         726 :         attnum = DatumGetInt16(slot_getattr(slot, 1, &isnull));
     920             :         Assert(!isnull);
     921             : 
     922             :         /* If the column is not in the column list, skip it. */
     923         726 :         if (included_cols != NULL && !bms_is_member(attnum, included_cols))
     924             :         {
     925          62 :             ExecClearTuple(slot);
     926          62 :             continue;
     927             :         }
     928             : 
     929         664 :         rel_colname = TextDatumGetCString(slot_getattr(slot, 2, &isnull));
     930             :         Assert(!isnull);
     931             : 
     932         664 :         lrel->attnames[natt] = rel_colname;
     933         664 :         lrel->atttyps[natt] = DatumGetObjectId(slot_getattr(slot, 3, &isnull));
     934             :         Assert(!isnull);
     935             : 
     936         664 :         if (DatumGetBool(slot_getattr(slot, 4, &isnull)))
     937         216 :             lrel->attkeys = bms_add_member(lrel->attkeys, natt);
     938             : 
     939             :         /* Remember if the remote table has published any generated column. */
     940         664 :         if (server_version >= 180000 && !(*gencol_published))
     941             :         {
     942         664 :             *gencol_published = DatumGetBool(slot_getattr(slot, 5, &isnull));
     943             :             Assert(!isnull);
     944             :         }
     945             : 
     946             :         /* Should never happen. */
     947         664 :         if (++natt >= MaxTupleAttributeNumber)
     948           0 :             elog(ERROR, "too many columns in remote table \"%s.%s\"",
     949             :                  nspname, relname);
     950             : 
     951         664 :         ExecClearTuple(slot);
     952             :     }
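    /*
     * Editorial illustration (hypothetical table and publication, not part
     * of tablesync.c): for a remote table t(a int, b int, c int) published
     * as FOR TABLE t (a, c), the attribute query returns attnums 1, 2 and 3,
     * but included_cols only contains 1 and 3, so column b is skipped and
     * lrel->attnames ends up as {"a", "c"} with natt = 2.  Columns the query
     * flags as key columns (its fourth output column) are recorded in
     * lrel->attkeys by their position in that list.
     */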
     953         392 :     ExecDropSingleTupleTableSlot(slot);
     954             : 
     955         392 :     lrel->natts = natt;
     956             : 
     957         392 :     walrcv_clear_result(res);
     958             : 
     959             :     /*
     960             :      * Get the relation's row filter expressions. DISTINCT prevents the same
     961             :      * expression of a table in multiple publications from being included
     962             :      * multiple times in the final expression.
     963             :      *
     964             :      * We need to copy the row even if it matches just one of the
     965             :      * publications, so we later combine all the quals with OR.
     966             :      *
     967             :      * For initial synchronization, row filtering can be ignored in the
     968             :      * following cases:
     969             :      *
     970             :      * 1) one of the subscribed publications for the table hasn't specified
     971             :      * any row filter
     972             :      *
     973             :      * 2) one of the subscribed publications has puballtables set to true
     974             :      *
     975             :      * 3) one of the subscribed publications is declared as TABLES IN SCHEMA
     976             :      * that includes this relation
     977             :      */
     978         392 :     if (server_version >= 150000)
     979             :     {
     980             :         /* Reuse the already-built pub_names. */
     981             :         Assert(pub_names != NULL);
     982             : 
     983             :         /* Check for row filters. */
     984         392 :         resetStringInfo(&cmd);
     985         392 :         appendStringInfo(&cmd,
     986             :                          "SELECT DISTINCT pg_get_expr(gpt.qual, gpt.relid)"
     987             :                          "  FROM pg_publication p,"
     988             :                          "  LATERAL pg_get_publication_tables(p.pubname) gpt"
     989             :                          " WHERE gpt.relid = %u"
     990             :                          "   AND p.pubname IN ( %s )",
     991             :                          lrel->remoteid,
     992             :                          pub_names->data);
     993             : 
     994         392 :         res = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data, 1, qualRow);
     995             : 
     996         392 :         if (res->status != WALRCV_OK_TUPLES)
     997           0 :             ereport(ERROR,
     998             :                     (errmsg("could not fetch table WHERE clause info for table \"%s.%s\" from publisher: %s",
     999             :                             nspname, relname, res->err)));
    1000             : 
    1001             :         /*
    1002             :          * Multiple row filter expressions for the same table will be combined
    1003             :          * with OR in the COPY command. If any of the filter expressions for
    1004             :          * this table are null, it means the whole table will be copied. In
    1005             :          * this case it is not necessary to construct a unified row filter
    1006             :          * expression at all.
    1007             :          */
    1008         392 :         slot = MakeSingleTupleTableSlot(res->tupledesc, &TTSOpsMinimalTuple);
    1009         422 :         while (tuplestore_gettupleslot(res->tuplestore, true, false, slot))
    1010             :         {
    1011         400 :             Datum       rf = slot_getattr(slot, 1, &isnull);
    1012             : 
    1013         400 :             if (!isnull)
    1014          30 :                 *qual = lappend(*qual, makeString(TextDatumGetCString(rf)));
    1015             :             else
    1016             :             {
    1017             :                 /* Ignore filters and cleanup as necessary. */
    1018         370 :                 if (*qual)
    1019             :                 {
    1020           6 :                     list_free_deep(*qual);
    1021           6 :                     *qual = NIL;
    1022             :                 }
    1023         370 :                 break;
    1024             :             }
    1025             : 
    1026          30 :             ExecClearTuple(slot);
    1027             :         }
    1028         392 :         ExecDropSingleTupleTableSlot(slot);
    1029             : 
    1030         392 :         walrcv_clear_result(res);
    1031         392 :         destroyStringInfo(pub_names);
    1032             :     }
    1033             : 
    1034         392 :     pfree(cmd.data);
    1035         392 : }
    1036             : 
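A worked illustration of the row-filter handling above; the publications and filter expressions are hypothetical and not taken from this file:

/*
 * Suppose the subscription subscribes to pub_a, published FOR TABLE t
 * WHERE (a > 10), and pub_b, published FOR TABLE t WHERE (b < 5).  The
 * row-filter query returns two non-null expressions, *qual becomes the
 * list ("(a > 10)", "(b < 5)"), and copy_table() later joins them with
 * OR.  If any subscribed publication publishes t with no row filter
 * (including FOR ALL TABLES or TABLES IN SCHEMA), the query returns a
 * NULL row, the list is freed, *qual is reset to NIL, and the whole
 * table is copied.
 */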
    1037             : /*
    1038             :  * Copy existing data of a table from the publisher.
    1039             :  *
    1040             :  * Caller is responsible for locking the local relation.
    1041             :  */
    1042             : static void
    1043         392 : copy_table(Relation rel)
    1044             : {
    1045             :     LogicalRepRelMapEntry *relmapentry;
    1046             :     LogicalRepRelation lrel;
    1047         392 :     List       *qual = NIL;
    1048             :     WalRcvExecResult *res;
    1049             :     StringInfoData cmd;
    1050             :     CopyFromState cstate;
    1051             :     List       *attnamelist;
    1052             :     ParseState *pstate;
    1053         392 :     List       *options = NIL;
    1054         392 :     bool        gencol_published = false;
    1055             : 
    1056             :     /* Get the publisher relation info. */
    1057         392 :     fetch_remote_table_info(get_namespace_name(RelationGetNamespace(rel)),
    1058         392 :                             RelationGetRelationName(rel), &lrel, &qual,
    1059             :                             &gencol_published);
    1060             : 
    1061             :     /* Put the relation into relmap. */
    1062         392 :     logicalrep_relmap_update(&lrel);
    1063             : 
    1064             :     /* Map the publisher relation to local one. */
    1065         392 :     relmapentry = logicalrep_rel_open(lrel.remoteid, NoLock);
    1066             :     Assert(rel == relmapentry->localrel);
    1067             : 
    1068             :     /* Start copy on the publisher. */
    1069         388 :     initStringInfo(&cmd);
    1070             : 
    1071             :     /* Regular table with no row filter or generated columns */
    1072         388 :     if (lrel.relkind == RELKIND_RELATION && qual == NIL && !gencol_published)
    1073             :     {
    1074         332 :         appendStringInfo(&cmd, "COPY %s",
    1075         332 :                          quote_qualified_identifier(lrel.nspname, lrel.relname));
    1076             : 
    1077             :         /* If the table has columns, then specify the columns */
    1078         332 :         if (lrel.natts)
    1079             :         {
    1080         330 :             appendStringInfoString(&cmd, " (");
    1081             : 
    1082             :             /*
    1083             :              * XXX Do we need to list the columns in all cases? Maybe we're
    1084             :              * replicating all columns?
    1085             :              */
    1086         898 :             for (int i = 0; i < lrel.natts; i++)
    1087             :             {
    1088         568 :                 if (i > 0)
    1089         238 :                     appendStringInfoString(&cmd, ", ");
    1090             : 
    1091         568 :                 appendStringInfoString(&cmd, quote_identifier(lrel.attnames[i]));
    1092             :             }
    1093             : 
    1094         330 :             appendStringInfoChar(&cmd, ')');
    1095             :         }
    1096             : 
    1097         332 :         appendStringInfoString(&cmd, " TO STDOUT");
    1098             :     }
    1099             :     else
    1100             :     {
    1101             :         /*
    1102             :          * For non-plain tables and tables with row filters, we need to do
    1103             :          * COPY (SELECT ...), but we can't just do SELECT * because we may
    1104             :          * need to copy only a subset of columns, including generated
    1105             :          * columns. For tables with any row filters, build a SELECT query
    1106             :          * with OR'ed row filters for COPY.
    1107             :          *
    1108             :          * We also need to use this same COPY (SELECT ...) syntax when
    1109             :          * generated columns are published, because copying generated columns
    1110             :          * is not supported by plain COPY.
    1111             :          */
    1112          56 :         appendStringInfoString(&cmd, "COPY (SELECT ");
    1113         140 :         for (int i = 0; i < lrel.natts; i++)
    1114             :         {
    1115          84 :             appendStringInfoString(&cmd, quote_identifier(lrel.attnames[i]));
    1116          84 :             if (i < lrel.natts - 1)
    1117          28 :                 appendStringInfoString(&cmd, ", ");
    1118             :         }
    1119             : 
    1120          56 :         appendStringInfoString(&cmd, " FROM ");
    1121             : 
    1122             :         /*
    1123             :          * For regular tables, make sure we don't copy data from a child that
    1124             :          * inherits the named table as those will be copied separately.
    1125             :          */
    1126          56 :         if (lrel.relkind == RELKIND_RELATION)
    1127          22 :             appendStringInfoString(&cmd, "ONLY ");
    1128             : 
    1129          56 :         appendStringInfoString(&cmd, quote_qualified_identifier(lrel.nspname, lrel.relname));
    1130             :         /* list of OR'ed filters */
    1131          56 :         if (qual != NIL)
    1132             :         {
    1133             :             ListCell   *lc;
    1134          22 :             char       *q = strVal(linitial(qual));
    1135             : 
    1136          22 :             appendStringInfo(&cmd, " WHERE %s", q);
    1137          24 :             for_each_from(lc, qual, 1)
    1138             :             {
    1139           2 :                 q = strVal(lfirst(lc));
    1140           2 :                 appendStringInfo(&cmd, " OR %s", q);
    1141             :             }
    1142          22 :             list_free_deep(qual);
    1143             :         }
    1144             : 
    1145          56 :         appendStringInfoString(&cmd, ") TO STDOUT");
    1146             :     }
    1147             : 
    1148             :     /*
    1149             :      * If the publisher is older than v16, initial table synchronization uses
    1150             :      * text format even if the binary option is enabled for the subscription.
    1151             :      */
    1152         388 :     if (walrcv_server_version(LogRepWorkerWalRcvConn) >= 160000 &&
    1153         388 :         MySubscription->binary)
    1154             :     {
    1155          10 :         appendStringInfoString(&cmd, " WITH (FORMAT binary)");
    1156          10 :         options = list_make1(makeDefElem("format",
    1157             :                                          (Node *) makeString("binary"), -1));
    1158             :     }
    1159             : 
    1160         388 :     res = walrcv_exec(LogRepWorkerWalRcvConn, cmd.data, 0, NULL);
    1161         388 :     pfree(cmd.data);
    1162         388 :     if (res->status != WALRCV_OK_COPY_OUT)
    1163           0 :         ereport(ERROR,
    1164             :                 (errcode(ERRCODE_CONNECTION_FAILURE),
    1165             :                  errmsg("could not start initial contents copy for table \"%s.%s\": %s",
    1166             :                         lrel.nspname, lrel.relname, res->err)));
    1167         388 :     walrcv_clear_result(res);
    1168             : 
    1169         388 :     copybuf = makeStringInfo();
    1170             : 
    1171         388 :     pstate = make_parsestate(NULL);
    1172         388 :     (void) addRangeTableEntryForRelation(pstate, rel, AccessShareLock,
    1173             :                                          NULL, false, false);
    1174             : 
    1175         388 :     attnamelist = make_copy_attnamelist(relmapentry);
    1176         388 :     cstate = BeginCopyFrom(pstate, rel, NULL, NULL, false, copy_read_data, attnamelist, options);
    1177             : 
    1178             :     /* Do the copy */
    1179         386 :     (void) CopyFrom(cstate);
    1180             : 
    1181         368 :     logicalrep_rel_close(relmapentry, NoLock);
    1182         368 : }
    1183             : 
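To make the two branches above concrete, here is a sketch of the COPY commands copy_table() would send; the relation, column and filter names are illustrative only:

/*
 * Plain table, no row filter, no published generated columns:
 *
 *     COPY public.t (a, b, c) TO STDOUT
 *
 * Plain table with row filters collected from two publications:
 *
 *     COPY (SELECT a, b, c FROM ONLY public.t
 *           WHERE (a > 10) OR (b < 5)) TO STDOUT
 *
 * Partitioned table (ONLY is omitted for non-plain relkinds, so the
 * SELECT reads all partitions):
 *
 *     COPY (SELECT a, b, c FROM public.parent) TO STDOUT
 *
 * With a v16-or-newer publisher and binary = true on the subscription,
 * " WITH (FORMAT binary)" is appended and the local COPY FROM is given
 * the matching format option.
 */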
    1184             : /*
    1185             :  * Determine the tablesync slot name.
    1186             :  *
    1187             :  * The name must not exceed NAMEDATALEN - 1 because of remote node constraints
    1188             :  * on slot name length. We append system_identifier to avoid slot_name
    1189             :  * collision with subscriptions in other clusters. With the current scheme
    1190             :  * pg_%u_sync_%u_UINT64_FORMAT (3 + 10 + 6 + 10 + 20 + '\0'), the maximum
    1191             :  * pg_%u_sync_%u_UINT64_FORMAT (3 + 10 + 6 + 10 + 1 + 20 + '\0'), the maximum
    1192             :  * length of slot_name will be 50.
    1193             :  * The returned slot name is stored in the supplied buffer (syncslotname) with
    1194             :  * the given size.
    1195             :  *
    1196             :  * Note: We don't use the subscription slot name as part of the tablesync slot
    1197             :  * name because we are responsible for cleaning up these slots and it could
    1198             :  * become impossible to recalculate what name to clean up if the subscription
    1199             :  * slot name had changed.
    1200             :  */
    1201             : void
    1202         774 : ReplicationSlotNameForTablesync(Oid suboid, Oid relid,
    1203             :                                 char *syncslotname, Size szslot)
    1204             : {
    1205         774 :     snprintf(syncslotname, szslot, "pg_%u_sync_%u_" UINT64_FORMAT, suboid,
    1206             :              relid, GetSystemIdentifier());
    1207         774 : }
    1208             : 
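For illustration, with made-up OIDs and system identifier:

/*
 * suboid = 16394, relid = 16401 and system identifier
 * 7364397887429104296 yield
 *
 *     pg_16394_sync_16401_7364397887429104296
 *
 * which is 39 characters here and at most 50 in the worst case, safely
 * under NAMEDATALEN - 1 (63) and the remote slot-name limit mentioned
 * above.
 */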
    1209             : /*
    1210             :  * Start syncing the table in the sync worker.
    1211             :  *
    1212             :  * If nothing needs to be done to sync the table, we exit the worker without
    1213             :  * any further action.
    1214             :  *
    1215             :  * The returned slot name is palloc'ed in the current memory context.
    1216             :  */
    1217             : static char *
    1218         396 : LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
    1219             : {
    1220             :     char       *slotname;
    1221             :     char       *err;
    1222             :     char        relstate;
    1223             :     XLogRecPtr  relstate_lsn;
    1224             :     Relation    rel;
    1225             :     AclResult   aclresult;
    1226             :     WalRcvExecResult *res;
    1227             :     char        originname[NAMEDATALEN];
    1228             :     RepOriginId originid;
    1229             :     UserContext ucxt;
    1230             :     bool        must_use_password;
    1231             :     bool        run_as_owner;
    1232             : 
    1233             :     /* Check the state of the table synchronization. */
    1234         396 :     StartTransactionCommand();
    1235         396 :     relstate = GetSubscriptionRelState(MyLogicalRepWorker->subid,
    1236         396 :                                        MyLogicalRepWorker->relid,
    1237             :                                        &relstate_lsn);
    1238         396 :     CommitTransactionCommand();
    1239             : 
    1240             :     /* Is the use of a password mandatory? */
    1241         784 :     must_use_password = MySubscription->passwordrequired &&
    1242         388 :         !MySubscription->ownersuperuser;
    1243             : 
    1244         396 :     SpinLockAcquire(&MyLogicalRepWorker->relmutex);
    1245         396 :     MyLogicalRepWorker->relstate = relstate;
    1246         396 :     MyLogicalRepWorker->relstate_lsn = relstate_lsn;
    1247         396 :     SpinLockRelease(&MyLogicalRepWorker->relmutex);
    1248             : 
    1249             :     /*
    1250             :      * If synchronization is already done or no longer necessary, exit now
    1251             :      * that we've updated shared memory state.
    1252             :      */
    1253         396 :     switch (relstate)
    1254             :     {
    1255           0 :         case SUBREL_STATE_SYNCDONE:
    1256             :         case SUBREL_STATE_READY:
    1257             :         case SUBREL_STATE_UNKNOWN:
    1258           0 :             FinishSyncWorker(); /* doesn't return */
    1259             :     }
    1260             : 
    1261             :     /* Calculate the name of the tablesync slot. */
    1262         396 :     slotname = (char *) palloc(NAMEDATALEN);
    1263         396 :     ReplicationSlotNameForTablesync(MySubscription->oid,
    1264         396 :                                     MyLogicalRepWorker->relid,
    1265             :                                     slotname,
    1266             :                                     NAMEDATALEN);
    1267             : 
    1268             :     /*
    1269             :      * Here we use the slot name instead of the subscription name as the
    1270             :      * application_name, so that it is different from the leader apply worker
    1271             :      * and synchronous replication can distinguish them.
    1272             :      */
    1273         396 :     LogRepWorkerWalRcvConn =
    1274         396 :         walrcv_connect(MySubscription->conninfo, true, true,
    1275             :                        must_use_password,
    1276             :                        slotname, &err);
    1277         396 :     if (LogRepWorkerWalRcvConn == NULL)
    1278           0 :         ereport(ERROR,
    1279             :                 (errcode(ERRCODE_CONNECTION_FAILURE),
    1280             :                  errmsg("table synchronization worker for subscription \"%s\" could not connect to the publisher: %s",
    1281             :                         MySubscription->name, err)));
    1282             : 
    1283             :     Assert(MyLogicalRepWorker->relstate == SUBREL_STATE_INIT ||
    1284             :            MyLogicalRepWorker->relstate == SUBREL_STATE_DATASYNC ||
    1285             :            MyLogicalRepWorker->relstate == SUBREL_STATE_FINISHEDCOPY);
    1286             : 
    1287             :     /* Assign the origin tracking record name. */
    1288         396 :     ReplicationOriginNameForLogicalRep(MySubscription->oid,
    1289         396 :                                        MyLogicalRepWorker->relid,
    1290             :                                        originname,
    1291             :                                        sizeof(originname));
    1292             : 
    1293         396 :     if (MyLogicalRepWorker->relstate == SUBREL_STATE_DATASYNC)
    1294             :     {
    1295             :         /*
    1296             :          * We have previously errored out before finishing the copy so the
    1297             :          * replication slot might exist. We want to remove the slot if it
    1298             :          * already exists and proceed.
    1299             :          *
    1300             :          * XXX We could instead have tried to drop the slot at the time of the
    1301             :          * earlier failure, but for that we might need to clean up the copy
    1302             :          * state, as it might be in the middle of fetching the rows. Also, if
    1303             :          * there was a network breakdown then the drop wouldn't have succeeded
    1304             :          * anyway, so trying it next time seems like a better bet.
    1305             :          */
    1306          18 :         ReplicationSlotDropAtPubNode(LogRepWorkerWalRcvConn, slotname, true);
    1307             :     }
    1308         378 :     else if (MyLogicalRepWorker->relstate == SUBREL_STATE_FINISHEDCOPY)
    1309             :     {
    1310             :         /*
    1311             :          * The COPY phase was previously done, but tablesync then crashed
    1312             :          * before it was able to finish normally.
    1313             :          */
    1314           0 :         StartTransactionCommand();
    1315             : 
    1316             :         /*
    1317             :          * The origin tracking name must already exist. It was created the
    1318             :          * first time this tablesync was launched.
    1319             :          */
    1320           0 :         originid = replorigin_by_name(originname, false);
    1321           0 :         replorigin_session_setup(originid, 0);
    1322           0 :         replorigin_session_origin = originid;
    1323           0 :         *origin_startpos = replorigin_session_get_progress(false);
    1324             : 
    1325           0 :         CommitTransactionCommand();
    1326             : 
    1327           0 :         goto copy_table_done;
    1328             :     }
    1329             : 
    1330         396 :     SpinLockAcquire(&MyLogicalRepWorker->relmutex);
    1331         396 :     MyLogicalRepWorker->relstate = SUBREL_STATE_DATASYNC;
    1332         396 :     MyLogicalRepWorker->relstate_lsn = InvalidXLogRecPtr;
    1333         396 :     SpinLockRelease(&MyLogicalRepWorker->relmutex);
    1334             : 
    1335             :     /* Update the state and make it visible to others. */
    1336         396 :     StartTransactionCommand();
    1337         396 :     UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
    1338         396 :                                MyLogicalRepWorker->relid,
    1339         396 :                                MyLogicalRepWorker->relstate,
    1340         396 :                                MyLogicalRepWorker->relstate_lsn,
    1341             :                                false);
    1342         394 :     CommitTransactionCommand();
    1343         394 :     pgstat_report_stat(true);
    1344             : 
    1345         394 :     StartTransactionCommand();
    1346             : 
    1347             :     /*
    1348             :      * Use a standard write lock here. It might be better to disallow access
    1349             :      * to the table while it's being synchronized, but we don't want to block
    1350             :      * the main apply process from working, and it must open the relation with
    1351             :      * RowExclusiveLock when remapping the remote relation id to the local one.
    1352             :      */
    1353         394 :     rel = table_open(MyLogicalRepWorker->relid, RowExclusiveLock);
    1354             : 
    1355             :     /*
    1356             :      * Start a transaction on the remote node in REPEATABLE READ mode.  This
    1357             :      * ensures that both the replication slot we create (see below) and the
    1358             :      * COPY are consistent with each other.
    1359             :      */
    1360         394 :     res = walrcv_exec(LogRepWorkerWalRcvConn,
    1361             :                       "BEGIN READ ONLY ISOLATION LEVEL REPEATABLE READ",
    1362             :                       0, NULL);
    1363         394 :     if (res->status != WALRCV_OK_COMMAND)
    1364           0 :         ereport(ERROR,
    1365             :                 (errcode(ERRCODE_CONNECTION_FAILURE),
    1366             :                  errmsg("table copy could not start transaction on publisher: %s",
    1367             :                         res->err)));
    1368         394 :     walrcv_clear_result(res);
    1369             : 
    1370             :     /*
    1371             :      * Create a new permanent logical decoding slot. This slot will be used
    1372             :      * for the catchup phase after COPY is done, so tell it to use the
    1373             :      * snapshot to make the final data consistent.
    1374             :      */
    1375         394 :     walrcv_create_slot(LogRepWorkerWalRcvConn,
    1376             :                        slotname, false /* permanent */ , false /* two_phase */ ,
    1377             :                        MySubscription->failover,
    1378             :                        CRS_USE_SNAPSHOT, origin_startpos);
    1379             : 
    1380             :     /*
    1381             :      * Set up replication origin tracking. Doing this before the copy avoids
    1382             :      * having to redo the copy should an error occur while setting up origin
    1383             :      * tracking.
    1384             :      */
    1385         392 :     originid = replorigin_by_name(originname, true);
    1386         392 :     if (!OidIsValid(originid))
    1387             :     {
    1388             :         /*
    1389             :          * Origin tracking does not exist, so create it now.
    1390             :          *
    1391             :          * Then advance to the LSN obtained from walrcv_create_slot. This is WAL
    1392             :          * logged for the purpose of recovery. Locks are to prevent the
    1393             :          * replication origin from vanishing while advancing.
    1394             :          */
    1395         392 :         originid = replorigin_create(originname);
    1396             : 
    1397         392 :         LockRelationOid(ReplicationOriginRelationId, RowExclusiveLock);
    1398         392 :         replorigin_advance(originid, *origin_startpos, InvalidXLogRecPtr,
    1399             :                            true /* go backward */ , true /* WAL log */ );
    1400         392 :         UnlockRelationOid(ReplicationOriginRelationId, RowExclusiveLock);
    1401             : 
    1402         392 :         replorigin_session_setup(originid, 0);
    1403         392 :         replorigin_session_origin = originid;
    1404             :     }
    1405             :     else
    1406             :     {
    1407           0 :         ereport(ERROR,
    1408             :                 (errcode(ERRCODE_DUPLICATE_OBJECT),
    1409             :                  errmsg("replication origin \"%s\" already exists",
    1410             :                         originname)));
    1411             :     }
    1412             : 
    1413             :     /*
    1414             :      * If the user did not opt to run as the owner of the subscription
    1415             :      * ('run_as_owner'), then copy the table as the owner of the table.
    1416             :      */
    1417         392 :     run_as_owner = MySubscription->runasowner;
    1418         392 :     if (!run_as_owner)
    1419         390 :         SwitchToUntrustedUser(rel->rd_rel->relowner, &ucxt);
    1420             : 
    1421             :     /*
    1422             :      * Check that our table sync worker has permission to insert into the
    1423             :      * target table.
    1424             :      */
    1425         392 :     aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
    1426             :                                   ACL_INSERT);
    1427         392 :     if (aclresult != ACLCHECK_OK)
    1428           0 :         aclcheck_error(aclresult,
    1429           0 :                        get_relkind_objtype(rel->rd_rel->relkind),
    1430           0 :                        RelationGetRelationName(rel));
    1431             : 
    1432             :     /*
    1433             :      * COPY FROM does not honor RLS policies.  That is not a problem for
    1434             :      * subscriptions owned by roles with BYPASSRLS privilege (or superuser,
    1435             :      * who has it implicitly), but other roles should not be able to
    1436             :      * circumvent RLS.  Disallow logical replication into RLS enabled
    1437             :      * relations for such roles.
    1438             :      */
    1439         392 :     if (check_enable_rls(RelationGetRelid(rel), InvalidOid, false) == RLS_ENABLED)
    1440           0 :         ereport(ERROR,
    1441             :                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    1442             :                  errmsg("user \"%s\" cannot replicate into relation with row-level security enabled: \"%s\"",
    1443             :                         GetUserNameFromId(GetUserId(), true),
    1444             :                         RelationGetRelationName(rel))));
    1445             : 
    1446             :     /* Now do the initial data copy */
    1447         392 :     PushActiveSnapshot(GetTransactionSnapshot());
    1448         392 :     copy_table(rel);
    1449         368 :     PopActiveSnapshot();
    1450             : 
    1451         368 :     res = walrcv_exec(LogRepWorkerWalRcvConn, "COMMIT", 0, NULL);
    1452         368 :     if (res->status != WALRCV_OK_COMMAND)
    1453           0 :         ereport(ERROR,
    1454             :                 (errcode(ERRCODE_CONNECTION_FAILURE),
    1455             :                  errmsg("table copy could not finish transaction on publisher: %s",
    1456             :                         res->err)));
    1457         368 :     walrcv_clear_result(res);
    1458             : 
    1459         368 :     if (!run_as_owner)
    1460         366 :         RestoreUserContext(&ucxt);
    1461             : 
    1462         368 :     table_close(rel, NoLock);
    1463             : 
    1464             :     /* Make the copy visible. */
    1465         368 :     CommandCounterIncrement();
    1466             : 
    1467             :     /*
    1468             :      * Update the persisted state to indicate the COPY phase is done; make it
    1469             :      * visible to others.
    1470             :      */
    1471         368 :     UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
    1472         368 :                                MyLogicalRepWorker->relid,
    1473             :                                SUBREL_STATE_FINISHEDCOPY,
    1474         368 :                                MyLogicalRepWorker->relstate_lsn,
    1475             :                                false);
    1476             : 
    1477         368 :     CommitTransactionCommand();
    1478             : 
    1479         368 : copy_table_done:
    1480             : 
    1481         368 :     elog(DEBUG1,
    1482             :          "LogicalRepSyncTableStart: '%s' origin_startpos lsn %X/%08X",
    1483             :          originname, LSN_FORMAT_ARGS(*origin_startpos));
    1484             : 
    1485             :     /*
    1486             :      * We are done with the initial data synchronization; update the state.
    1487             :      */
    1488         368 :     SpinLockAcquire(&MyLogicalRepWorker->relmutex);
    1489         368 :     MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCWAIT;
    1490         368 :     MyLogicalRepWorker->relstate_lsn = *origin_startpos;
    1491         368 :     SpinLockRelease(&MyLogicalRepWorker->relmutex);
    1492             : 
    1493             :     /*
    1494             :      * Finally, wait until the leader apply worker tells us to catch up and
    1495             :      * then return to let LogicalRepApplyLoop do it.
    1496             :      */
    1497         368 :     wait_for_worker_state_change(SUBREL_STATE_CATCHUP);
    1498         368 :     return slotname;
    1499             : }
    1500             : 
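Seen from the publisher, the walreceiver calls above amount to roughly the following sequence; this is a sketch, and the exact replication-command spelling is produced by the walreceiver layer rather than written out in this file:

/*
 *     BEGIN READ ONLY ISOLATION LEVEL REPEATABLE READ;
 *     CREATE_REPLICATION_SLOT "pg_<suboid>_sync_<relid>_<sysid>"
 *         LOGICAL pgoutput USE_SNAPSHOT         -- CRS_USE_SNAPSHOT
 *     COPY ... TO STDOUT                        -- built by copy_table()
 *     COMMIT;
 *
 * so the slot's consistent point and the copied data come from the same
 * snapshot, and the later catchup phase can stream from origin_startpos
 * without losing or duplicating rows.
 */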
    1501             : /*
    1502             :  * Execute the initial sync with error handling. Disable the subscription
    1503             :  * if required.
    1504             :  *
    1505             :  * Allocate the slot name in a long-lived context on return. Note that we
    1506             :  * don't handle FATAL errors, which are probably caused by system resource
    1507             :  * errors and are not repeatable.
    1508             :  */
    1509             : static void
    1510         396 : start_table_sync(XLogRecPtr *origin_startpos, char **slotname)
    1511             : {
    1512         396 :     char       *sync_slotname = NULL;
    1513             : 
    1514             :     Assert(am_tablesync_worker());
    1515             : 
    1516         396 :     PG_TRY();
    1517             :     {
    1518             :         /* Call initial sync. */
    1519         396 :         sync_slotname = LogicalRepSyncTableStart(origin_startpos);
    1520             :     }
    1521          24 :     PG_CATCH();
    1522             :     {
    1523          24 :         if (MySubscription->disableonerr)
    1524           2 :             DisableSubscriptionAndExit();
    1525             :         else
    1526             :         {
    1527             :             /*
    1528             :              * Report the worker failed during table synchronization. Abort
    1529             :              * the current transaction so that the stats message is sent in an
    1530             :              * idle state.
    1531             :              */
    1532          22 :             AbortOutOfAnyTransaction();
    1533          22 :             pgstat_report_subscription_error(MySubscription->oid,
    1534             :                                              WORKERTYPE_TABLESYNC);
    1535             : 
    1536          22 :             PG_RE_THROW();
    1537             :         }
    1538             :     }
    1539         368 :     PG_END_TRY();
    1540             : 
    1541             :     /* allocate slot name in long-lived context */
    1542         368 :     *slotname = MemoryContextStrdup(ApplyContext, sync_slotname);
    1543         368 :     pfree(sync_slotname);
    1544         368 : }
    1545             : 
    1546             : /*
    1547             :  * Runs the tablesync worker.
    1548             :  *
    1549             :  * It starts syncing tables. After a successful sync, it sets streaming
    1550             :  * options and starts streaming to catch up with the apply worker.
    1551             :  */
    1552             : static void
    1553         396 : run_tablesync_worker()
    1554             : {
    1555             :     char        originname[NAMEDATALEN];
    1556         396 :     XLogRecPtr  origin_startpos = InvalidXLogRecPtr;
    1557         396 :     char       *slotname = NULL;
    1558             :     WalRcvStreamOptions options;
    1559             : 
    1560         396 :     start_table_sync(&origin_startpos, &slotname);
    1561             : 
    1562         368 :     ReplicationOriginNameForLogicalRep(MySubscription->oid,
    1563         368 :                                        MyLogicalRepWorker->relid,
    1564             :                                        originname,
    1565             :                                        sizeof(originname));
    1566             : 
    1567         368 :     set_apply_error_context_origin(originname);
    1568             : 
    1569         368 :     set_stream_options(&options, slotname, &origin_startpos);
    1570             : 
    1571         368 :     walrcv_startstreaming(LogRepWorkerWalRcvConn, &options);
    1572             : 
    1573             :     /* Apply the changes till we catch up with the apply worker. */
    1574         368 :     start_apply(origin_startpos);
    1575           0 : }
    1576             : 
    1577             : /* Logical Replication Tablesync worker entry point */
    1578             : void
    1579         398 : TableSyncWorkerMain(Datum main_arg)
    1580             : {
    1581         398 :     int         worker_slot = DatumGetInt32(main_arg);
    1582             : 
    1583         398 :     SetupApplyOrSyncWorker(worker_slot);
    1584             : 
    1585         396 :     run_tablesync_worker();
    1586             : 
    1587           0 :     FinishSyncWorker();
    1588             : }
    1589             : 
    1590             : /*
    1591             :  * If the subscription has no tables then return false.
    1592             :  *
    1593             :  * Otherwise, return whether all tablesyncs are READY.
    1594             :  *
    1595             :  * Note: This function is not suitable to be called from outside of apply or
    1596             :  * tablesync workers because MySubscription needs to be already initialized.
    1597             :  */
    1598             : bool
    1599         390 : AllTablesyncsReady(void)
    1600             : {
    1601             :     bool        started_tx;
    1602             :     bool        has_tables;
    1603             : 
    1604             :     /* We need up-to-date sync state info for subscription tables here. */
    1605         390 :     FetchRelationStates(&has_tables, NULL, &started_tx);
    1606             : 
    1607         390 :     if (started_tx)
    1608             :     {
    1609          30 :         CommitTransactionCommand();
    1610          30 :         pgstat_report_stat(true);
    1611             :     }
    1612             : 
    1613             :     /*
    1614             :      * Return false when there are no tables in the subscription or not all
    1615             :      * tables are in the ready state; true otherwise.
    1616             :      */
    1617         390 :     return has_tables && (table_states_not_ready == NIL);
    1618             : }
    1619             : 
    1620             : /*
    1621             :  * Return whether the subscription currently has any tables.
    1622             :  *
    1623             :  * Note: Unlike HasSubscriptionTables(), this function relies on cached
    1624             :  * information for subscription tables. Additionally, it should not be
    1625             :  * invoked outside of apply or tablesync workers, as MySubscription must be
    1626             :  * initialized first.
    1627             :  */
    1628             : bool
    1629         236 : HasSubscriptionTablesCached(void)
    1630             : {
    1631             :     bool        started_tx;
    1632             :     bool        has_tables;
    1633             : 
    1634             :     /* We need up-to-date subscription tables info here */
    1635         236 :     FetchRelationStates(&has_tables, NULL, &started_tx);
    1636             : 
    1637         236 :     if (started_tx)
    1638             :     {
    1639           0 :         CommitTransactionCommand();
    1640           0 :         pgstat_report_stat(true);
    1641             :     }
    1642             : 
    1643         236 :     return has_tables;
    1644             : }
    1645             : 
    1646             : /*
    1647             :  * Update the two_phase state of the specified subscription in pg_subscription.
    1648             :  */
    1649             : void
    1650          18 : UpdateTwoPhaseState(Oid suboid, char new_state)
    1651             : {
    1652             :     Relation    rel;
    1653             :     HeapTuple   tup;
    1654             :     bool        nulls[Natts_pg_subscription];
    1655             :     bool        replaces[Natts_pg_subscription];
    1656             :     Datum       values[Natts_pg_subscription];
    1657             : 
    1658             :     Assert(new_state == LOGICALREP_TWOPHASE_STATE_DISABLED ||
    1659             :            new_state == LOGICALREP_TWOPHASE_STATE_PENDING ||
    1660             :            new_state == LOGICALREP_TWOPHASE_STATE_ENABLED);
    1661             : 
    1662          18 :     rel = table_open(SubscriptionRelationId, RowExclusiveLock);
    1663          18 :     tup = SearchSysCacheCopy1(SUBSCRIPTIONOID, ObjectIdGetDatum(suboid));
    1664          18 :     if (!HeapTupleIsValid(tup))
    1665           0 :         elog(ERROR,
    1666             :              "cache lookup failed for subscription oid %u",
    1667             :              suboid);
    1668             : 
    1669             :     /* Form a new tuple. */
    1670          18 :     memset(values, 0, sizeof(values));
    1671          18 :     memset(nulls, false, sizeof(nulls));
    1672          18 :     memset(replaces, false, sizeof(replaces));
    1673             : 
    1674             :     /* And update/set two_phase state */
    1675          18 :     values[Anum_pg_subscription_subtwophasestate - 1] = CharGetDatum(new_state);
    1676          18 :     replaces[Anum_pg_subscription_subtwophasestate - 1] = true;
    1677             : 
    1678          18 :     tup = heap_modify_tuple(tup, RelationGetDescr(rel),
    1679             :                             values, nulls, replaces);
    1680          18 :     CatalogTupleUpdate(rel, &tup->t_self, tup);
    1681             : 
    1682          18 :     heap_freetuple(tup);
    1683          18 :     table_close(rel, RowExclusiveLock);
    1684          18 : }
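In SQL terms the tuple update above is roughly equivalent to the following, with a hypothetical subscription OID; 'd', 'p' and 'e' are the DISABLED/PENDING/ENABLED state characters:

/*
 *     UPDATE pg_subscription
 *        SET subtwophasestate = 'e'
 *      WHERE oid = 16394;
 *
 * except that it goes through the syscache and CatalogTupleUpdate(), so
 * the change follows the normal catalog-update path (locking, index
 * maintenance, WAL logging).
 */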

Generated by: LCOV version 1.16