LCOV - code coverage report
Current view: top level - src/bin/pg_upgrade - pg_upgrade.c (source / functions)
Test:         PostgreSQL 13devel
Date:         2019-11-21 14:06:36
Coverage:     Lines: 178 of 199 hit (89.4 %)    Functions: 10 of 10 hit (100.0 %)
Legend:       Lines: hit / not hit

          Line data    Source code
       1             : /*
       2             :  *  pg_upgrade.c
       3             :  *
       4             :  *  main source file
       5             :  *
       6             :  *  Copyright (c) 2010-2019, PostgreSQL Global Development Group
       7             :  *  src/bin/pg_upgrade/pg_upgrade.c
       8             :  */
       9             : 
      10             : /*
      11             :  *  To simplify the upgrade process, we force certain system values to be
      12             :  *  identical between old and new clusters:
      13             :  *
      14             :  *  We control all assignments of pg_class.oid (and relfilenode) so toast
      15             :  *  oids are the same between old and new clusters.  This is important
      16             :  *  because toast oids are stored as toast pointers in user tables.
      17             :  *
      18             :  *  While pg_class.oid and pg_class.relfilenode are initially the same
      19             :  *  in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM
      20             :  *  FULL.  In the new cluster, pg_class.oid and pg_class.relfilenode will
      21             :  *  be the same and will match the old pg_class.oid value.  Because of
      22             :  *  this, old/new pg_class.relfilenode values will not match if CLUSTER,
      23             :  *  REINDEX, or VACUUM FULL have been performed in the old cluster.
      24             :  *
      25             :  *  We control all assignments of pg_type.oid because these oids are stored
      26             :  *  in user composite type values.
      27             :  *
      28             :  *  We control all assignments of pg_enum.oid because these oids are stored
      29             :  *  in user tables as enum values.
      30             :  *
      31             :  *  We control all assignments of pg_authid.oid for historical reasons (the
      32             :  *  oids used to be stored in pg_largeobject_metadata, which is now copied via
       33             :  *  SQL commands); that might change at some point in the future.
      34             :  */
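
For illustration only (this mechanism lives in pg_dump/pg_restore, not in this file): in --binary-upgrade mode the dump script pins the next OIDs before each CREATE by calling the backend's upgrade support functions, which are accepted only by a server started in binary-upgrade mode. A minimal sketch in the style of this file's helpers; example_pin_next_oids() and the OID values are made up:

/*
 * Illustrative sketch, not part of pg_upgrade.c: pin the pg_type and
 * pg_class OIDs that the next CREATE TABLE will consume, mirroring the
 * kind of statements a --binary-upgrade dump emits.
 */
static void
example_pin_next_oids(PGconn *conn)
{
    /* pg_type OIDs for the table's row type and its array type */
    PQclear(executeQueryOrDie(conn,
                              "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('16384'::pg_catalog.oid)"));
    PQclear(executeQueryOrDie(conn,
                              "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('16385'::pg_catalog.oid)"));

    /* pg_class OIDs for the heap and its TOAST table */
    PQclear(executeQueryOrDie(conn,
                              "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('16386'::pg_catalog.oid)"));
    PQclear(executeQueryOrDie(conn,
                              "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('16387'::pg_catalog.oid)"));

    /* The CREATE TABLE issued next will reuse exactly these OIDs. */
}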
      35             : 
      36             : 
      37             : 
      38             : #include "postgres_fe.h"
      39             : 
      40             : #ifdef HAVE_LANGINFO_H
      41             : #include <langinfo.h>
      42             : #endif
      43             : 
      44             : #include "catalog/pg_class_d.h"
      45             : #include "common/file_perm.h"
      46             : #include "common/logging.h"
      47             : #include "common/restricted_token.h"
      48             : #include "fe_utils/string_utils.h"
      49             : #include "pg_upgrade.h"
      50             : 
      51             : static void prepare_new_cluster(void);
      52             : static void prepare_new_globals(void);
      53             : static void create_new_objects(void);
      54             : static void copy_xact_xlog_xid(void);
      55             : static void set_frozenxids(bool minmxid_only);
      56             : static void setup(char *argv0, bool *live_check);
      57             : static void cleanup(void);
      58             : 
      59             : ClusterInfo old_cluster,
      60             :             new_cluster;
      61             : OSInfo      os_info;
      62             : 
      63             : char       *output_files[] = {
      64             :     SERVER_LOG_FILE,
      65             : #ifdef WIN32
      66             :     /* unique file for pg_ctl start */
      67             :     SERVER_START_LOG_FILE,
      68             : #endif
      69             :     UTILITY_LOG_FILE,
      70             :     INTERNAL_LOG_FILE,
      71             :     NULL
      72             : };
      73             : 
      74             : 
      75             : int
      76           2 : main(int argc, char **argv)
      77             : {
      78           2 :     char       *analyze_script_file_name = NULL;
      79           2 :     char       *deletion_script_file_name = NULL;
      80           2 :     bool        live_check = false;
      81             : 
      82           2 :     pg_logging_init(argv[0]);
      83           2 :     set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_upgrade"));
      84             : 
      85             :     /* Set default restrictive mask until new cluster permissions are read */
      86           2 :     umask(PG_MODE_MASK_OWNER);
      87             : 
      88           2 :     parseCommandLine(argc, argv);
      89             : 
      90           2 :     get_restricted_token();
      91             : 
      92           2 :     adjust_data_dir(&old_cluster);
      93           2 :     adjust_data_dir(&new_cluster);
      94             : 
      95           2 :     setup(argv[0], &live_check);
      96             : 
      97           2 :     output_check_banner(live_check);
      98             : 
      99           2 :     check_cluster_versions();
     100             : 
     101           2 :     get_sock_dir(&old_cluster, live_check);
     102           2 :     get_sock_dir(&new_cluster, false);
     103             : 
     104           2 :     check_cluster_compatibility(live_check);
     105             : 
     106             :     /* Set mask based on PGDATA permissions */
     107           2 :     if (!GetDataDirectoryCreatePerm(new_cluster.pgdata))
     108           0 :         pg_fatal("could not read permissions of directory \"%s\": %s\n",
     109           0 :                  new_cluster.pgdata, strerror(errno));
     110             : 
     111           2 :     umask(pg_mode_mask);
     112             : 
     113           2 :     check_and_dump_old_cluster(live_check);
     114             : 
     115             : 
     116             :     /* -- NEW -- */
     117           2 :     start_postmaster(&new_cluster, true);
     118             : 
     119           2 :     check_new_cluster();
     120           2 :     report_clusters_compatible();
     121             : 
     122           2 :     pg_log(PG_REPORT,
     123             :            "\n"
     124             :            "Performing Upgrade\n"
     125             :            "------------------\n");
     126             : 
     127           2 :     prepare_new_cluster();
     128             : 
     129           2 :     stop_postmaster(false);
     130             : 
     131             :     /*
     132             :      * Destructive Changes to New Cluster
     133             :      */
     134             : 
     135           2 :     copy_xact_xlog_xid();
     136             : 
      137             :     /* The new cluster is now using the xids of the old system */
     138             : 
     139             :     /* -- NEW -- */
     140           2 :     start_postmaster(&new_cluster, true);
     141             : 
     142           2 :     prepare_new_globals();
     143             : 
     144           2 :     create_new_objects();
     145             : 
     146           2 :     stop_postmaster(false);
     147             : 
     148             :     /*
     149             :      * Most failures happen in create_new_objects(), which has completed at
      150             :      * this point.  We do this here because it is just before the transfer step,
      151             :      * which in link mode hard-links the old and new cluster data files, so the
      152             :      * old cluster cannot be safely started once the new cluster has been started.
     153             :      */
     154           2 :     if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
     155           0 :         disable_old_cluster();
     156             : 
     157           2 :     transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr,
     158             :                                  old_cluster.pgdata, new_cluster.pgdata);
     159             : 
     160             :     /*
     161             :      * Assuming OIDs are only used in system tables, there is no need to
     162             :      * restore the OID counter because we have not transferred any OIDs from
     163             :      * the old system, but we do it anyway just in case.  We do it late here
     164             :      * because there is no need to have the schema load use new oids.
     165             :      */
     166           2 :     prep_status("Setting next OID for new cluster");
     167           2 :     exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     168             :               "\"%s/pg_resetwal\" -o %u \"%s\"",
     169             :               new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
     170             :               new_cluster.pgdata);
     171           2 :     check_ok();
     172             : 
     173           2 :     prep_status("Sync data directory to disk");
     174           2 :     exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     175             :               "\"%s/initdb\" --sync-only \"%s\"", new_cluster.bindir,
     176             :               new_cluster.pgdata);
     177           2 :     check_ok();
     178             : 
     179           2 :     create_script_for_cluster_analyze(&analyze_script_file_name);
     180           2 :     create_script_for_old_cluster_deletion(&deletion_script_file_name);
     181             : 
     182           2 :     issue_warnings_and_set_wal_level();
     183             : 
     184           2 :     pg_log(PG_REPORT,
     185             :            "\n"
     186             :            "Upgrade Complete\n"
     187             :            "----------------\n");
     188             : 
     189           2 :     output_completion_banner(analyze_script_file_name,
     190             :                              deletion_script_file_name);
     191             : 
     192           2 :     pg_free(analyze_script_file_name);
     193           2 :     pg_free(deletion_script_file_name);
     194             : 
     195           2 :     cleanup();
     196             : 
     197           2 :     return 0;
     198             : }
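
The link-mode comment inside main() above relies on disable_old_cluster(), which is implemented in controldata.c rather than here. As pg_upgrade's own status output indicates, disabling amounts to adding an ".old" suffix to the old cluster's global/pg_control so the old postmaster can no longer start against the now hard-linked files. A minimal sketch of that mechanism; example_disable_old_cluster() and its parameter are illustrative names, not the real implementation:

/*
 * Illustrative sketch, not the real disable_old_cluster(): renaming
 * global/pg_control makes the old server refuse to start, protecting the
 * data files that are now shared with the new cluster via hard links.
 */
static void
example_disable_old_cluster(const char *old_pgdata)
{
    char        old_path[MAXPGPATH];
    char        new_path[MAXPGPATH];

    snprintf(old_path, sizeof(old_path), "%s/global/pg_control", old_pgdata);
    snprintf(new_path, sizeof(new_path), "%s/global/pg_control.old", old_pgdata);

    if (rename(old_path, new_path) != 0)
        pg_fatal("could not rename \"%s\" to \"%s\": %s\n",
                 old_path, new_path, strerror(errno));
}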
     199             : 
     200             : 
     201             : static void
     202           2 : setup(char *argv0, bool *live_check)
     203             : {
     204             :     /*
      205             :      * Make sure the user has a clean environment; otherwise, we may confuse
      206             :      * libpq when we connect to one (or both) of the servers.
     207             :      */
     208           2 :     check_pghost_envvar();
     209             : 
     210             :     /*
     211             :      * In case the user hasn't specified the directory for the new binaries
     212             :      * with -B, default to using the path of the currently executed pg_upgrade
     213             :      * binary.
     214             :      */
     215           2 :     if (!new_cluster.bindir)
     216             :     {
     217             :         char        exec_path[MAXPGPATH];
     218             : 
     219           2 :         if (find_my_exec(argv0, exec_path) < 0)
     220           0 :             pg_fatal("%s: could not find own program executable\n", argv0);
     221             :         /* Trim off program name and keep just path */
     222           2 :         *last_dir_separator(exec_path) = '\0';
     223           2 :         canonicalize_path(exec_path);
     224           2 :         new_cluster.bindir = pg_strdup(exec_path);
     225             :     }
     226             : 
     227           2 :     verify_directories();
     228             : 
     229             :     /* no postmasters should be running, except for a live check */
     230           2 :     if (pid_lock_file_exists(old_cluster.pgdata))
     231             :     {
     232             :         /*
     233             :          * If we have a postmaster.pid file, try to start the server.  If it
     234             :          * starts, the pid file was stale, so stop the server.  If it doesn't
     235             :          * start, assume the server is running.  If the pid file is left over
     236             :          * from a server crash, this also allows any committed transactions
     237             :          * stored in the WAL to be replayed so they are not lost, because WAL
     238             :          * files are not transferred from old to new servers.  We later check
     239             :          * for a clean shutdown.
     240             :          */
     241           0 :         if (start_postmaster(&old_cluster, false))
     242           0 :             stop_postmaster(false);
     243             :         else
     244             :         {
     245           0 :             if (!user_opts.check)
     246           0 :                 pg_fatal("There seems to be a postmaster servicing the old cluster.\n"
      247             :                          "Please shut down that postmaster and try again.\n");
     248             :             else
     249           0 :                 *live_check = true;
     250             :         }
     251             :     }
     252             : 
     253             :     /* same goes for the new postmaster */
     254           2 :     if (pid_lock_file_exists(new_cluster.pgdata))
     255             :     {
     256           0 :         if (start_postmaster(&new_cluster, false))
     257           0 :             stop_postmaster(false);
     258             :         else
     259           0 :             pg_fatal("There seems to be a postmaster servicing the new cluster.\n"
      260             :                      "Please shut down that postmaster and try again.\n");
     261             :     }
     262           2 : }
     263             : 
     264             : 
     265             : static void
     266           2 : prepare_new_cluster(void)
     267             : {
     268             :     /*
     269             :      * It would make more sense to freeze after loading the schema, but that
     270             :      * would cause us to lose the frozenxids restored by the load. We use
     271             :      * --analyze so autovacuum doesn't update statistics later
      272             :      * --analyze so autovacuum doesn't update statistics later.
     273           2 :     prep_status("Analyzing all rows in the new cluster");
     274           2 :     exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     275             :               "\"%s/vacuumdb\" %s --all --analyze %s",
     276             :               new_cluster.bindir, cluster_conn_opts(&new_cluster),
     277           2 :               log_opts.verbose ? "--verbose" : "");
     278           2 :     check_ok();
     279             : 
     280             :     /*
     281             :      * We do freeze after analyze so pg_statistic is also frozen. template0 is
     282             :      * not frozen here, but data rows were frozen by initdb, and we set its
      283             :      * datfrozenxid, relfrozenxids, and relminmxid later to match the new xid
      284             :      * counter.
     285             :      */
     286           2 :     prep_status("Freezing all rows in the new cluster");
     287           2 :     exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     288             :               "\"%s/vacuumdb\" %s --all --freeze %s",
     289             :               new_cluster.bindir, cluster_conn_opts(&new_cluster),
     290           2 :               log_opts.verbose ? "--verbose" : "");
     291           2 :     check_ok();
     292           2 : }
     293             : 
     294             : 
     295             : static void
     296           2 : prepare_new_globals(void)
     297             : {
     298             :     /*
     299             :      * Before we restore anything, set frozenxids of initdb-created tables.
     300             :      */
     301           2 :     set_frozenxids(false);
     302             : 
     303             :     /*
     304             :      * Now restore global objects (roles and tablespaces).
     305             :      */
     306           2 :     prep_status("Restoring global objects in the new cluster");
     307             : 
     308           2 :     exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     309             :               "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
     310             :               new_cluster.bindir, cluster_conn_opts(&new_cluster),
     311             :               GLOBALS_DUMP_FILE);
     312           2 :     check_ok();
     313           2 : }
     314             : 
     315             : 
     316             : static void
     317           2 : create_new_objects(void)
     318             : {
     319             :     int         dbnum;
     320             : 
     321           2 :     prep_status("Restoring database schemas in the new cluster\n");
     322             : 
     323             :     /*
     324             :      * We cannot process the template1 database concurrently with others,
     325             :      * because when it's transiently dropped, connection attempts would fail.
     326             :      * So handle it in a separate non-parallelized pass.
     327             :      */
     328          24 :     for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
     329             :     {
     330             :         char        sql_file_name[MAXPGPATH],
     331             :                     log_file_name[MAXPGPATH];
     332          12 :         DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
     333             :         const char *create_opts;
     334             : 
     335             :         /* Process only template1 in this pass */
     336          12 :         if (strcmp(old_db->db_name, "template1") != 0)
     337          10 :             continue;
     338             : 
     339           2 :         pg_log(PG_STATUS, "%s", old_db->db_name);
     340           2 :         snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
     341           2 :         snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
     342             : 
     343             :         /*
     344             :          * template1 and postgres databases will already exist in the target
     345             :          * installation, so tell pg_restore to drop and recreate them;
     346             :          * otherwise we would fail to propagate their database-level
     347             :          * properties.
     348             :          */
     349           2 :         create_opts = "--clean --create";
     350             : 
     351           2 :         exec_prog(log_file_name,
     352             :                   NULL,
     353             :                   true,
     354             :                   true,
     355             :                   "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
     356             :                   "--dbname postgres \"%s\"",
     357             :                   new_cluster.bindir,
     358             :                   cluster_conn_opts(&new_cluster),
     359             :                   create_opts,
     360             :                   sql_file_name);
     361             : 
     362           2 :         break;                  /* done once we've processed template1 */
     363             :     }
     364             : 
     365          14 :     for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
     366             :     {
     367             :         char        sql_file_name[MAXPGPATH],
     368             :                     log_file_name[MAXPGPATH];
     369          12 :         DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
     370             :         const char *create_opts;
     371             : 
     372             :         /* Skip template1 in this pass */
     373          12 :         if (strcmp(old_db->db_name, "template1") == 0)
     374           2 :             continue;
     375             : 
     376          10 :         pg_log(PG_STATUS, "%s", old_db->db_name);
     377          10 :         snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
     378          10 :         snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
     379             : 
     380             :         /*
     381             :          * template1 and postgres databases will already exist in the target
     382             :          * installation, so tell pg_restore to drop and recreate them;
     383             :          * otherwise we would fail to propagate their database-level
     384             :          * properties.
     385             :          */
     386          10 :         if (strcmp(old_db->db_name, "postgres") == 0)
     387           2 :             create_opts = "--clean --create";
     388             :         else
     389           8 :             create_opts = "--create";
     390             : 
     391          10 :         parallel_exec_prog(log_file_name,
     392             :                            NULL,
     393             :                            "\"%s/pg_restore\" %s %s --exit-on-error --verbose "
     394             :                            "--dbname template1 \"%s\"",
     395             :                            new_cluster.bindir,
     396             :                            cluster_conn_opts(&new_cluster),
     397             :                            create_opts,
     398             :                            sql_file_name);
     399             :     }
     400             : 
     401             :     /* reap all children */
     402           2 :     while (reap_child(true) == true)
     403             :         ;
     404             : 
     405           2 :     end_progress_output();
     406           2 :     check_ok();
     407             : 
     408             :     /*
     409             :      * We don't have minmxids for databases or relations in pre-9.3 clusters,
     410             :      * so set those after we have restored the schema.
     411             :      */
     412           2 :     if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
     413           0 :         set_frozenxids(true);
     414             : 
     415             :     /* update new_cluster info now that we have objects in the databases */
     416           2 :     get_db_and_rel_infos(&new_cluster);
     417           2 : }
     418             : 
     419             : /*
     420             :  * Delete the given subdirectory contents from the new cluster
     421             :  */
     422             : static void
     423           6 : remove_new_subdir(const char *subdir, bool rmtopdir)
     424             : {
     425             :     char        new_path[MAXPGPATH];
     426             : 
     427           6 :     prep_status("Deleting files from new %s", subdir);
     428             : 
     429           6 :     snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir);
     430           6 :     if (!rmtree(new_path, rmtopdir))
     431           0 :         pg_fatal("could not delete directory \"%s\"\n", new_path);
     432             : 
     433           6 :     check_ok();
     434           6 : }
     435             : 
     436             : /*
      437             :  * Copy a subdirectory's files from the old cluster into the new cluster
     438             :  */
     439             : static void
     440           6 : copy_subdir_files(const char *old_subdir, const char *new_subdir)
     441             : {
     442             :     char        old_path[MAXPGPATH];
     443             :     char        new_path[MAXPGPATH];
     444             : 
     445           6 :     remove_new_subdir(new_subdir, true);
     446             : 
     447           6 :     snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, old_subdir);
     448           6 :     snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, new_subdir);
     449             : 
     450           6 :     prep_status("Copying old %s to new server", old_subdir);
     451             : 
     452           6 :     exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     453             : #ifndef WIN32
     454             :               "cp -Rf \"%s\" \"%s\"",
     455             : #else
     456             :     /* flags: everything, no confirm, quiet, overwrite read-only */
     457             :               "xcopy /e /y /q /r \"%s\" \"%s\\\"",
     458             : #endif
     459             :               old_path, new_path);
     460             : 
     461           6 :     check_ok();
     462           6 : }
     463             : 
     464             : static void
     465           2 : copy_xact_xlog_xid(void)
     466             : {
     467             :     /*
      468             :      * Copy old commit logs to the new data dir.  pg_clog was renamed to
      469             :      * pg_xact in PostgreSQL 10, so use whichever name each cluster expects.
     470             :      */
     471           2 :     copy_subdir_files(GET_MAJOR_VERSION(old_cluster.major_version) < 1000 ?
     472             :                       "pg_clog" : "pg_xact",
     473           2 :                       GET_MAJOR_VERSION(new_cluster.major_version) < 1000 ?
     474             :                       "pg_clog" : "pg_xact");
     475             : 
     476             :     /* set the next transaction id and epoch of the new cluster */
     477           2 :     prep_status("Setting next transaction ID and epoch for new cluster");
     478           2 :     exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     479             :               "\"%s/pg_resetwal\" -f -x %u \"%s\"",
     480             :               new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,
     481             :               new_cluster.pgdata);
     482           2 :     exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     483             :               "\"%s/pg_resetwal\" -f -e %u \"%s\"",
     484             :               new_cluster.bindir, old_cluster.controldata.chkpnt_nxtepoch,
     485             :               new_cluster.pgdata);
     486             :     /* must reset commit timestamp limits also */
     487           2 :     exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     488             :               "\"%s/pg_resetwal\" -f -c %u,%u \"%s\"",
     489             :               new_cluster.bindir,
     490             :               old_cluster.controldata.chkpnt_nxtxid,
     491             :               old_cluster.controldata.chkpnt_nxtxid,
     492             :               new_cluster.pgdata);
     493           2 :     check_ok();
     494             : 
     495             :     /*
     496             :      * If the old server is before the MULTIXACT_FORMATCHANGE_CAT_VER change
     497             :      * (see pg_upgrade.h) and the new server is after, then we don't copy
     498             :      * pg_multixact files, but we need to reset pg_control so that the new
     499             :      * server doesn't attempt to read multis older than the cutoff value.
     500             :      */
     501           4 :     if (old_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER &&
     502           2 :         new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
     503             :     {
     504           2 :         copy_subdir_files("pg_multixact/offsets", "pg_multixact/offsets");
     505           2 :         copy_subdir_files("pg_multixact/members", "pg_multixact/members");
     506             : 
     507           2 :         prep_status("Setting next multixact ID and offset for new cluster");
     508             : 
     509             :         /*
      510             :          * We preserve all files and contents, so we must preserve both "next"
      511             :          * counters here and the oldest multi present on the system.
     512             :          */
     513           2 :         exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     514             :                   "\"%s/pg_resetwal\" -O %u -m %u,%u \"%s\"",
     515             :                   new_cluster.bindir,
     516             :                   old_cluster.controldata.chkpnt_nxtmxoff,
     517             :                   old_cluster.controldata.chkpnt_nxtmulti,
     518             :                   old_cluster.controldata.chkpnt_oldstMulti,
     519             :                   new_cluster.pgdata);
     520           2 :         check_ok();
     521             :     }
     522           0 :     else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
     523             :     {
     524             :         /*
     525             :          * Remove offsets/0000 file created by initdb that no longer matches
     526             :          * the new multi-xid value.  "members" starts at zero so no need to
     527             :          * remove it.
     528             :          */
     529           0 :         remove_new_subdir("pg_multixact/offsets", false);
     530             : 
     531           0 :         prep_status("Setting oldest multixact ID in new cluster");
     532             : 
     533             :         /*
     534             :          * We don't preserve files in this case, but it's important that the
     535             :          * oldest multi is set to the latest value used by the old system, so
     536             :          * that multixact.c returns the empty set for multis that might be
     537             :          * present on disk.  We set next multi to the value following that; it
     538             :          * might end up wrapped around (i.e. 0) if the old cluster had
     539             :          * next=MaxMultiXactId, but multixact.c can cope with that just fine.
     540             :          */
     541           0 :         exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     542             :                   "\"%s/pg_resetwal\" -m %u,%u \"%s\"",
     543             :                   new_cluster.bindir,
     544           0 :                   old_cluster.controldata.chkpnt_nxtmulti + 1,
     545             :                   old_cluster.controldata.chkpnt_nxtmulti,
     546             :                   new_cluster.pgdata);
     547           0 :         check_ok();
     548             :     }
     549             : 
     550             :     /* now reset the wal archives in the new cluster */
     551           2 :     prep_status("Resetting WAL archives");
     552           2 :     exec_prog(UTILITY_LOG_FILE, NULL, true, true,
     553             :     /* use timeline 1 to match controldata and no WAL history file */
     554             :               "\"%s/pg_resetwal\" -l 00000001%s \"%s\"", new_cluster.bindir,
     555             :               old_cluster.controldata.nextxlogfile + 8,
     556             :               new_cluster.pgdata);
     557           2 :     check_ok();
     558           2 : }
     559             : 
     560             : 
     561             : /*
     562             :  *  set_frozenxids()
     563             :  *
     564             :  * This is called on the new cluster before we restore anything, with
     565             :  * minmxid_only = false.  Its purpose is to ensure that all initdb-created
     566             :  * vacuumable tables have relfrozenxid/relminmxid matching the old cluster's
     567             :  * xid/mxid counters.  We also initialize the datfrozenxid/datminmxid of the
     568             :  * built-in databases to match.
     569             :  *
     570             :  * As we create user tables later, their relfrozenxid/relminmxid fields will
     571             :  * be restored properly by the binary-upgrade restore script.  Likewise for
     572             :  * user-database datfrozenxid/datminmxid.  However, if we're upgrading from a
     573             :  * pre-9.3 database, which does not store per-table or per-DB minmxid, then
     574             :  * the relminmxid/datminmxid values filled in by the restore script will just
     575             :  * be zeroes.
     576             :  *
     577             :  * Hence, with a pre-9.3 source database, a second call occurs after
     578             :  * everything is restored, with minmxid_only = true.  This pass will
     579             :  * initialize all tables and databases, both those made by initdb and user
     580             :  * objects, with the desired minmxid value.  frozenxid values are left alone.
     581             :  */
     582             : static void
     583           2 : set_frozenxids(bool minmxid_only)
     584             : {
     585             :     int         dbnum;
     586             :     PGconn     *conn,
     587             :                *conn_template1;
     588             :     PGresult   *dbres;
     589             :     int         ntups;
     590             :     int         i_datname;
     591             :     int         i_datallowconn;
     592             : 
     593           2 :     if (!minmxid_only)
     594           2 :         prep_status("Setting frozenxid and minmxid counters in new cluster");
     595             :     else
     596           0 :         prep_status("Setting minmxid counter in new cluster");
     597             : 
     598           2 :     conn_template1 = connectToServer(&new_cluster, "template1");
     599             : 
     600           2 :     if (!minmxid_only)
     601             :         /* set pg_database.datfrozenxid */
     602           2 :         PQclear(executeQueryOrDie(conn_template1,
     603             :                                   "UPDATE pg_catalog.pg_database "
     604             :                                   "SET datfrozenxid = '%u'",
     605             :                                   old_cluster.controldata.chkpnt_nxtxid));
     606             : 
     607             :     /* set pg_database.datminmxid */
     608           2 :     PQclear(executeQueryOrDie(conn_template1,
     609             :                               "UPDATE pg_catalog.pg_database "
     610             :                               "SET datminmxid = '%u'",
     611             :                               old_cluster.controldata.chkpnt_nxtmulti));
     612             : 
     613             :     /* get database names */
     614           2 :     dbres = executeQueryOrDie(conn_template1,
     615             :                               "SELECT  datname, datallowconn "
     616             :                               "FROM    pg_catalog.pg_database");
     617             : 
     618           2 :     i_datname = PQfnumber(dbres, "datname");
     619           2 :     i_datallowconn = PQfnumber(dbres, "datallowconn");
     620             : 
     621           2 :     ntups = PQntuples(dbres);
     622           8 :     for (dbnum = 0; dbnum < ntups; dbnum++)
     623             :     {
     624           6 :         char       *datname = PQgetvalue(dbres, dbnum, i_datname);
     625           6 :         char       *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);
     626             : 
     627             :         /*
     628             :          * We must update databases where datallowconn = false, e.g.
     629             :          * template0, because autovacuum increments their datfrozenxids,
     630             :          * relfrozenxids, and relminmxid even if autovacuum is turned off, and
     631             :          * even though all the data rows are already frozen.  To enable this,
     632             :          * we temporarily change datallowconn.
     633             :          */
     634           6 :         if (strcmp(datallowconn, "f") == 0)
     635           2 :             PQclear(executeQueryOrDie(conn_template1,
     636             :                                       "ALTER DATABASE %s ALLOW_CONNECTIONS = true",
     637             :                                       quote_identifier(datname)));
     638             : 
     639           6 :         conn = connectToServer(&new_cluster, datname);
     640             : 
     641           6 :         if (!minmxid_only)
     642             :             /* set pg_class.relfrozenxid */
     643           6 :             PQclear(executeQueryOrDie(conn,
     644             :                                       "UPDATE  pg_catalog.pg_class "
     645             :                                       "SET relfrozenxid = '%u' "
     646             :             /* only heap, materialized view, and TOAST are vacuumed */
     647             :                                       "WHERE   relkind IN ("
     648             :                                       CppAsString2(RELKIND_RELATION) ", "
     649             :                                       CppAsString2(RELKIND_MATVIEW) ", "
     650             :                                       CppAsString2(RELKIND_TOASTVALUE) ")",
     651             :                                       old_cluster.controldata.chkpnt_nxtxid));
     652             : 
     653             :         /* set pg_class.relminmxid */
     654           6 :         PQclear(executeQueryOrDie(conn,
     655             :                                   "UPDATE  pg_catalog.pg_class "
     656             :                                   "SET relminmxid = '%u' "
     657             :         /* only heap, materialized view, and TOAST are vacuumed */
     658             :                                   "WHERE   relkind IN ("
     659             :                                   CppAsString2(RELKIND_RELATION) ", "
     660             :                                   CppAsString2(RELKIND_MATVIEW) ", "
     661             :                                   CppAsString2(RELKIND_TOASTVALUE) ")",
     662             :                                   old_cluster.controldata.chkpnt_nxtmulti));
     663           6 :         PQfinish(conn);
     664             : 
     665             :         /* Reset datallowconn flag */
     666           6 :         if (strcmp(datallowconn, "f") == 0)
     667           2 :             PQclear(executeQueryOrDie(conn_template1,
     668             :                                       "ALTER DATABASE %s ALLOW_CONNECTIONS = false",
     669             :                                       quote_identifier(datname)));
     670             :     }
     671             : 
     672           2 :     PQclear(dbres);
     673             : 
     674           2 :     PQfinish(conn_template1);
     675             : 
     676           2 :     check_ok();
     677           2 : }
     678             : 
     679             : 
     680             : static void
     681           2 : cleanup(void)
     682             : {
     683           2 :     fclose(log_opts.internal);
     684             : 
     685             :     /* Remove dump and log files? */
     686           2 :     if (!log_opts.retain)
     687             :     {
     688             :         int         dbnum;
     689             :         char      **filename;
     690             : 
     691           8 :         for (filename = output_files; *filename != NULL; filename++)
     692           6 :             unlink(*filename);
     693             : 
     694             :         /* remove dump files */
     695           2 :         unlink(GLOBALS_DUMP_FILE);
     696             : 
     697           2 :         if (old_cluster.dbarr.dbs)
     698          14 :             for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
     699             :             {
     700             :                 char        sql_file_name[MAXPGPATH],
     701             :                             log_file_name[MAXPGPATH];
     702          12 :                 DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
     703             : 
     704          12 :                 snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
     705          12 :                 unlink(sql_file_name);
     706             : 
     707          12 :                 snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
     708          12 :                 unlink(log_file_name);
     709             :             }
     710             :     }
     711           2 : }

Generated by: LCOV version 1.13