Line data Source code
1 : /*
2 : * check.c
3 : *
4 : * server checks and output routines
5 : *
6 : * Copyright (c) 2010-2025, PostgreSQL Global Development Group
7 : * src/bin/pg_upgrade/check.c
8 : */
9 :
10 : #include "postgres_fe.h"
11 :
12 : #include "catalog/pg_authid_d.h"
13 : #include "catalog/pg_class_d.h"
14 : #include "fe_utils/string_utils.h"
15 : #include "pg_upgrade.h"
16 :
17 : static void check_new_cluster_is_empty(void);
18 : static void check_is_install_user(ClusterInfo *cluster);
19 : static void check_for_connection_status(ClusterInfo *cluster);
20 : static void check_for_prepared_transactions(ClusterInfo *cluster);
21 : static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster);
22 : static void check_for_user_defined_postfix_ops(ClusterInfo *cluster);
23 : static void check_for_incompatible_polymorphics(ClusterInfo *cluster);
24 : static void check_for_tables_with_oids(ClusterInfo *cluster);
25 : static void check_for_pg_role_prefix(ClusterInfo *cluster);
26 : static void check_for_new_tablespace_dir(void);
27 : static void check_for_user_defined_encoding_conversions(ClusterInfo *cluster);
28 : static void check_new_cluster_logical_replication_slots(void);
29 : static void check_new_cluster_subscription_configuration(void);
30 : static void check_old_cluster_for_valid_slots(void);
31 : static void check_old_cluster_subscription_state(void);
32 :
33 : /*
34 : * DataTypesUsageChecks - definitions of data type checks for the old cluster
35 : * in order to determine if an upgrade can be performed. See the comment on
36 : * data_types_usage_checks below for a more detailed description.
37 : */
38 : typedef struct
39 : {
40 : /* Status line to print to the user */
41 : const char *status;
42 : /* Filename to store report to */
43 : const char *report_filename;
44 : /* Query to extract the oid of the datatype */
45 : const char *base_query;
46 : /* Text to store to report in case of error */
47 : const char *report_text;
48 : /* The latest version where the check applies */
49 : int threshold_version;
50 : /* A function pointer for determining if the check applies */
51 : DataTypesUsageVersionCheck version_hook;
52 : } DataTypesUsageChecks;
53 :
54 : /*
 55 : * Special values of threshold_version, indicating that a check applies to
56 : * all versions, or that a custom function needs to be invoked to determine
57 : * if the check applies.
58 : */
59 : #define MANUAL_CHECK 1
60 : #define ALL_VERSIONS -1
61 :
62 : /*--
63 : * Data type usage checks. Each check for problematic data type usage is
64 : * defined in this array with metadata, SQL query for finding the data type
65 : * and functionality for deciding if the check is applicable to the version
66 : * of the old cluster. The struct members are described in detail below:
67 : *
 68 : * status A one-line string which can be printed to the user to
69 : * inform about progress. Should not end with newline.
70 : * report_filename The filename in which the list of problems detected by
71 : * the check will be printed.
72 : * base_query A query which extracts the Oid of the datatype checked
73 : * for.
74 : * report_text The text which will be printed to the user to explain
75 : * what the check did, and why it failed. The text should
76 : * end with a newline, and does not need to refer to the
77 : * report_filename as that is automatically appended to
78 : * the report with the path to the log folder.
79 : * threshold_version The major version of PostgreSQL for which to run the
 80 : * check. If the old cluster's major version is less than
 81 : * or equal to the threshold version then the check is executed.
82 : * If the old version is greater than the threshold then
83 : * the check is skipped. If the threshold_version is set
 84 : * to ALL_VERSIONS then it will be run unconditionally;
 85 : * if set to MANUAL_CHECK then the version_hook function
 86 : * will be executed in order to determine whether or not
 87 : * to run the check.
88 : * version_hook A function pointer to a version check function of type
89 : * DataTypesUsageVersionCheck which is used to determine
90 : * if the check is applicable to the old cluster. If the
91 : * version_hook returns true then the check will be run,
 92 : * else it will be skipped. The function will be executed
 93 : * only if threshold_version is set to MANUAL_CHECK.
94 : */
95 : static DataTypesUsageChecks data_types_usage_checks[] =
96 : {
97 : /*
98 : * Look for composite types that were made during initdb *or* belong to
99 : * information_schema; that's important in case information_schema was
100 : * dropped and reloaded.
101 : *
102 : * The cutoff OID here should match the source cluster's value of
103 : * FirstNormalObjectId. We hardcode it rather than using that C #define
104 : * because, if that #define is ever changed, our own version's value is
105 : * NOT what to use. Eventually we may need a test on the source cluster's
106 : * version to select the correct value.
107 : */
108 : {
109 : .status = gettext_noop("Checking for system-defined composite types in user tables"),
110 : .report_filename = "tables_using_composite.txt",
111 : .base_query =
112 : "SELECT t.oid FROM pg_catalog.pg_type t "
113 : "LEFT JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid "
114 : " WHERE typtype = 'c' AND (t.oid < 16384 OR nspname = 'information_schema')",
115 : .report_text =
116 : gettext_noop("Your installation contains system-defined composite types in user tables.\n"
117 : "These type OIDs are not stable across PostgreSQL versions,\n"
118 : "so this cluster cannot currently be upgraded. You can drop the\n"
119 : "problem columns and restart the upgrade.\n"),
120 : .threshold_version = ALL_VERSIONS
121 : },
122 :
123 : /*
 124 : * 9.3 -> 9.4: the 'line' data type was fully implemented in 9.4; it
 125 : * previously returned "not enabled" by default and was only functionally
 126 : * enabled with a compile-time switch. As of 9.4 "line" has a different
 127 : * on-disk representation format.
128 : */
129 : {
130 : .status = gettext_noop("Checking for incompatible \"line\" data type"),
131 : .report_filename = "tables_using_line.txt",
132 : .base_query =
133 : "SELECT 'pg_catalog.line'::pg_catalog.regtype AS oid",
134 : .report_text =
135 : gettext_noop("Your installation contains the \"line\" data type in user tables.\n"
136 : "This data type changed its internal and input/output format\n"
137 : "between your old and new versions so this\n"
138 : "cluster cannot currently be upgraded. You can\n"
139 : "drop the problem columns and restart the upgrade.\n"),
140 : .threshold_version = 903
141 : },
142 :
143 : /*
 144 : * pg_upgrade only preserves these system values: pg_class.oid, pg_type.oid,
 145 : * and pg_enum.oid.
146 : *
147 : * Many of the reg* data types reference system catalog info that is not
148 : * preserved, and hence these data types cannot be used in user tables
149 : * upgraded by pg_upgrade.
150 : */
151 : {
152 : .status = gettext_noop("Checking for reg* data types in user tables"),
153 : .report_filename = "tables_using_reg.txt",
154 :
155 : /*
156 : * Note: older servers will not have all of these reg* types, so we
157 : * have to write the query like this rather than depending on casts to
158 : * regtype.
159 : */
160 : .base_query =
161 : "SELECT oid FROM pg_catalog.pg_type t "
162 : "WHERE t.typnamespace = "
163 : " (SELECT oid FROM pg_catalog.pg_namespace "
164 : " WHERE nspname = 'pg_catalog') "
165 : " AND t.typname IN ( "
166 : /* pg_class.oid is preserved, so 'regclass' is OK */
167 : " 'regcollation', "
168 : " 'regconfig', "
169 : " 'regdictionary', "
170 : " 'regnamespace', "
171 : " 'regoper', "
172 : " 'regoperator', "
173 : " 'regproc', "
174 : " 'regprocedure' "
175 : /* pg_authid.oid is preserved, so 'regrole' is OK */
176 : /* pg_type.oid is (mostly) preserved, so 'regtype' is OK */
177 : " )",
178 : .report_text =
179 : gettext_noop("Your installation contains one of the reg* data types in user tables.\n"
180 : "These data types reference system OIDs that are not preserved by\n"
181 : "pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
182 : "drop the problem columns and restart the upgrade.\n"),
183 : .threshold_version = ALL_VERSIONS
184 : },
185 :
186 : /*
187 : * PG 16 increased the size of the 'aclitem' type, which breaks the
188 : * on-disk format for existing data.
189 : */
190 : {
191 : .status = gettext_noop("Checking for incompatible \"aclitem\" data type"),
192 : .report_filename = "tables_using_aclitem.txt",
193 : .base_query =
194 : "SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid",
195 : .report_text =
196 : gettext_noop("Your installation contains the \"aclitem\" data type in user tables.\n"
197 : "The internal format of \"aclitem\" changed in PostgreSQL version 16\n"
198 : "so this cluster cannot currently be upgraded. You can drop the\n"
199 : "problem columns and restart the upgrade.\n"),
200 : .threshold_version = 1500
201 : },
202 :
203 : /*
204 : * It's no longer allowed to create tables or views with "unknown"-type
205 : * columns. We do not complain about views with such columns, because
206 : * they should get silently converted to "text" columns during the DDL
207 : * dump and reload; it seems unlikely to be worth making users do that by
208 : * hand. However, if there's a table with such a column, the DDL reload
209 : * will fail, so we should pre-detect that rather than failing
210 : * mid-upgrade. Worse, if there's a matview with such a column, the DDL
211 : * reload will silently change it to "text" which won't match the on-disk
212 : * storage (which is like "cstring"). So we *must* reject that.
213 : */
214 : {
215 : .status = gettext_noop("Checking for invalid \"unknown\" user columns"),
216 : .report_filename = "tables_using_unknown.txt",
217 : .base_query =
218 : "SELECT 'pg_catalog.unknown'::pg_catalog.regtype AS oid",
219 : .report_text =
220 : gettext_noop("Your installation contains the \"unknown\" data type in user tables.\n"
221 : "This data type is no longer allowed in tables, so this cluster\n"
222 : "cannot currently be upgraded. You can drop the problem columns\n"
223 : "and restart the upgrade.\n"),
224 : .threshold_version = 906
225 : },
226 :
227 : /*
228 : * PG 12 changed the 'sql_identifier' type storage to be based on name,
229 : * not varchar, which breaks on-disk format for existing data. So we need
230 : * to prevent upgrade when used in user objects (tables, indexes, ...). In
 231 : * 12, the sql_identifier data type was switched from varchar to name,
232 : * which does affect the storage (name is by-ref, but not varlena). This
233 : * means user tables using sql_identifier for columns are broken because
234 : * the on-disk format is different.
235 : */
236 : {
237 : .status = gettext_noop("Checking for invalid \"sql_identifier\" user columns"),
238 : .report_filename = "tables_using_sql_identifier.txt",
239 : .base_query =
240 : "SELECT 'information_schema.sql_identifier'::pg_catalog.regtype AS oid",
241 : .report_text =
242 : gettext_noop("Your installation contains the \"sql_identifier\" data type in user tables.\n"
243 : "The on-disk format for this data type has changed, so this\n"
244 : "cluster cannot currently be upgraded. You can drop the problem\n"
245 : "columns and restart the upgrade.\n"),
246 : .threshold_version = 1100
247 : },
248 :
249 : /*
250 : * JSONB changed its storage format during 9.4 beta, so check for it.
251 : */
252 : {
253 : .status = gettext_noop("Checking for incompatible \"jsonb\" data type in user tables"),
254 : .report_filename = "tables_using_jsonb.txt",
255 : .base_query =
256 : "SELECT 'pg_catalog.jsonb'::pg_catalog.regtype AS oid",
257 : .report_text =
258 : gettext_noop("Your installation contains the \"jsonb\" data type in user tables.\n"
259 : "The internal format of \"jsonb\" changed during 9.4 beta so this\n"
260 : "cluster cannot currently be upgraded. You can drop the problem \n"
261 : "columns and restart the upgrade.\n"),
262 : .threshold_version = MANUAL_CHECK,
263 : .version_hook = jsonb_9_4_check_applicable
264 : },
265 :
266 : /*
267 : * PG 12 removed types abstime, reltime, tinterval.
268 : */
269 : {
270 : .status = gettext_noop("Checking for removed \"abstime\" data type in user tables"),
271 : .report_filename = "tables_using_abstime.txt",
272 : .base_query =
273 : "SELECT 'pg_catalog.abstime'::pg_catalog.regtype AS oid",
274 : .report_text =
275 : gettext_noop("Your installation contains the \"abstime\" data type in user tables.\n"
276 : "The \"abstime\" type has been removed in PostgreSQL version 12,\n"
277 : "so this cluster cannot currently be upgraded. You can drop the\n"
278 : "problem columns, or change them to another data type, and restart\n"
279 : "the upgrade.\n"),
280 : .threshold_version = 1100
281 : },
282 : {
283 : .status = gettext_noop("Checking for removed \"reltime\" data type in user tables"),
284 : .report_filename = "tables_using_reltime.txt",
285 : .base_query =
286 : "SELECT 'pg_catalog.reltime'::pg_catalog.regtype AS oid",
287 : .report_text =
288 : gettext_noop("Your installation contains the \"reltime\" data type in user tables.\n"
289 : "The \"reltime\" type has been removed in PostgreSQL version 12,\n"
290 : "so this cluster cannot currently be upgraded. You can drop the\n"
291 : "problem columns, or change them to another data type, and restart\n"
292 : "the upgrade.\n"),
293 : .threshold_version = 1100
294 : },
295 : {
296 : .status = gettext_noop("Checking for removed \"tinterval\" data type in user tables"),
297 : .report_filename = "tables_using_tinterval.txt",
298 : .base_query =
299 : "SELECT 'pg_catalog.tinterval'::pg_catalog.regtype AS oid",
300 : .report_text =
301 : gettext_noop("Your installation contains the \"tinterval\" data type in user tables.\n"
302 : "The \"tinterval\" type has been removed in PostgreSQL version 12,\n"
303 : "so this cluster cannot currently be upgraded. You can drop the\n"
304 : "problem columns, or change them to another data type, and restart\n"
305 : "the upgrade.\n"),
306 : .threshold_version = 1100
307 : },
308 :
309 : /* End of checks marker, must remain last */
310 : {
311 : NULL, NULL, NULL, NULL, 0, NULL
312 : }
313 : };
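As a rough sketch of how the structure documented above fits together (illustrative only; "widget" is a hypothetical type name, not something pg_upgrade actually checks), a new threshold-gated check would be added just before the end-of-checks marker along these lines:

    {
        .status = gettext_noop("Checking for hypothetical \"widget\" data type in user tables"),
        .report_filename = "tables_using_widget.txt",
        .base_query =
        "SELECT 'pg_catalog.widget'::pg_catalog.regtype AS oid",
        .report_text =
        gettext_noop("Your installation contains the \"widget\" data type in user tables.\n"
                     "You can drop the problem columns and restart the upgrade.\n"),
        /* hypothetical cutoff: run only when the old cluster is v17 or older */
        .threshold_version = 1700
    },

A check whose applicability cannot be expressed as a simple version cutoff would instead set .threshold_version = MANUAL_CHECK and supply a .version_hook, as the "jsonb" entry above does.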
314 :
315 : /*
316 : * Private state for check_for_data_types_usage()'s UpgradeTask.
317 : */
318 : struct data_type_check_state
319 : {
320 : DataTypesUsageChecks *check; /* the check for this step */
321 : bool result; /* true if check failed for any database */
322 : PQExpBuffer *report; /* buffer for report on failed checks */
323 : };
324 :
325 : /*
326 : * Returns a palloc'd query string for the data type check, for use by
327 : * check_for_data_types_usage()'s UpgradeTask.
328 : */
329 : static char *
330 28 : data_type_check_query(int checknum)
331 : {
332 28 : DataTypesUsageChecks *check = &data_types_usage_checks[checknum];
333 :
334 28 : return psprintf("WITH RECURSIVE oids AS ( "
335 : /* start with the type(s) returned by base_query */
336 : " %s "
337 : " UNION ALL "
338 : " SELECT * FROM ( "
339 : /* inner WITH because we can only reference the CTE once */
340 : " WITH x AS (SELECT oid FROM oids) "
341 : /* domains on any type selected so far */
342 : " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typbasetype = x.oid AND typtype = 'd' "
343 : " UNION ALL "
344 : /* arrays over any type selected so far */
345 : " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typelem = x.oid AND typtype = 'b' "
346 : " UNION ALL "
347 : /* composite types containing any type selected so far */
348 : " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_class c, pg_catalog.pg_attribute a, x "
349 : " WHERE t.typtype = 'c' AND "
350 : " t.oid = c.reltype AND "
351 : " c.oid = a.attrelid AND "
352 : " NOT a.attisdropped AND "
353 : " a.atttypid = x.oid "
354 : " UNION ALL "
355 : /* ranges containing any type selected so far */
356 : " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x "
357 : " WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid"
358 : " ) foo "
359 : ") "
360 : /* now look for stored columns of any such type */
361 : "SELECT n.nspname, c.relname, a.attname "
362 : "FROM pg_catalog.pg_class c, "
363 : " pg_catalog.pg_namespace n, "
364 : " pg_catalog.pg_attribute a "
365 : "WHERE c.oid = a.attrelid AND "
366 : " NOT a.attisdropped AND "
367 : " a.atttypid IN (SELECT oid FROM oids) AND "
368 : " c.relkind IN ("
369 : CppAsString2(RELKIND_RELATION) ", "
370 : CppAsString2(RELKIND_MATVIEW) ", "
371 : CppAsString2(RELKIND_INDEX) ") AND "
372 : " c.relnamespace = n.oid AND "
373 : /* exclude possible orphaned temp tables */
374 : " n.nspname !~ '^pg_temp_' AND "
375 : " n.nspname !~ '^pg_toast_temp_' AND "
376 : /* exclude system catalogs, too */
377 : " n.nspname NOT IN ('pg_catalog', 'information_schema')",
378 : check->base_query);
379 : }
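For readability, here is a simplified sketch (not a verbatim copy) of the SQL that data_type_check_query() builds when base_query is one of the single-type lookups above, e.g. the "aclitem" check; the real query also collects composite and range types and excludes orphaned temp schemas:

    WITH RECURSIVE oids AS (
        -- base_query: the type(s) we start from
        SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid
        UNION ALL
        SELECT * FROM (
            WITH x AS (SELECT oid FROM oids)
            -- domains over any type collected so far
            SELECT t.oid FROM pg_catalog.pg_type t, x
             WHERE t.typbasetype = x.oid AND t.typtype = 'd'
            UNION ALL
            -- arrays over any type collected so far
            SELECT t.oid FROM pg_catalog.pg_type t, x
             WHERE t.typelem = x.oid AND t.typtype = 'b'
        ) foo
    )
    -- stored columns of any collected type in tables, matviews, and indexes
    SELECT n.nspname, c.relname, a.attname
      FROM pg_catalog.pg_class c
      JOIN pg_catalog.pg_attribute a ON a.attrelid = c.oid
      JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
     WHERE NOT a.attisdropped
       AND a.atttypid IN (SELECT oid FROM oids)
       AND c.relkind IN ('r', 'm', 'i')
       AND n.nspname NOT IN ('pg_catalog', 'information_schema');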
380 :
381 : /*
382 : * Callback function for processing results of queries for
383 : * check_for_data_types_usage()'s UpgradeTask. If the query returned any rows
384 : * (i.e., the check failed), write the details to the report file.
385 : */
386 : static void
387 88 : process_data_type_check(DbInfo *dbinfo, PGresult *res, void *arg)
388 : {
389 88 : struct data_type_check_state *state = (struct data_type_check_state *) arg;
390 88 : int ntups = PQntuples(res);
391 : char output_path[MAXPGPATH];
392 88 : int i_nspname = PQfnumber(res, "nspname");
393 88 : int i_relname = PQfnumber(res, "relname");
394 88 : int i_attname = PQfnumber(res, "attname");
395 88 : FILE *script = NULL;
396 :
397 : AssertVariableIsOfType(&process_data_type_check, UpgradeTaskProcessCB);
398 :
399 88 : if (ntups == 0)
400 88 : return;
401 :
402 0 : snprintf(output_path, sizeof(output_path), "%s/%s",
403 : log_opts.basedir,
404 0 : state->check->report_filename);
405 :
406 : /*
407 : * Make sure we have a buffer to save reports to now that we found a first
408 : * failing check.
409 : */
410 0 : if (*state->report == NULL)
411 0 : *state->report = createPQExpBuffer();
412 :
413 : /*
414 : * If this is the first time we see an error for the check in question
415 : * then print a status message of the failure.
416 : */
417 0 : if (!state->result)
418 : {
419 0 : pg_log(PG_REPORT, "failed check: %s", _(state->check->status));
420 0 : appendPQExpBuffer(*state->report, "\n%s\n%s %s\n",
421 0 : _(state->check->report_text),
422 : _("A list of the problem columns is in the file:"),
423 : output_path);
424 : }
425 0 : state->result = true;
426 :
427 0 : if ((script = fopen_priv(output_path, "a")) == NULL)
428 0 : pg_fatal("could not open file \"%s\": %m", output_path);
429 :
430 0 : fprintf(script, "In database: %s\n", dbinfo->db_name);
431 :
432 0 : for (int rowno = 0; rowno < ntups; rowno++)
433 0 : fprintf(script, " %s.%s.%s\n",
434 : PQgetvalue(res, rowno, i_nspname),
435 : PQgetvalue(res, rowno, i_relname),
436 : PQgetvalue(res, rowno, i_attname));
437 :
438 0 : fclose(script);
439 : }
440 :
441 : /*
442 : * check_for_data_types_usage()
443 : * Detect whether there are any stored columns depending on given type(s)
444 : *
445 : * If so, write a report to the given file name and signal a failure to the
446 : * user.
447 : *
448 : * The checks to run are defined in a DataTypesUsageChecks structure where
 449 : * each check has metadata for explaining errors to the user, a base_query,
450 : * a report filename and a function pointer hook for validating if the check
451 : * should be executed given the cluster at hand.
452 : *
453 : * base_query should be a SELECT yielding a single column named "oid",
454 : * containing the pg_type OIDs of one or more types that are known to have
455 : * inconsistent on-disk representations across server versions.
456 : *
457 : * We check for the type(s) in tables, matviews, and indexes, but not views;
458 : * there's no storage involved in a view.
459 : */
460 : static void
461 14 : check_for_data_types_usage(ClusterInfo *cluster)
462 : {
463 14 : PQExpBuffer report = NULL;
464 14 : DataTypesUsageChecks *tmp = data_types_usage_checks;
465 14 : int n_data_types_usage_checks = 0;
466 14 : UpgradeTask *task = upgrade_task_create();
467 14 : char **queries = NULL;
468 : struct data_type_check_state *states;
469 :
470 14 : prep_status("Checking data type usage");
471 :
472 : /* Gather number of checks to perform */
473 154 : while (tmp->status != NULL)
474 : {
475 140 : n_data_types_usage_checks++;
476 140 : tmp++;
477 : }
478 :
479 : /* Allocate memory for queries and for task states */
480 14 : queries = pg_malloc0(sizeof(char *) * n_data_types_usage_checks);
481 14 : states = pg_malloc0(sizeof(struct data_type_check_state) * n_data_types_usage_checks);
482 :
483 154 : for (int i = 0; i < n_data_types_usage_checks; i++)
484 : {
485 140 : DataTypesUsageChecks *check = &data_types_usage_checks[i];
486 :
487 140 : if (check->threshold_version == MANUAL_CHECK)
488 : {
489 : Assert(check->version_hook);
490 :
491 : /*
492 : * Make sure that the check applies to the current cluster version
493 : * and skip it if not.
494 : */
495 14 : if (!check->version_hook(cluster))
496 14 : continue;
497 : }
498 126 : else if (check->threshold_version != ALL_VERSIONS)
499 : {
500 98 : if (GET_MAJOR_VERSION(cluster->major_version) > check->threshold_version)
501 98 : continue;
502 : }
503 : else
504 : Assert(check->threshold_version == ALL_VERSIONS);
505 :
506 28 : queries[i] = data_type_check_query(i);
507 :
508 28 : states[i].check = check;
509 28 : states[i].report = &report;
510 :
511 28 : upgrade_task_add_step(task, queries[i], process_data_type_check,
512 28 : true, &states[i]);
513 : }
514 :
515 : /*
516 : * Connect to each database in the cluster and run all defined checks
517 : * against that database before trying the next one.
518 : */
519 14 : upgrade_task_run(task, cluster);
520 14 : upgrade_task_free(task);
521 :
522 14 : if (report)
523 : {
524 0 : pg_fatal("Data type checks failed: %s", report->data);
525 : destroyPQExpBuffer(report);
526 : }
527 :
528 154 : for (int i = 0; i < n_data_types_usage_checks; i++)
529 : {
530 140 : if (queries[i])
531 28 : pg_free(queries[i]);
532 : }
533 14 : pg_free(queries);
534 14 : pg_free(states);
535 :
536 14 : check_ok();
537 14 : }
538 :
539 : /*
540 : * fix_path_separator
541 : * For non-Windows, just return the argument.
 542 : * For Windows, convert any forward slash to a backslash,
 543 : * as is suitable for arguments to builtin commands
544 : * like RMDIR and DEL.
545 : */
546 : static char *
547 8 : fix_path_separator(char *path)
548 : {
549 : #ifdef WIN32
550 :
551 : char *result;
552 : char *c;
553 :
554 : result = pg_strdup(path);
555 :
556 : for (c = result; *c != '\0'; c++)
557 : if (*c == '/')
558 : *c = '\\';
559 :
560 : return result;
561 : #else
562 :
563 8 : return path;
564 : #endif
565 : }
566 :
567 : void
568 20 : output_check_banner(void)
569 : {
570 20 : if (user_opts.live_check)
571 : {
572 0 : pg_log(PG_REPORT,
573 : "Performing Consistency Checks on Old Live Server\n"
574 : "------------------------------------------------");
575 : }
576 : else
577 : {
578 20 : pg_log(PG_REPORT,
579 : "Performing Consistency Checks\n"
580 : "-----------------------------");
581 : }
582 20 : }
583 :
584 :
585 : void
586 20 : check_and_dump_old_cluster(void)
587 : {
588 : /* -- OLD -- */
589 :
590 20 : if (!user_opts.live_check)
591 20 : start_postmaster(&old_cluster, true);
592 :
593 : /*
594 : * First check that all databases allow connections since we'll otherwise
595 : * fail in later stages.
596 : */
597 20 : check_for_connection_status(&old_cluster);
598 :
599 : /*
600 : * Extract a list of databases, tables, and logical replication slots from
601 : * the old cluster.
602 : */
603 18 : get_db_rel_and_slot_infos(&old_cluster);
604 :
605 18 : init_tablespaces();
606 :
607 18 : get_loadable_libraries();
608 :
609 :
610 : /*
611 : * Check for various failure cases
612 : */
613 18 : check_is_install_user(&old_cluster);
614 18 : check_for_prepared_transactions(&old_cluster);
615 18 : check_for_isn_and_int8_passing_mismatch(&old_cluster);
616 :
617 18 : if (GET_MAJOR_VERSION(old_cluster.major_version) >= 1700)
618 : {
619 : /*
620 : * Logical replication slots can be migrated since PG17. See comments
621 : * atop get_old_cluster_logical_slot_infos().
622 : */
623 18 : check_old_cluster_for_valid_slots();
624 :
625 : /*
626 : * Subscriptions and their dependencies can be migrated since PG17.
627 : * Before that the logical slots are not upgraded, so we will not be
628 : * able to upgrade the logical replication clusters completely.
629 : */
630 16 : get_subscription_count(&old_cluster);
631 16 : check_old_cluster_subscription_state();
632 : }
633 :
634 14 : check_for_data_types_usage(&old_cluster);
635 :
636 : /*
637 : * PG 14 changed the function signature of encoding conversion functions.
638 : * Conversions from older versions cannot be upgraded automatically
639 : * because the user-defined functions used by the encoding conversions
640 : * need to be changed to match the new signature.
641 : */
642 14 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
643 0 : check_for_user_defined_encoding_conversions(&old_cluster);
644 :
645 : /*
646 : * Pre-PG 14 allowed user defined postfix operators, which are not
 647 : * supported anymore. Verify there are none, if applicable.
648 : */
649 14 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
650 0 : check_for_user_defined_postfix_ops(&old_cluster);
651 :
652 : /*
653 : * PG 14 changed polymorphic functions from anyarray to
654 : * anycompatiblearray.
655 : */
656 14 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
657 0 : check_for_incompatible_polymorphics(&old_cluster);
658 :
659 : /*
660 : * Pre-PG 12 allowed tables to be declared WITH OIDS, which is not
 661 : * supported anymore. Verify there are none, if applicable.
662 : */
663 14 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1100)
664 0 : check_for_tables_with_oids(&old_cluster);
665 :
666 : /*
 667 : * Pre-PG 10 allowed tables with 'unknown' type columns and non-WAL-logged
668 : * hash indexes
669 : */
670 14 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 906)
671 : {
672 0 : if (user_opts.check)
673 0 : old_9_6_invalidate_hash_indexes(&old_cluster, true);
674 : }
675 :
676 : /* 9.5 and below should not have roles starting with pg_ */
677 14 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 905)
678 0 : check_for_pg_role_prefix(&old_cluster);
679 :
680 : /*
681 : * While not a check option, we do this now because this is the only time
682 : * the old server is running.
683 : */
684 14 : if (!user_opts.check)
685 10 : generate_old_dump();
686 :
687 14 : if (!user_opts.live_check)
688 14 : stop_postmaster(false);
689 14 : }
690 :
691 :
692 : void
693 14 : check_new_cluster(void)
694 : {
695 14 : get_db_rel_and_slot_infos(&new_cluster);
696 :
697 14 : check_new_cluster_is_empty();
698 :
699 14 : check_loadable_libraries();
700 :
701 14 : switch (user_opts.transfer_mode)
702 : {
703 0 : case TRANSFER_MODE_CLONE:
704 0 : check_file_clone();
705 0 : break;
706 14 : case TRANSFER_MODE_COPY:
707 14 : break;
708 0 : case TRANSFER_MODE_COPY_FILE_RANGE:
709 0 : check_copy_file_range();
710 0 : break;
711 0 : case TRANSFER_MODE_LINK:
712 0 : check_hard_link();
713 0 : break;
714 : }
715 :
716 14 : check_is_install_user(&new_cluster);
717 :
718 14 : check_for_prepared_transactions(&new_cluster);
719 :
720 14 : check_for_new_tablespace_dir();
721 :
722 14 : check_new_cluster_logical_replication_slots();
723 :
724 12 : check_new_cluster_subscription_configuration();
725 10 : }
726 :
727 :
728 : void
729 10 : report_clusters_compatible(void)
730 : {
731 10 : if (user_opts.check)
732 : {
733 2 : pg_log(PG_REPORT, "\n*Clusters are compatible*");
734 : /* stops new cluster */
735 2 : stop_postmaster(false);
736 :
737 2 : cleanup_output_dirs();
738 2 : exit(0);
739 : }
740 :
741 8 : pg_log(PG_REPORT, "\n"
742 : "If pg_upgrade fails after this point, you must re-initdb the\n"
743 : "new cluster before continuing.");
744 8 : }
745 :
746 :
747 : void
748 8 : issue_warnings_and_set_wal_level(void)
749 : {
750 : /*
751 : * We unconditionally start/stop the new server because pg_resetwal -o set
752 : * wal_level to 'minimum'. If the user is upgrading standby servers using
753 : * the rsync instructions, they will need pg_upgrade to write its final
754 : * WAL record showing wal_level as 'replica'.
755 : */
756 8 : start_postmaster(&new_cluster, true);
757 :
758 : /* Reindex hash indexes for old < 10.0 */
759 8 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 906)
760 0 : old_9_6_invalidate_hash_indexes(&new_cluster, false);
761 :
762 8 : report_extension_updates(&new_cluster);
763 :
764 8 : stop_postmaster(false);
765 8 : }
766 :
767 :
768 : void
769 8 : output_completion_banner(char *deletion_script_file_name)
770 : {
771 : PQExpBufferData user_specification;
772 :
773 8 : initPQExpBuffer(&user_specification);
774 8 : if (os_info.user_specified)
775 : {
776 0 : appendPQExpBufferStr(&user_specification, "-U ");
777 0 : appendShellString(&user_specification, os_info.user);
778 0 : appendPQExpBufferChar(&user_specification, ' ');
779 : }
780 :
781 8 : pg_log(PG_REPORT,
782 : "Optimizer statistics are not transferred by pg_upgrade.\n"
783 : "Once you start the new server, consider running:\n"
784 : " %s/vacuumdb %s--all --analyze-in-stages", new_cluster.bindir, user_specification.data);
785 :
786 8 : if (deletion_script_file_name)
787 8 : pg_log(PG_REPORT,
788 : "Running this script will delete the old cluster's data files:\n"
789 : " %s",
790 : deletion_script_file_name);
791 : else
792 0 : pg_log(PG_REPORT,
793 : "Could not create a script to delete the old cluster's data files\n"
794 : "because user-defined tablespaces or the new cluster's data directory\n"
795 : "exist in the old cluster directory. The old cluster's contents must\n"
796 : "be deleted manually.");
797 :
798 8 : termPQExpBuffer(&user_specification);
799 8 : }
800 :
801 :
802 : void
803 20 : check_cluster_versions(void)
804 : {
805 20 : prep_status("Checking cluster versions");
806 :
807 : /* cluster versions should already have been obtained */
808 : Assert(old_cluster.major_version != 0);
809 : Assert(new_cluster.major_version != 0);
810 :
811 : /*
812 : * We allow upgrades from/to the same major version for alpha/beta
813 : * upgrades
814 : */
815 :
816 20 : if (GET_MAJOR_VERSION(old_cluster.major_version) < 902)
817 0 : pg_fatal("This utility can only upgrade from PostgreSQL version %s and later.",
818 : "9.2");
819 :
820 : /* Only current PG version is supported as a target */
821 20 : if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
822 0 : pg_fatal("This utility can only upgrade to PostgreSQL version %s.",
823 : PG_MAJORVERSION);
824 :
825 : /*
826 : * We can't allow downgrading because we use the target pg_dump, and
827 : * pg_dump cannot operate on newer database versions, only current and
828 : * older versions.
829 : */
830 20 : if (old_cluster.major_version > new_cluster.major_version)
831 0 : pg_fatal("This utility cannot be used to downgrade to older major PostgreSQL versions.");
832 :
833 : /* Ensure binaries match the designated data directories */
834 20 : if (GET_MAJOR_VERSION(old_cluster.major_version) !=
835 20 : GET_MAJOR_VERSION(old_cluster.bin_version))
836 0 : pg_fatal("Old cluster data and binary directories are from different major versions.");
837 20 : if (GET_MAJOR_VERSION(new_cluster.major_version) !=
838 20 : GET_MAJOR_VERSION(new_cluster.bin_version))
839 0 : pg_fatal("New cluster data and binary directories are from different major versions.");
840 :
841 : /*
 842 : * Since version 18, newly created database clusters always have
 843 : * 'signed' default char signedness, so there is little reason to use
 844 : * the --set-char-signedness option when upgrading from version 18 or later.
 845 : * Users who want to change the default char signedness of the new
 846 : * cluster can run pg_resetwal manually before the upgrade.
847 : */
848 20 : if (GET_MAJOR_VERSION(old_cluster.major_version) >= 1800 &&
849 20 : user_opts.char_signedness != -1)
850 0 : pg_fatal("%s option cannot be used to upgrade from PostgreSQL %s and later.",
851 : "--set-char-signedness", "18");
852 :
853 20 : check_ok();
854 20 : }
855 :
856 :
857 : void
858 20 : check_cluster_compatibility(void)
859 : {
860 : /* get/check pg_control data of servers */
861 20 : get_control_data(&old_cluster);
862 20 : get_control_data(&new_cluster);
863 20 : check_control_data(&old_cluster.controldata, &new_cluster.controldata);
864 :
865 20 : if (user_opts.live_check && old_cluster.port == new_cluster.port)
866 0 : pg_fatal("When checking a live server, "
867 : "the old and new port numbers must be different.");
868 20 : }
869 :
870 :
871 : static void
872 14 : check_new_cluster_is_empty(void)
873 : {
874 : int dbnum;
875 :
876 42 : for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
877 : {
878 : int relnum;
879 28 : RelInfoArr *rel_arr = &new_cluster.dbarr.dbs[dbnum].rel_arr;
880 :
881 84 : for (relnum = 0; relnum < rel_arr->nrels;
882 56 : relnum++)
883 : {
884 : /* pg_largeobject and its index should be skipped */
885 56 : if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
886 0 : pg_fatal("New cluster database \"%s\" is not empty: found relation \"%s.%s\"",
887 0 : new_cluster.dbarr.dbs[dbnum].db_name,
888 0 : rel_arr->rels[relnum].nspname,
889 0 : rel_arr->rels[relnum].relname);
890 : }
891 : }
892 14 : }
893 :
894 : /*
895 : * A previous run of pg_upgrade might have failed and the new cluster
 896 : * directory recreated, but the user might have forgotten to remove
897 : * the new cluster's tablespace directories. Therefore, check that
898 : * new cluster tablespace directories do not already exist. If
899 : * they do, it would cause an error while restoring global objects.
900 : * This allows the failure to be detected at check time, rather than
901 : * during schema restore.
902 : */
903 : static void
904 14 : check_for_new_tablespace_dir(void)
905 : {
906 : int tblnum;
907 : char new_tablespace_dir[MAXPGPATH];
908 :
909 14 : prep_status("Checking for new cluster tablespace directories");
910 :
911 14 : for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
912 : {
913 : struct stat statbuf;
914 :
915 0 : snprintf(new_tablespace_dir, MAXPGPATH, "%s%s",
916 0 : os_info.old_tablespaces[tblnum],
917 : new_cluster.tablespace_suffix);
918 :
919 0 : if (stat(new_tablespace_dir, &statbuf) == 0 || errno != ENOENT)
920 0 : pg_fatal("new cluster tablespace directory already exists: \"%s\"",
921 : new_tablespace_dir);
922 : }
923 :
924 14 : check_ok();
925 14 : }
926 :
927 : /*
928 : * create_script_for_old_cluster_deletion()
929 : *
930 : * This is particularly useful for tablespace deletion.
931 : */
932 : void
933 8 : create_script_for_old_cluster_deletion(char **deletion_script_file_name)
934 : {
935 8 : FILE *script = NULL;
936 : int tblnum;
937 : char old_cluster_pgdata[MAXPGPATH],
938 : new_cluster_pgdata[MAXPGPATH];
939 :
940 8 : *deletion_script_file_name = psprintf("%sdelete_old_cluster.%s",
941 : SCRIPT_PREFIX, SCRIPT_EXT);
942 :
943 8 : strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH);
944 8 : canonicalize_path(old_cluster_pgdata);
945 :
946 8 : strlcpy(new_cluster_pgdata, new_cluster.pgdata, MAXPGPATH);
947 8 : canonicalize_path(new_cluster_pgdata);
948 :
949 : /* Some people put the new data directory inside the old one. */
950 8 : if (path_is_prefix_of_path(old_cluster_pgdata, new_cluster_pgdata))
951 : {
952 0 : pg_log(PG_WARNING,
953 : "\nWARNING: new data directory should not be inside the old data directory, i.e. %s", old_cluster_pgdata);
954 :
955 : /* Unlink file in case it is left over from a previous run. */
956 0 : unlink(*deletion_script_file_name);
957 0 : pg_free(*deletion_script_file_name);
958 0 : *deletion_script_file_name = NULL;
959 0 : return;
960 : }
961 :
962 : /*
963 : * Some users (oddly) create tablespaces inside the cluster data
964 : * directory. We can't create a proper old cluster delete script in that
965 : * case.
966 : */
967 8 : for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
968 : {
969 : char old_tablespace_dir[MAXPGPATH];
970 :
971 0 : strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH);
972 0 : canonicalize_path(old_tablespace_dir);
973 0 : if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir))
974 : {
975 : /* reproduce warning from CREATE TABLESPACE that is in the log */
976 0 : pg_log(PG_WARNING,
977 : "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", old_tablespace_dir);
978 :
979 : /* Unlink file in case it is left over from a previous run. */
980 0 : unlink(*deletion_script_file_name);
981 0 : pg_free(*deletion_script_file_name);
982 0 : *deletion_script_file_name = NULL;
983 0 : return;
984 : }
985 : }
986 :
987 8 : prep_status("Creating script to delete old cluster");
988 :
989 8 : if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL)
990 0 : pg_fatal("could not open file \"%s\": %m",
991 : *deletion_script_file_name);
992 :
993 : #ifndef WIN32
994 : /* add shebang header */
995 8 : fprintf(script, "#!/bin/sh\n\n");
996 : #endif
997 :
998 : /* delete old cluster's default tablespace */
999 8 : fprintf(script, RMDIR_CMD " %c%s%c\n", PATH_QUOTE,
1000 : fix_path_separator(old_cluster.pgdata), PATH_QUOTE);
1001 :
1002 : /* delete old cluster's alternate tablespaces */
1003 8 : for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
1004 : {
1005 : /*
1006 : * Do the old cluster's per-database directories share a directory
1007 : * with a new version-specific tablespace?
1008 : */
1009 0 : if (strlen(old_cluster.tablespace_suffix) == 0)
1010 : {
1011 : /* delete per-database directories */
1012 : int dbnum;
1013 :
1014 0 : fprintf(script, "\n");
1015 :
1016 0 : for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
1017 0 : fprintf(script, RMDIR_CMD " %c%s%c%u%c\n", PATH_QUOTE,
1018 0 : fix_path_separator(os_info.old_tablespaces[tblnum]),
1019 0 : PATH_SEPARATOR, old_cluster.dbarr.dbs[dbnum].db_oid,
1020 : PATH_QUOTE);
1021 : }
1022 : else
1023 : {
1024 0 : char *suffix_path = pg_strdup(old_cluster.tablespace_suffix);
1025 :
1026 : /*
1027 : * Simply delete the tablespace directory, which might be ".old"
1028 : * or a version-specific subdirectory.
1029 : */
1030 0 : fprintf(script, RMDIR_CMD " %c%s%s%c\n", PATH_QUOTE,
1031 0 : fix_path_separator(os_info.old_tablespaces[tblnum]),
1032 : fix_path_separator(suffix_path), PATH_QUOTE);
1033 0 : pfree(suffix_path);
1034 : }
1035 : }
1036 :
1037 8 : fclose(script);
1038 :
1039 : #ifndef WIN32
1040 8 : if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
1041 0 : pg_fatal("could not add execute permission to file \"%s\": %m",
1042 : *deletion_script_file_name);
1043 : #endif
1044 :
1045 8 : check_ok();
1046 : }
1047 :
1048 :
1049 : /*
1050 : * check_is_install_user()
1051 : *
1052 : * Check we are the install user, and that the new cluster
1053 : * has no other users.
1054 : */
1055 : static void
1056 32 : check_is_install_user(ClusterInfo *cluster)
1057 : {
1058 : PGresult *res;
1059 32 : PGconn *conn = connectToServer(cluster, "template1");
1060 :
1061 32 : prep_status("Checking database user is the install user");
1062 :
1063 : /* Can't use pg_authid because only superusers can view it. */
1064 32 : res = executeQueryOrDie(conn,
1065 : "SELECT rolsuper, oid "
1066 : "FROM pg_catalog.pg_roles "
1067 : "WHERE rolname = current_user "
1068 : "AND rolname !~ '^pg_'");
1069 :
1070 : /*
1071 : * We only allow the install user in the new cluster (see comment below)
1072 : * and we preserve pg_authid.oid, so this must be the install user in the
1073 : * old cluster too.
1074 : */
1075 32 : if (PQntuples(res) != 1 ||
1076 32 : atooid(PQgetvalue(res, 0, 1)) != BOOTSTRAP_SUPERUSERID)
1077 0 : pg_fatal("database user \"%s\" is not the install user",
1078 : os_info.user);
1079 :
1080 32 : PQclear(res);
1081 :
1082 32 : res = executeQueryOrDie(conn,
1083 : "SELECT COUNT(*) "
1084 : "FROM pg_catalog.pg_roles "
1085 : "WHERE rolname !~ '^pg_'");
1086 :
1087 32 : if (PQntuples(res) != 1)
1088 0 : pg_fatal("could not determine the number of users");
1089 :
1090 : /*
1091 : * We only allow the install user in the new cluster because other defined
1092 : * users might match users defined in the old cluster and generate an
1093 : * error during pg_dump restore.
1094 : */
1095 32 : if (cluster == &new_cluster && strcmp(PQgetvalue(res, 0, 0), "1") != 0)
1096 0 : pg_fatal("Only the install user can be defined in the new cluster.");
1097 :
1098 32 : PQclear(res);
1099 :
1100 32 : PQfinish(conn);
1101 :
1102 32 : check_ok();
1103 32 : }
1104 :
1105 :
1106 : /*
1107 : * check_for_connection_status
1108 : *
1109 : * Ensure that all non-template0 databases allow connections since they
1110 : * otherwise won't be restored; and that template0 explicitly doesn't allow
1111 : * connections since it would make pg_dumpall --globals restore fail.
1112 : */
1113 : static void
1114 20 : check_for_connection_status(ClusterInfo *cluster)
1115 : {
1116 : int dbnum;
1117 : PGconn *conn_template1;
1118 : PGresult *dbres;
1119 : int ntups;
1120 : int i_datname;
1121 : int i_datallowconn;
1122 : int i_datconnlimit;
1123 20 : FILE *script = NULL;
1124 : char output_path[MAXPGPATH];
1125 :
1126 20 : prep_status("Checking database connection settings");
1127 :
1128 20 : snprintf(output_path, sizeof(output_path), "%s/%s",
1129 : log_opts.basedir,
1130 : "databases_cannot_connect_to.txt");
1131 :
1132 20 : conn_template1 = connectToServer(cluster, "template1");
1133 :
1134 : /* get database names */
1135 20 : dbres = executeQueryOrDie(conn_template1,
1136 : "SELECT datname, datallowconn, datconnlimit "
1137 : "FROM pg_catalog.pg_database");
1138 :
1139 20 : i_datname = PQfnumber(dbres, "datname");
1140 20 : i_datallowconn = PQfnumber(dbres, "datallowconn");
1141 20 : i_datconnlimit = PQfnumber(dbres, "datconnlimit");
1142 :
1143 20 : ntups = PQntuples(dbres);
1144 106 : for (dbnum = 0; dbnum < ntups; dbnum++)
1145 : {
1146 86 : char *datname = PQgetvalue(dbres, dbnum, i_datname);
1147 86 : char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);
1148 86 : char *datconnlimit = PQgetvalue(dbres, dbnum, i_datconnlimit);
1149 :
1150 86 : if (strcmp(datname, "template0") == 0)
1151 : {
1152 : /* avoid restore failure when pg_dumpall tries to create template0 */
1153 20 : if (strcmp(datallowconn, "t") == 0)
1154 0 : pg_fatal("template0 must not allow connections, "
1155 : "i.e. its pg_database.datallowconn must be false");
1156 : }
1157 : else
1158 : {
1159 : /*
 1160 : * Prevent datallowconn == false databases from being skipped on
1161 : * restore, and ensure that no databases are marked invalid with
1162 : * datconnlimit == -2.
1163 : */
1164 66 : if ((strcmp(datallowconn, "f") == 0) || strcmp(datconnlimit, "-2") == 0)
1165 : {
1166 2 : if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
1167 0 : pg_fatal("could not open file \"%s\": %m", output_path);
1168 :
1169 2 : fprintf(script, "%s\n", datname);
1170 : }
1171 : }
1172 : }
1173 :
1174 20 : PQclear(dbres);
1175 :
1176 20 : PQfinish(conn_template1);
1177 :
1178 20 : if (script)
1179 : {
1180 2 : fclose(script);
1181 2 : pg_log(PG_REPORT, "fatal");
1182 2 : pg_fatal("All non-template0 databases must allow connections, i.e. their\n"
1183 : "pg_database.datallowconn must be true and pg_database.datconnlimit\n"
1184 : "must not be -2. Your installation contains non-template0 databases\n"
1185 : "which cannot be connected to. Consider allowing connection for all\n"
1186 : "non-template0 databases or drop the databases which do not allow\n"
1187 : "connections. A list of databases with the problem is in the file:\n"
1188 : " %s", output_path);
1189 : }
1190 : else
1191 18 : check_ok();
1192 18 : }
1193 :
1194 :
1195 : /*
1196 : * check_for_prepared_transactions()
1197 : *
1198 : * Make sure there are no prepared transactions because the storage format
1199 : * might have changed.
1200 : */
1201 : static void
1202 32 : check_for_prepared_transactions(ClusterInfo *cluster)
1203 : {
1204 : PGresult *res;
1205 32 : PGconn *conn = connectToServer(cluster, "template1");
1206 :
1207 32 : prep_status("Checking for prepared transactions");
1208 :
1209 32 : res = executeQueryOrDie(conn,
1210 : "SELECT * "
1211 : "FROM pg_catalog.pg_prepared_xacts");
1212 :
1213 32 : if (PQntuples(res) != 0)
1214 : {
1215 0 : if (cluster == &old_cluster)
1216 0 : pg_fatal("The source cluster contains prepared transactions");
1217 : else
1218 0 : pg_fatal("The target cluster contains prepared transactions");
1219 : }
1220 :
1221 32 : PQclear(res);
1222 :
1223 32 : PQfinish(conn);
1224 :
1225 32 : check_ok();
1226 32 : }
1227 :
1228 : /*
1229 : * Callback function for processing result of query for
1230 : * check_for_isn_and_int8_passing_mismatch()'s UpgradeTask. If the query
1231 : * returned any rows (i.e., the check failed), write the details to the report
1232 : * file.
1233 : */
1234 : static void
1235 0 : process_isn_and_int8_passing_mismatch(DbInfo *dbinfo, PGresult *res, void *arg)
1236 : {
1237 0 : int ntups = PQntuples(res);
1238 0 : int i_nspname = PQfnumber(res, "nspname");
1239 0 : int i_proname = PQfnumber(res, "proname");
1240 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1241 :
1242 : AssertVariableIsOfType(&process_isn_and_int8_passing_mismatch,
1243 : UpgradeTaskProcessCB);
1244 :
1245 0 : if (ntups == 0)
1246 0 : return;
1247 :
1248 0 : if (report->file == NULL &&
1249 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1250 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1251 :
1252 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1253 :
1254 0 : for (int rowno = 0; rowno < ntups; rowno++)
1255 0 : fprintf(report->file, " %s.%s\n",
1256 : PQgetvalue(res, rowno, i_nspname),
1257 : PQgetvalue(res, rowno, i_proname));
1258 : }
1259 :
1260 : /*
1261 : * check_for_isn_and_int8_passing_mismatch()
1262 : *
1263 : * contrib/isn relies on data type int8, and in 8.4 int8 can now be passed
1264 : * by value. The schema dumps the CREATE TYPE PASSEDBYVALUE setting so
1265 : * it must match for the old and new servers.
1266 : */
1267 : static void
1268 18 : check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
1269 : {
1270 : UpgradeTask *task;
1271 : UpgradeTaskReport report;
1272 18 : const char *query = "SELECT n.nspname, p.proname "
1273 : "FROM pg_catalog.pg_proc p, "
1274 : " pg_catalog.pg_namespace n "
1275 : "WHERE p.pronamespace = n.oid AND "
1276 : " p.probin = '$libdir/isn'";
1277 :
1278 18 : prep_status("Checking for contrib/isn with bigint-passing mismatch");
1279 :
1280 18 : if (old_cluster.controldata.float8_pass_by_value ==
1281 18 : new_cluster.controldata.float8_pass_by_value)
1282 : {
1283 : /* no mismatch */
1284 18 : check_ok();
1285 18 : return;
1286 : }
1287 :
1288 0 : report.file = NULL;
1289 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1290 : log_opts.basedir,
1291 : "contrib_isn_and_int8_pass_by_value.txt");
1292 :
1293 0 : task = upgrade_task_create();
1294 0 : upgrade_task_add_step(task, query, process_isn_and_int8_passing_mismatch,
1295 : true, &report);
1296 0 : upgrade_task_run(task, cluster);
1297 0 : upgrade_task_free(task);
1298 :
1299 0 : if (report.file)
1300 : {
1301 0 : fclose(report.file);
1302 0 : pg_log(PG_REPORT, "fatal");
1303 0 : pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n"
1304 : "bigint data type. Your old and new clusters pass bigint values\n"
1305 : "differently so this cluster cannot currently be upgraded. You can\n"
1306 : "manually dump databases in the old cluster that use \"contrib/isn\"\n"
1307 : "facilities, drop them, perform the upgrade, and then restore them. A\n"
1308 : "list of the problem functions is in the file:\n"
1309 : " %s", report.path);
1310 : }
1311 : else
1312 0 : check_ok();
1313 : }
1314 :
1315 : /*
1316 : * Callback function for processing result of query for
1317 : * check_for_user_defined_postfix_ops()'s UpgradeTask. If the query returned
1318 : * any rows (i.e., the check failed), write the details to the report file.
1319 : */
1320 : static void
1321 0 : process_user_defined_postfix_ops(DbInfo *dbinfo, PGresult *res, void *arg)
1322 : {
1323 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1324 0 : int ntups = PQntuples(res);
1325 0 : int i_oproid = PQfnumber(res, "oproid");
1326 0 : int i_oprnsp = PQfnumber(res, "oprnsp");
1327 0 : int i_oprname = PQfnumber(res, "oprname");
1328 0 : int i_typnsp = PQfnumber(res, "typnsp");
1329 0 : int i_typname = PQfnumber(res, "typname");
1330 :
1331 : AssertVariableIsOfType(&process_user_defined_postfix_ops,
1332 : UpgradeTaskProcessCB);
1333 :
1334 0 : if (ntups == 0)
1335 0 : return;
1336 :
1337 0 : if (report->file == NULL &&
1338 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1339 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1340 :
1341 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1342 :
1343 0 : for (int rowno = 0; rowno < ntups; rowno++)
1344 0 : fprintf(report->file, " (oid=%s) %s.%s (%s.%s, NONE)\n",
1345 : PQgetvalue(res, rowno, i_oproid),
1346 : PQgetvalue(res, rowno, i_oprnsp),
1347 : PQgetvalue(res, rowno, i_oprname),
1348 : PQgetvalue(res, rowno, i_typnsp),
1349 : PQgetvalue(res, rowno, i_typname));
1350 : }
1351 :
1352 : /*
1353 : * Verify that no user defined postfix operators exist.
1354 : */
1355 : static void
1356 0 : check_for_user_defined_postfix_ops(ClusterInfo *cluster)
1357 : {
1358 : UpgradeTaskReport report;
1359 0 : UpgradeTask *task = upgrade_task_create();
1360 : const char *query;
1361 :
1362 : /*
1363 : * The query below hardcodes FirstNormalObjectId as 16384 rather than
1364 : * interpolating that C #define into the query because, if that #define is
1365 : * ever changed, the cutoff we want to use is the value used by
1366 : * pre-version 14 servers, not that of some future version.
1367 : */
1368 0 : query = "SELECT o.oid AS oproid, "
1369 : " n.nspname AS oprnsp, "
1370 : " o.oprname, "
1371 : " tn.nspname AS typnsp, "
1372 : " t.typname "
1373 : "FROM pg_catalog.pg_operator o, "
1374 : " pg_catalog.pg_namespace n, "
1375 : " pg_catalog.pg_type t, "
1376 : " pg_catalog.pg_namespace tn "
1377 : "WHERE o.oprnamespace = n.oid AND "
1378 : " o.oprleft = t.oid AND "
1379 : " t.typnamespace = tn.oid AND "
1380 : " o.oprright = 0 AND "
1381 : " o.oid >= 16384";
1382 :
1383 0 : prep_status("Checking for user-defined postfix operators");
1384 :
1385 0 : report.file = NULL;
1386 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1387 : log_opts.basedir,
1388 : "postfix_ops.txt");
1389 :
1390 0 : upgrade_task_add_step(task, query, process_user_defined_postfix_ops,
1391 : true, &report);
1392 0 : upgrade_task_run(task, cluster);
1393 0 : upgrade_task_free(task);
1394 :
1395 0 : if (report.file)
1396 : {
1397 0 : fclose(report.file);
1398 0 : pg_log(PG_REPORT, "fatal");
1399 0 : pg_fatal("Your installation contains user-defined postfix operators, which are not\n"
1400 : "supported anymore. Consider dropping the postfix operators and replacing\n"
1401 : "them with prefix operators or function calls.\n"
1402 : "A list of user-defined postfix operators is in the file:\n"
1403 : " %s", report.path);
1404 : }
1405 : else
1406 0 : check_ok();
1407 0 : }
1408 :
1409 : /*
1410 : * Callback function for processing results of query for
1411 : * check_for_incompatible_polymorphics()'s UpgradeTask. If the query returned
1412 : * any rows (i.e., the check failed), write the details to the report file.
1413 : */
1414 : static void
1415 0 : process_incompat_polymorphics(DbInfo *dbinfo, PGresult *res, void *arg)
1416 : {
1417 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1418 0 : int ntups = PQntuples(res);
1419 0 : int i_objkind = PQfnumber(res, "objkind");
1420 0 : int i_objname = PQfnumber(res, "objname");
1421 :
1422 : AssertVariableIsOfType(&process_incompat_polymorphics,
1423 : UpgradeTaskProcessCB);
1424 :
1425 0 : if (ntups == 0)
1426 0 : return;
1427 :
1428 0 : if (report->file == NULL &&
1429 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1430 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1431 :
1432 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1433 :
1434 0 : for (int rowno = 0; rowno < ntups; rowno++)
1435 0 : fprintf(report->file, " %s: %s\n",
1436 : PQgetvalue(res, rowno, i_objkind),
1437 : PQgetvalue(res, rowno, i_objname));
1438 : }
1439 :
1440 : /*
1441 : * check_for_incompatible_polymorphics()
1442 : *
1443 : * Make sure nothing is using old polymorphic functions with
1444 : * anyarray/anyelement rather than the new anycompatible variants.
1445 : */
1446 : static void
1447 0 : check_for_incompatible_polymorphics(ClusterInfo *cluster)
1448 : {
1449 : PQExpBufferData old_polymorphics;
1450 0 : UpgradeTask *task = upgrade_task_create();
1451 : UpgradeTaskReport report;
1452 : char *query;
1453 :
1454 0 : prep_status("Checking for incompatible polymorphic functions");
1455 :
1456 0 : report.file = NULL;
1457 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1458 : log_opts.basedir,
1459 : "incompatible_polymorphics.txt");
1460 :
1461 : /* The set of problematic functions varies a bit in different versions */
1462 0 : initPQExpBuffer(&old_polymorphics);
1463 :
1464 0 : appendPQExpBufferStr(&old_polymorphics,
1465 : "'array_append(anyarray,anyelement)'"
1466 : ", 'array_cat(anyarray,anyarray)'"
1467 : ", 'array_prepend(anyelement,anyarray)'");
1468 :
1469 0 : if (GET_MAJOR_VERSION(cluster->major_version) >= 903)
1470 0 : appendPQExpBufferStr(&old_polymorphics,
1471 : ", 'array_remove(anyarray,anyelement)'"
1472 : ", 'array_replace(anyarray,anyelement,anyelement)'");
1473 :
1474 0 : if (GET_MAJOR_VERSION(cluster->major_version) >= 905)
1475 0 : appendPQExpBufferStr(&old_polymorphics,
1476 : ", 'array_position(anyarray,anyelement)'"
1477 : ", 'array_position(anyarray,anyelement,integer)'"
1478 : ", 'array_positions(anyarray,anyelement)'"
1479 : ", 'width_bucket(anyelement,anyarray)'");
1480 :
1481 : /*
1482 : * The query below hardcodes FirstNormalObjectId as 16384 rather than
1483 : * interpolating that C #define into the query because, if that #define is
1484 : * ever changed, the cutoff we want to use is the value used by
1485 : * pre-version 14 servers, not that of some future version.
1486 : */
1487 :
1488 : /* Aggregate transition functions */
1489 0 : query = psprintf("SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
1490 : "FROM pg_proc AS p "
1491 : "JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
1492 : "JOIN pg_proc AS transfn ON transfn.oid=a.aggtransfn "
1493 : "WHERE p.oid >= 16384 "
1494 : "AND a.aggtransfn = ANY(ARRAY[%s]::regprocedure[]) "
1495 : "AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
1496 :
1497 : /* Aggregate final functions */
1498 : "UNION ALL "
1499 : "SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
1500 : "FROM pg_proc AS p "
1501 : "JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
1502 : "JOIN pg_proc AS finalfn ON finalfn.oid=a.aggfinalfn "
1503 : "WHERE p.oid >= 16384 "
1504 : "AND a.aggfinalfn = ANY(ARRAY[%s]::regprocedure[]) "
1505 : "AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
1506 :
1507 : /* Operators */
1508 : "UNION ALL "
1509 : "SELECT 'operator' AS objkind, op.oid::regoperator::text AS objname "
1510 : "FROM pg_operator AS op "
1511 : "WHERE op.oid >= 16384 "
1512 : "AND oprcode = ANY(ARRAY[%s]::regprocedure[]) "
1513 : "AND oprleft = ANY(ARRAY['anyarray', 'anyelement']::regtype[])",
1514 : old_polymorphics.data,
1515 : old_polymorphics.data,
1516 : old_polymorphics.data);
1517 :
1518 0 : upgrade_task_add_step(task, query, process_incompat_polymorphics,
1519 : true, &report);
1520 0 : upgrade_task_run(task, cluster);
1521 0 : upgrade_task_free(task);
1522 :
1523 0 : if (report.file)
1524 : {
1525 0 : fclose(report.file);
1526 0 : pg_log(PG_REPORT, "fatal");
1527 0 : pg_fatal("Your installation contains user-defined objects that refer to internal\n"
1528 : "polymorphic functions with arguments of type \"anyarray\" or \"anyelement\".\n"
1529 : "These user-defined objects must be dropped before upgrading and restored\n"
1530 : "afterwards, changing them to refer to the new corresponding functions with\n"
1531 : "arguments of type \"anycompatiblearray\" and \"anycompatible\".\n"
1532 : "A list of the problematic objects is in the file:\n"
1533 : " %s", report.path);
1534 : }
1535 : else
1536 0 : check_ok();
1537 :
1538 0 : termPQExpBuffer(&old_polymorphics);
1539 0 : pg_free(query);
1540 0 : }
1541 :
1542 : /*
1543 : * Callback function for processing results of query for
1544 : * check_for_tables_with_oids()'s UpgradeTask. If the query returned any rows
1545 : * (i.e., the check failed), write the details to the report file.
1546 : */
1547 : static void
1548 0 : process_with_oids_check(DbInfo *dbinfo, PGresult *res, void *arg)
1549 : {
1550 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1551 0 : int ntups = PQntuples(res);
1552 0 : int i_nspname = PQfnumber(res, "nspname");
1553 0 : int i_relname = PQfnumber(res, "relname");
1554 :
1555 : AssertVariableIsOfType(&process_with_oids_check, UpgradeTaskProcessCB);
1556 :
1557 0 : if (ntups == 0)
1558 0 : return;
1559 :
1560 0 : if (report->file == NULL &&
1561 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1562 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1563 :
1564 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1565 :
1566 0 : for (int rowno = 0; rowno < ntups; rowno++)
1567 0 : fprintf(report->file, " %s.%s\n",
1568 : PQgetvalue(res, rowno, i_nspname),
1569 : PQgetvalue(res, rowno, i_relname));
1570 : }
1571 :
1572 : /*
1573 : * Verify that no tables are declared WITH OIDS.
1574 : */
1575 : static void
1576 0 : check_for_tables_with_oids(ClusterInfo *cluster)
1577 : {
1578 : UpgradeTaskReport report;
1579 0 : UpgradeTask *task = upgrade_task_create();
1580 0 : const char *query = "SELECT n.nspname, c.relname "
1581 : "FROM pg_catalog.pg_class c, "
1582 : " pg_catalog.pg_namespace n "
1583 : "WHERE c.relnamespace = n.oid AND "
1584 : " c.relhasoids AND"
1585 : " n.nspname NOT IN ('pg_catalog')";
1586 :
1587 0 : prep_status("Checking for tables WITH OIDS");
1588 :
1589 0 : report.file = NULL;
1590 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1591 : log_opts.basedir,
1592 : "tables_with_oids.txt");
1593 :
1594 0 : upgrade_task_add_step(task, query, process_with_oids_check,
1595 : true, &report);
1596 0 : upgrade_task_run(task, cluster);
1597 0 : upgrade_task_free(task);
1598 :
1599 0 : if (report.file)
1600 : {
1601 0 : fclose(report.file);
1602 0 : pg_log(PG_REPORT, "fatal");
1603 0 : pg_fatal("Your installation contains tables declared WITH OIDS, which is not\n"
1604 : "supported anymore. Consider removing the oid column using\n"
1605 : " ALTER TABLE ... SET WITHOUT OIDS;\n"
1606 : "A list of tables with the problem is in the file:\n"
1607 : " %s", report.path);
1608 : }
1609 : else
1610 0 : check_ok();
1611 0 : }
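
/*
 * For illustration, the same condition can be run by hand on the old
 * (pre-12) cluster to list the offending tables, and each one can then be
 * fixed with the command quoted in the error hint (the table name below is
 * hypothetical):
 *
 *     SELECT n.nspname, c.relname
 *     FROM pg_catalog.pg_class c,
 *          pg_catalog.pg_namespace n
 *     WHERE c.relnamespace = n.oid AND
 *           c.relhasoids AND
 *           n.nspname NOT IN ('pg_catalog');
 *
 *     ALTER TABLE public.my_table SET WITHOUT OIDS;
 */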
1612 :
1613 :
1614 : /*
1615 : * check_for_pg_role_prefix()
1616 : *
1617 : * Versions older than 9.6 should not have any pg_* roles
1618 : */
1619 : static void
1620 0 : check_for_pg_role_prefix(ClusterInfo *cluster)
1621 : {
1622 : PGresult *res;
1623 0 : PGconn *conn = connectToServer(cluster, "template1");
1624 : int ntups;
1625 : int i_roloid;
1626 : int i_rolname;
1627 0 : FILE *script = NULL;
1628 : char output_path[MAXPGPATH];
1629 :
1630 0 : prep_status("Checking for roles starting with \"pg_\"");
1631 :
1632 0 : snprintf(output_path, sizeof(output_path), "%s/%s",
1633 : log_opts.basedir,
1634 : "pg_role_prefix.txt");
1635 :
1636 0 : res = executeQueryOrDie(conn,
1637 : "SELECT oid AS roloid, rolname "
1638 : "FROM pg_catalog.pg_roles "
1639 : "WHERE rolname ~ '^pg_'");
1640 :
1641 0 : ntups = PQntuples(res);
1642 0 : i_roloid = PQfnumber(res, "roloid");
1643 0 : i_rolname = PQfnumber(res, "rolname");
1644 0 : for (int rowno = 0; rowno < ntups; rowno++)
1645 : {
1646 0 : if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
1647 0 : pg_fatal("could not open file \"%s\": %m", output_path);
1648 0 : fprintf(script, "%s (oid=%s)\n",
1649 : PQgetvalue(res, rowno, i_rolname),
1650 : PQgetvalue(res, rowno, i_roloid));
1651 : }
1652 :
1653 0 : PQclear(res);
1654 :
1655 0 : PQfinish(conn);
1656 :
1657 0 : if (script)
1658 : {
1659 0 : fclose(script);
1660 0 : pg_log(PG_REPORT, "fatal");
1661 0 : pg_fatal("Your installation contains roles starting with \"pg_\".\n"
1662 : "\"pg_\" is a reserved prefix for system roles. The cluster\n"
1663 : "cannot be upgraded until these roles are renamed.\n"
1664 : "A list of roles starting with \"pg_\" is in the file:\n"
1665 : " %s", output_path);
1666 : }
1667 : else
1668 0 : check_ok();
1669 0 : }
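
/*
 * For illustration, the same query can be run by hand on the old cluster,
 * and each offending role renamed before retrying the upgrade (the role name
 * below is hypothetical):
 *
 *     SELECT oid AS roloid, rolname
 *     FROM pg_catalog.pg_roles
 *     WHERE rolname ~ '^pg_';
 *
 *     ALTER ROLE pg_backup_operator RENAME TO backup_operator;
 */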
1670 :
1671 : /*
1672 : * Callback function for processing results of query for
1673 : * check_for_user_defined_encoding_conversions()'s UpgradeTask. If the query
1674 : * returned any rows (i.e., the check failed), write the details to the report
1675 : * file.
1676 : */
1677 : static void
1678 0 : process_user_defined_encoding_conversions(DbInfo *dbinfo, PGresult *res, void *arg)
1679 : {
1680 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1681 0 : int ntups = PQntuples(res);
1682 0 : int i_conoid = PQfnumber(res, "conoid");
1683 0 : int i_conname = PQfnumber(res, "conname");
1684 0 : int i_nspname = PQfnumber(res, "nspname");
1685 :
1686 : AssertVariableIsOfType(&process_user_defined_encoding_conversions,
1687 : UpgradeTaskProcessCB);
1688 :
1689 0 : if (ntups == 0)
1690 0 : return;
1691 :
1692 0 : if (report->file == NULL &&
1693 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1694 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1695 :
1696 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1697 :
1698 0 : for (int rowno = 0; rowno < ntups; rowno++)
1699 0 : fprintf(report->file, " (oid=%s) %s.%s\n",
1700 : PQgetvalue(res, rowno, i_conoid),
1701 : PQgetvalue(res, rowno, i_nspname),
1702 : PQgetvalue(res, rowno, i_conname));
1703 : }
1704 :
1705 : /*
1706 : * Verify that no user-defined encoding conversions exist.
1707 : */
1708 : static void
1709 0 : check_for_user_defined_encoding_conversions(ClusterInfo *cluster)
1710 : {
1711 : UpgradeTaskReport report;
1712 0 : UpgradeTask *task = upgrade_task_create();
1713 : const char *query;
1714 :
1715 0 : prep_status("Checking for user-defined encoding conversions");
1716 :
1717 0 : report.file = NULL;
1718 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1719 : log_opts.basedir,
1720 : "encoding_conversions.txt");
1721 :
1722 : /*
1723 : * The query below hardcodes FirstNormalObjectId as 16384 rather than
1724 : * interpolating that C #define into the query because, if that #define is
1725 : * ever changed, the cutoff we want to use is the value used by
1726 : * pre-version 14 servers, not that of some future version.
1727 : */
1728 0 : query = "SELECT c.oid as conoid, c.conname, n.nspname "
1729 : "FROM pg_catalog.pg_conversion c, "
1730 : " pg_catalog.pg_namespace n "
1731 : "WHERE c.connamespace = n.oid AND "
1732 : " c.oid >= 16384";
1733 :
1734 0 : upgrade_task_add_step(task, query,
1735 : process_user_defined_encoding_conversions,
1736 : true, &report);
1737 0 : upgrade_task_run(task, cluster);
1738 0 : upgrade_task_free(task);
1739 :
1740 0 : if (report.file)
1741 : {
1742 0 : fclose(report.file);
1743 0 : pg_log(PG_REPORT, "fatal");
1744 0 : pg_fatal("Your installation contains user-defined encoding conversions.\n"
1745 : "The conversion function parameters changed in PostgreSQL version 14\n"
1746 : "so this cluster cannot currently be upgraded. You can remove the\n"
1747 : "encoding conversions in the old cluster and restart the upgrade.\n"
1748 : "A list of user-defined encoding conversions is in the file:\n"
1749 : " %s", report.path);
1750 : }
1751 : else
1752 0 : check_ok();
1753 0 : }
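
/*
 * For illustration, the user-defined conversions can be listed on the old
 * cluster with the same cutoff the check uses and dropped before the upgrade
 * (the conversion name below is hypothetical); they can be recreated
 * afterwards with CREATE CONVERSION on top of a conversion function written
 * for the new, version-14 signature:
 *
 *     SELECT c.oid AS conoid, c.conname, n.nspname
 *     FROM pg_catalog.pg_conversion c,
 *          pg_catalog.pg_namespace n
 *     WHERE c.connamespace = n.oid AND
 *           c.oid >= 16384;
 *
 *     DROP CONVERSION myschema.myconv;
 */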
1754 :
1755 : /*
1756 : * check_new_cluster_logical_replication_slots()
1757 : *
1758 : * Verify that there are no logical replication slots on the new cluster and
1759 : * that the parameter settings necessary for creating slots are sufficient.
1760 : */
1761 : static void
1762 14 : check_new_cluster_logical_replication_slots(void)
1763 : {
1764 : PGresult *res;
1765 : PGconn *conn;
1766 : int nslots_on_old;
1767 : int nslots_on_new;
1768 : int max_replication_slots;
1769 : char *wal_level;
1770 :
1771 : /* Logical slots can be migrated since PG17. */
1772 14 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1600)
1773 0 : return;
1774 :
1775 14 : nslots_on_old = count_old_cluster_logical_slots();
1776 :
1777 : /* Quick return if there are no logical slots to be migrated. */
1778 14 : if (nslots_on_old == 0)
1779 10 : return;
1780 :
1781 4 : conn = connectToServer(&new_cluster, "template1");
1782 :
1783 4 : prep_status("Checking for new cluster logical replication slots");
1784 :
1785 4 : res = executeQueryOrDie(conn, "SELECT count(*) "
1786 : "FROM pg_catalog.pg_replication_slots "
1787 : "WHERE slot_type = 'logical' AND "
1788 : "temporary IS FALSE;");
1789 :
1790 4 : if (PQntuples(res) != 1)
1791 0 : pg_fatal("could not count the number of logical replication slots");
1792 :
1793 4 : nslots_on_new = atoi(PQgetvalue(res, 0, 0));
1794 :
1795 4 : if (nslots_on_new)
1796 0 : pg_fatal("expected 0 logical replication slots but found %d",
1797 : nslots_on_new);
1798 :
1799 4 : PQclear(res);
1800 :
1801 4 : res = executeQueryOrDie(conn, "SELECT setting FROM pg_settings "
1802 : "WHERE name IN ('wal_level', 'max_replication_slots') "
1803 : "ORDER BY name DESC;");
1804 :
1805 4 : if (PQntuples(res) != 2)
1806 0 : pg_fatal("could not determine parameter settings on new cluster");
1807 :
1808 4 : wal_level = PQgetvalue(res, 0, 0);
1809 :
1810 4 : if (strcmp(wal_level, "logical") != 0)
1811 0 : pg_fatal("\"wal_level\" must be \"logical\" but is set to \"%s\"",
1812 : wal_level);
1813 :
1814 4 : max_replication_slots = atoi(PQgetvalue(res, 1, 0));
1815 :
1816 4 : if (nslots_on_old > max_replication_slots)
1817 2 : pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
1818 : "logical replication slots (%d) on the old cluster",
1819 : max_replication_slots, nslots_on_old);
1820 :
1821 2 : PQclear(res);
1822 2 : PQfinish(conn);
1823 :
1824 2 : check_ok();
1825 : }
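
/*
 * As a sketch of the remediation, both settings verified above can be raised
 * on the new cluster before retrying the upgrade.  The value below is
 * illustrative, and a server restart is required for these parameters to
 * take effect:
 *
 *     ALTER SYSTEM SET wal_level = 'logical';
 *     ALTER SYSTEM SET max_replication_slots = 10;
 */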
1826 :
1827 : /*
1828 : * check_new_cluster_subscription_configuration()
1829 : *
1830 : * Verify that the max_replication_slots setting is high enough to create the
1831 : * subscriptions. One replication origin is needed for each subscription, and
1832 : * the number of replication origins is limited by max_replication_slots.
1833 : */
1834 : static void
1835 12 : check_new_cluster_subscription_configuration(void)
1836 : {
1837 : PGresult *res;
1838 : PGconn *conn;
1839 : int max_replication_slots;
1840 :
1841 : /* Subscriptions and their dependencies can be migrated since PG17. */
1842 12 : if (GET_MAJOR_VERSION(old_cluster.major_version) < 1700)
1843 0 : return;
1844 :
1845 : /* Quick return if there are no subscriptions to be migrated. */
1846 12 : if (old_cluster.nsubs == 0)
1847 8 : return;
1848 :
1849 4 : prep_status("Checking for new cluster configuration for subscriptions");
1850 :
1851 4 : conn = connectToServer(&new_cluster, "template1");
1852 :
1853 4 : res = executeQueryOrDie(conn, "SELECT setting FROM pg_settings "
1854 : "WHERE name = 'max_replication_slots';");
1855 :
1856 4 : if (PQntuples(res) != 1)
1857 0 : pg_fatal("could not determine parameter settings on new cluster");
1858 :
1859 4 : max_replication_slots = atoi(PQgetvalue(res, 0, 0));
1860 4 : if (old_cluster.nsubs > max_replication_slots)
1861 2 : pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
1862 : "subscriptions (%d) on the old cluster",
1863 : max_replication_slots, old_cluster.nsubs);
1864 :
1865 2 : PQclear(res);
1866 2 : PQfinish(conn);
1867 :
1868 2 : check_ok();
1869 : }
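
/*
 * For illustration, the number of subscriptions to be migrated can be
 * counted on the old cluster (pg_subscription is a shared catalog, so any
 * database works), and max_replication_slots raised accordingly on the new
 * cluster; the value below is illustrative:
 *
 *     SELECT count(*) FROM pg_catalog.pg_subscription;
 *
 *     ALTER SYSTEM SET max_replication_slots = 10;
 */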
1870 :
1871 : /*
1872 : * check_old_cluster_for_valid_slots()
1873 : *
1874 : * Verify that all the logical slots are valid and have consumed all the WAL
1875 : * before shutdown.
1876 : */
1877 : static void
1878 18 : check_old_cluster_for_valid_slots(void)
1879 : {
1880 : char output_path[MAXPGPATH];
1881 18 : FILE *script = NULL;
1882 :
1883 18 : prep_status("Checking for valid logical replication slots");
1884 :
1885 18 : snprintf(output_path, sizeof(output_path), "%s/%s",
1886 : log_opts.basedir,
1887 : "invalid_logical_slots.txt");
1888 :
1889 70 : for (int dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
1890 : {
1891 52 : LogicalSlotInfoArr *slot_arr = &old_cluster.dbarr.dbs[dbnum].slot_arr;
1892 :
1893 62 : for (int slotnum = 0; slotnum < slot_arr->nslots; slotnum++)
1894 : {
1895 10 : LogicalSlotInfo *slot = &slot_arr->slots[slotnum];
1896 :
1897 : /* Is the slot usable? */
1898 10 : if (slot->invalid)
1899 : {
1900 0 : if (script == NULL &&
1901 0 : (script = fopen_priv(output_path, "w")) == NULL)
1902 0 : pg_fatal("could not open file \"%s\": %m", output_path);
1903 :
1904 0 : fprintf(script, "The slot \"%s\" is invalid\n",
1905 : slot->slotname);
1906 :
1907 0 : continue;
1908 : }
1909 :
1910 : /*
1911 : * Do an additional check to ensure that all logical replication
1912 : * slots have consumed all the WAL before shutdown.
1913 : *
1914 : * Note: This can be satisfied only when the old cluster has been
1915 : * shut down, so we skip this for live checks.
1916 : */
1917 10 : if (!user_opts.live_check && !slot->caught_up)
1918 : {
1919 6 : if (script == NULL &&
1920 2 : (script = fopen_priv(output_path, "w")) == NULL)
1921 0 : pg_fatal("could not open file \"%s\": %m", output_path);
1922 :
1923 4 : fprintf(script,
1924 : "The slot \"%s\" has not consumed the WAL yet\n",
1925 : slot->slotname);
1926 : }
1927 : }
1928 : }
1929 :
1930 18 : if (script)
1931 : {
1932 2 : fclose(script);
1933 :
1934 2 : pg_log(PG_REPORT, "fatal");
1935 2 : pg_fatal("Your installation contains logical replication slots that cannot be upgraded.\n"
1936 : "You can remove invalid slots and/or consume the pending WAL for other slots,\n"
1937 : "and then restart the upgrade.\n"
1938 : "A list of the problematic slots is in the file:\n"
1939 : " %s", output_path);
1940 : }
1941 :
1942 16 : check_ok();
1943 16 : }
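
/*
 * For illustration, the logical slots on the old cluster can be reviewed
 * before it is shut down, and slots that are no longer needed can be dropped
 * instead of being migrated (the slot name below is hypothetical):
 *
 *     SELECT slot_name, slot_type, temporary
 *     FROM pg_catalog.pg_replication_slots
 *     WHERE slot_type = 'logical';
 *
 *     SELECT pg_drop_replication_slot('obsolete_slot');
 */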
1944 :
1945 : /*
1946 : * Callback function for processing results of query for
1947 : * check_old_cluster_subscription_state()'s UpgradeTask. If the query returned
1948 : * any rows (i.e., the check failed), write the details to the report file.
1949 : */
1950 : static void
1951 48 : process_old_sub_state_check(DbInfo *dbinfo, PGresult *res, void *arg)
1952 : {
1953 48 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1954 48 : int ntups = PQntuples(res);
1955 48 : int i_srsubstate = PQfnumber(res, "srsubstate");
1956 48 : int i_subname = PQfnumber(res, "subname");
1957 48 : int i_nspname = PQfnumber(res, "nspname");
1958 48 : int i_relname = PQfnumber(res, "relname");
1959 :
1960 : AssertVariableIsOfType(&process_old_sub_state_check, UpgradeTaskProcessCB);
1961 :
1962 48 : if (ntups == 0)
1963 46 : return;
1964 :
1965 2 : if (report->file == NULL &&
1966 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1967 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1968 :
1969 4 : for (int i = 0; i < ntups; i++)
1970 2 : fprintf(report->file, "The table sync state \"%s\" is not allowed for database:\"%s\" subscription:\"%s\" schema:\"%s\" relation:\"%s\"\n",
1971 : PQgetvalue(res, i, i_srsubstate),
1972 : dbinfo->db_name,
1973 : PQgetvalue(res, i, i_subname),
1974 : PQgetvalue(res, i, i_nspname),
1975 : PQgetvalue(res, i, i_relname));
1976 : }
1977 :
1978 : /*
1979 : * check_old_cluster_subscription_state()
1980 : *
1981 : * Verify that the replication origin corresponding to each of the
1982 : * subscriptions is present and that each of the subscribed tables is in
1983 : * the 'i' (initialize) or 'r' (ready) state.
1984 : */
1985 : static void
1986 16 : check_old_cluster_subscription_state(void)
1987 : {
1988 16 : UpgradeTask *task = upgrade_task_create();
1989 : UpgradeTaskReport report;
1990 : const char *query;
1991 : PGresult *res;
1992 : PGconn *conn;
1993 : int ntup;
1994 :
1995 16 : prep_status("Checking for subscription state");
1996 :
1997 16 : report.file = NULL;
1998 16 : snprintf(report.path, sizeof(report.path), "%s/%s",
1999 : log_opts.basedir,
2000 : "subs_invalid.txt");
2001 :
2002 : /*
2003 : * Check that each subscription has its corresponding replication origin.
2004 : * pg_subscription is a shared catalog, so this check only needs to run once.
2005 : */
2006 16 : conn = connectToServer(&old_cluster, old_cluster.dbarr.dbs[0].db_name);
2007 16 : res = executeQueryOrDie(conn,
2008 : "SELECT d.datname, s.subname "
2009 : "FROM pg_catalog.pg_subscription s "
2010 : "LEFT OUTER JOIN pg_catalog.pg_replication_origin o "
2011 : " ON o.roname = 'pg_' || s.oid "
2012 : "INNER JOIN pg_catalog.pg_database d "
2013 : " ON d.oid = s.subdbid "
2014 : "WHERE o.roname IS NULL;");
2015 16 : ntup = PQntuples(res);
2016 18 : for (int i = 0; i < ntup; i++)
2017 : {
2018 2 : if (report.file == NULL &&
2019 2 : (report.file = fopen_priv(report.path, "w")) == NULL)
2020 0 : pg_fatal("could not open file \"%s\": %m", report.path);
2021 2 : fprintf(report.file, "The replication origin is missing for database:\"%s\" subscription:\"%s\"\n",
2022 : PQgetvalue(res, i, 0),
2023 : PQgetvalue(res, i, 1));
2024 : }
2025 16 : PQclear(res);
2026 16 : PQfinish(conn);
2027 :
2028 : /*
2029 : * We don't allow upgrade if there is a risk of dangling slot or origin
2030 : * corresponding to initial sync after upgrade.
2031 : *
2032 : * The 'i' (initialize) state means the slot/origin has not been created
2033 : * yet, while the 'r' (ready) state means it was created earlier and has
2034 : * already been dropped. Both of these states are supported by pg_upgrade.
2035 : * The other states listed below are not supported:
2036 : *
2037 : * a) SUBREL_STATE_DATASYNC: A relation upgraded while in this state would
2038 : * retain a replication slot, which could not be dropped by the sync
2039 : * worker spawned after the upgrade because the subscription ID used for
2040 : * the slot name won't match anymore.
2041 : *
2042 : * b) SUBREL_STATE_SYNCDONE: A relation upgraded while in this state would
2043 : * retain the replication origin when there is a failure in tablesync
2044 : * worker immediately after dropping the replication slot in the
2045 : * publisher.
2046 : *
2047 : * c) SUBREL_STATE_FINISHEDCOPY: A tablesync worker spawned to work on a
2048 : * relation upgraded while in this state would expect an origin ID with
2049 : * the OID of the subscription used before the upgrade, causing it to
2050 : * fail.
2051 : *
2052 : * d) SUBREL_STATE_SYNCWAIT, SUBREL_STATE_CATCHUP and
2053 : * SUBREL_STATE_UNKNOWN: These states are not stored in the catalog, so we
2054 : * need not allow these states.
2055 : */
2056 16 : query = "SELECT r.srsubstate, s.subname, n.nspname, c.relname "
2057 : "FROM pg_catalog.pg_subscription_rel r "
2058 : "LEFT JOIN pg_catalog.pg_subscription s"
2059 : " ON r.srsubid = s.oid "
2060 : "LEFT JOIN pg_catalog.pg_class c"
2061 : " ON r.srrelid = c.oid "
2062 : "LEFT JOIN pg_catalog.pg_namespace n"
2063 : " ON c.relnamespace = n.oid "
2064 : "WHERE r.srsubstate NOT IN ('i', 'r') "
2065 : "ORDER BY s.subname";
2066 :
2067 16 : upgrade_task_add_step(task, query, process_old_sub_state_check,
2068 : true, &report);
2069 :
2070 16 : upgrade_task_run(task, &old_cluster);
2071 16 : upgrade_task_free(task);
2072 :
2073 16 : if (report.file)
2074 : {
2075 2 : fclose(report.file);
2076 2 : pg_log(PG_REPORT, "fatal");
2077 2 : pg_fatal("Your installation contains subscriptions without a replication origin or with relations not in the i (initialize) or r (ready) state.\n"
2078 : "You can allow the initial sync to finish for all relations and then restart the upgrade.\n"
2079 : "A list of the problematic subscriptions is in the file:\n"
2080 : " %s", report.path);
2081 : }
2082 : else
2083 14 : check_ok();
2084 14 : }
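
/*
 * For illustration, the relations that would trip this check can be listed
 * on the old cluster with the same query the task runs; letting the initial
 * table synchronization finish normally moves them to the 'r' (ready) state,
 * after which the upgrade can be retried:
 *
 *     SELECT r.srsubstate, s.subname, n.nspname, c.relname
 *     FROM pg_catalog.pg_subscription_rel r
 *     LEFT JOIN pg_catalog.pg_subscription s ON r.srsubid = s.oid
 *     LEFT JOIN pg_catalog.pg_class c ON r.srrelid = c.oid
 *     LEFT JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
 *     WHERE r.srsubstate NOT IN ('i', 'r')
 *     ORDER BY s.subname;
 */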
|