Line data Source code
1 : /*
2 : * check.c
3 : *
4 : * server checks and output routines
5 : *
6 : * Copyright (c) 2010-2025, PostgreSQL Global Development Group
7 : * src/bin/pg_upgrade/check.c
8 : */
9 :
10 : #include "postgres_fe.h"
11 :
12 : #include "catalog/pg_authid_d.h"
13 : #include "catalog/pg_class_d.h"
14 : #include "fe_utils/string_utils.h"
15 : #include "pg_upgrade.h"
16 : #include "common/unicode_version.h"
17 :
18 : static void check_new_cluster_is_empty(void);
19 : static void check_is_install_user(ClusterInfo *cluster);
20 : static void check_for_connection_status(ClusterInfo *cluster);
21 : static void check_for_prepared_transactions(ClusterInfo *cluster);
22 : static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster);
23 : static void check_for_user_defined_postfix_ops(ClusterInfo *cluster);
24 : static void check_for_incompatible_polymorphics(ClusterInfo *cluster);
25 : static void check_for_tables_with_oids(ClusterInfo *cluster);
26 : static void check_for_pg_role_prefix(ClusterInfo *cluster);
27 : static void check_for_new_tablespace_dir(void);
28 : static void check_for_user_defined_encoding_conversions(ClusterInfo *cluster);
29 : static void check_for_unicode_update(ClusterInfo *cluster);
30 : static void check_new_cluster_logical_replication_slots(void);
31 : static void check_new_cluster_subscription_configuration(void);
32 : static void check_old_cluster_for_valid_slots(void);
33 : static void check_old_cluster_subscription_state(void);
34 :
35 : /*
36 : * DataTypesUsageChecks - definitions of data type checks for the old cluster
37 : * in order to determine if an upgrade can be performed. See the comment on
38 : * data_types_usage_checks below for a more detailed description.
39 : */
40 : typedef struct
41 : {
42 : /* Status line to print to the user */
43 : const char *status;
44 : /* Filename to store report to */
45 : const char *report_filename;
46 : /* Query to extract the oid of the datatype */
47 : const char *base_query;
48 : /* Text to store to report in case of error */
49 : const char *report_text;
50 : /* The latest version where the check applies */
51 : int threshold_version;
52 : /* A function pointer for determining if the check applies */
53 : DataTypesUsageVersionCheck version_hook;
54 : } DataTypesUsageChecks;
55 :
56 : /*
57 : * Special values for threshold_version, indicating that a check applies to
58 : * all versions, or that a custom function needs to be invoked to determine
59 : * if the check applies.
60 : */
61 : #define MANUAL_CHECK 1
62 : #define ALL_VERSIONS -1
63 :
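To make the threshold semantics concrete, here is a small sketch (hypothetical helper name; it simply mirrors the comparison performed in check_for_data_types_usage() further down) of how an ordinary threshold_version value such as 903 or 1500 relates to the old cluster's major version:

	static bool
	threshold_applies(ClusterInfo *cluster, int threshold_version)
	{
		/*
		 * A cluster reporting version 90624 (9.6.24) has major version 906,
		 * so a check with threshold_version = 906 still applies to it, while
		 * one with threshold_version = 903 does not.
		 */
		return GET_MAJOR_VERSION(cluster->major_version) <= threshold_version;
	}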
64 : /*--
65 : * Data type usage checks. Each check for problematic data type usage is
66 : * defined in this array with metadata, SQL query for finding the data type
67 : * and functionality for deciding if the check is applicable to the version
68 : * of the old cluster. The struct members are described in detail below:
69 : *
70 : * status A one-line string which can be printed to the user to
71 : * inform about progress. Should not end with a newline.
72 : * report_filename The filename in which the list of problems detected by
73 : * the check will be printed.
74 : * base_query A query which extracts the Oid of the datatype checked
75 : * for.
76 : * report_text The text which will be printed to the user to explain
77 : * what the check did, and why it failed. The text should
78 : * end with a newline, and does not need to refer to the
79 : * report_filename as that is automatically appended to
80 : * the report with the path to the log folder.
81 : * threshold_version The major version of PostgreSQL for which to run the
82 : * check. If the old cluster's version is less than or equal
83 : * to the threshold version then the check will be executed.
84 : * If the old version is greater than the threshold then
85 : * the check is skipped. If the threshold_version is set
86 : * to ALL_VERSIONS then it will be run unconditionally;
87 : * if set to MANUAL_CHECK then the version_hook function
88 : * will be executed in order to determine whether or not
89 : * to run.
90 : * version_hook A function pointer to a version check function of type
91 : * DataTypesUsageVersionCheck which is used to determine
92 : * if the check is applicable to the old cluster. If the
93 : * version_hook returns true then the check will be run,
94 : * else it will be skipped. The function is only
95 : * executed if threshold_version is set to MANUAL_CHECK.
96 : */
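As a rough sketch of the version_hook member just described (hypothetical name and cutoff; the signature is inferred from the existing jsonb_9_4_check_applicable hook referenced below):

	static bool
	widget_type_check_applicable(ClusterInfo *cluster)
	{
		/* hypothetical: run the check only when upgrading from PG 17 or older */
		return GET_MAJOR_VERSION(cluster->major_version) <= 1700;
	}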
97 : static DataTypesUsageChecks data_types_usage_checks[] =
98 : {
99 : /*
100 : * Look for composite types that were made during initdb *or* belong to
101 : * information_schema; that's important in case information_schema was
102 : * dropped and reloaded.
103 : *
104 : * The cutoff OID here should match the source cluster's value of
105 : * FirstNormalObjectId. We hardcode it rather than using that C #define
106 : * because, if that #define is ever changed, our own version's value is
107 : * NOT what to use. Eventually we may need a test on the source cluster's
108 : * version to select the correct value.
109 : */
110 : {
111 : .status = gettext_noop("Checking for system-defined composite types in user tables"),
112 : .report_filename = "tables_using_composite.txt",
113 : .base_query =
114 : "SELECT t.oid FROM pg_catalog.pg_type t "
115 : "LEFT JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid "
116 : " WHERE typtype = 'c' AND (t.oid < 16384 OR nspname = 'information_schema')",
117 : .report_text =
118 : gettext_noop("Your installation contains system-defined composite types in user tables.\n"
119 : "These type OIDs are not stable across PostgreSQL versions,\n"
120 : "so this cluster cannot currently be upgraded. You can drop the\n"
121 : "problem columns and restart the upgrade.\n"),
122 : .threshold_version = ALL_VERSIONS
123 : },
124 :
125 : /*
126 : * 9.3 -> 9.4: the 'line' data type was fully implemented in 9.4; it
127 : * previously returned "not enabled" by default and was only functionally
128 : * enabled with a compile-time switch. As of 9.4, "line" has a different
129 : * on-disk representation format.
130 : */
131 : {
132 : .status = gettext_noop("Checking for incompatible \"line\" data type"),
133 : .report_filename = "tables_using_line.txt",
134 : .base_query =
135 : "SELECT 'pg_catalog.line'::pg_catalog.regtype AS oid",
136 : .report_text =
137 : gettext_noop("Your installation contains the \"line\" data type in user tables.\n"
138 : "This data type changed its internal and input/output format\n"
139 : "between your old and new versions so this\n"
140 : "cluster cannot currently be upgraded. You can\n"
141 : "drop the problem columns and restart the upgrade.\n"),
142 : .threshold_version = 903
143 : },
144 :
145 : /*
146 : * pg_upgrade only preserves these system values: pg_class.oid, pg_type.oid,
147 : * pg_enum.oid, pg_authid.oid and pg_database.oid.
148 : *
149 : * Many of the reg* data types reference system catalog info that is not
150 : * preserved, and hence these data types cannot be used in user tables
151 : * upgraded by pg_upgrade.
152 : */
153 : {
154 : .status = gettext_noop("Checking for reg* data types in user tables"),
155 : .report_filename = "tables_using_reg.txt",
156 :
157 : /*
158 : * Note: older servers will not have all of these reg* types, so we
159 : * have to write the query like this rather than depending on casts to
160 : * regtype.
161 : */
162 : .base_query =
163 : "SELECT oid FROM pg_catalog.pg_type t "
164 : "WHERE t.typnamespace = "
165 : " (SELECT oid FROM pg_catalog.pg_namespace "
166 : " WHERE nspname = 'pg_catalog') "
167 : " AND t.typname IN ( "
168 : /* pg_class.oid is preserved, so 'regclass' is OK */
169 : " 'regcollation', "
170 : " 'regconfig', "
171 : /* pg_database.oid is preserved, so 'regdatabase' is OK */
172 : " 'regdictionary', "
173 : " 'regnamespace', "
174 : " 'regoper', "
175 : " 'regoperator', "
176 : " 'regproc', "
177 : " 'regprocedure' "
178 : /* pg_authid.oid is preserved, so 'regrole' is OK */
179 : /* pg_type.oid is (mostly) preserved, so 'regtype' is OK */
180 : " )",
181 : .report_text =
182 : gettext_noop("Your installation contains one of the reg* data types in user tables.\n"
183 : "These data types reference system OIDs that are not preserved by\n"
184 : "pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
185 : "drop the problem columns and restart the upgrade.\n"),
186 : .threshold_version = ALL_VERSIONS
187 : },
188 :
189 : /*
190 : * PG 16 increased the size of the 'aclitem' type, which breaks the
191 : * on-disk format for existing data.
192 : */
193 : {
194 : .status = gettext_noop("Checking for incompatible \"aclitem\" data type"),
195 : .report_filename = "tables_using_aclitem.txt",
196 : .base_query =
197 : "SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid",
198 : .report_text =
199 : gettext_noop("Your installation contains the \"aclitem\" data type in user tables.\n"
200 : "The internal format of \"aclitem\" changed in PostgreSQL version 16\n"
201 : "so this cluster cannot currently be upgraded. You can drop the\n"
202 : "problem columns and restart the upgrade.\n"),
203 : .threshold_version = 1500
204 : },
205 :
206 : /*
207 : * It's no longer allowed to create tables or views with "unknown"-type
208 : * columns. We do not complain about views with such columns, because
209 : * they should get silently converted to "text" columns during the DDL
210 : * dump and reload; it seems unlikely to be worth making users do that by
211 : * hand. However, if there's a table with such a column, the DDL reload
212 : * will fail, so we should pre-detect that rather than failing
213 : * mid-upgrade. Worse, if there's a matview with such a column, the DDL
214 : * reload will silently change it to "text" which won't match the on-disk
215 : * storage (which is like "cstring"). So we *must* reject that.
216 : */
217 : {
218 : .status = gettext_noop("Checking for invalid \"unknown\" user columns"),
219 : .report_filename = "tables_using_unknown.txt",
220 : .base_query =
221 : "SELECT 'pg_catalog.unknown'::pg_catalog.regtype AS oid",
222 : .report_text =
223 : gettext_noop("Your installation contains the \"unknown\" data type in user tables.\n"
224 : "This data type is no longer allowed in tables, so this cluster\n"
225 : "cannot currently be upgraded. You can drop the problem columns\n"
226 : "and restart the upgrade.\n"),
227 : .threshold_version = 906
228 : },
229 :
230 : /*
231 : * PG 12 changed the 'sql_identifier' type storage to be based on name,
232 : * not varchar, which breaks the on-disk format for existing data. So we
233 : * need to prevent upgrade when it is used in user objects (tables,
234 : * indexes, ...). Because the type was switched from varchar to name
235 : * (which is by-ref, but not varlena), the storage is affected. This
236 : * means user tables using sql_identifier for columns are broken because
237 : * the on-disk format is different.
238 : */
239 : {
240 : .status = gettext_noop("Checking for invalid \"sql_identifier\" user columns"),
241 : .report_filename = "tables_using_sql_identifier.txt",
242 : .base_query =
243 : "SELECT 'information_schema.sql_identifier'::pg_catalog.regtype AS oid",
244 : .report_text =
245 : gettext_noop("Your installation contains the \"sql_identifier\" data type in user tables.\n"
246 : "The on-disk format for this data type has changed, so this\n"
247 : "cluster cannot currently be upgraded. You can drop the problem\n"
248 : "columns and restart the upgrade.\n"),
249 : .threshold_version = 1100
250 : },
251 :
252 : /*
253 : * JSONB changed its storage format during 9.4 beta, so check for it.
254 : */
255 : {
256 : .status = gettext_noop("Checking for incompatible \"jsonb\" data type in user tables"),
257 : .report_filename = "tables_using_jsonb.txt",
258 : .base_query =
259 : "SELECT 'pg_catalog.jsonb'::pg_catalog.regtype AS oid",
260 : .report_text =
261 : gettext_noop("Your installation contains the \"jsonb\" data type in user tables.\n"
262 : "The internal format of \"jsonb\" changed during 9.4 beta so this\n"
263 : "cluster cannot currently be upgraded. You can drop the problem \n"
264 : "columns and restart the upgrade.\n"),
265 : .threshold_version = MANUAL_CHECK,
266 : .version_hook = jsonb_9_4_check_applicable
267 : },
268 :
269 : /*
270 : * PG 12 removed types abstime, reltime, tinterval.
271 : */
272 : {
273 : .status = gettext_noop("Checking for removed \"abstime\" data type in user tables"),
274 : .report_filename = "tables_using_abstime.txt",
275 : .base_query =
276 : "SELECT 'pg_catalog.abstime'::pg_catalog.regtype AS oid",
277 : .report_text =
278 : gettext_noop("Your installation contains the \"abstime\" data type in user tables.\n"
279 : "The \"abstime\" type has been removed in PostgreSQL version 12,\n"
280 : "so this cluster cannot currently be upgraded. You can drop the\n"
281 : "problem columns, or change them to another data type, and restart\n"
282 : "the upgrade.\n"),
283 : .threshold_version = 1100
284 : },
285 : {
286 : .status = gettext_noop("Checking for removed \"reltime\" data type in user tables"),
287 : .report_filename = "tables_using_reltime.txt",
288 : .base_query =
289 : "SELECT 'pg_catalog.reltime'::pg_catalog.regtype AS oid",
290 : .report_text =
291 : gettext_noop("Your installation contains the \"reltime\" data type in user tables.\n"
292 : "The \"reltime\" type has been removed in PostgreSQL version 12,\n"
293 : "so this cluster cannot currently be upgraded. You can drop the\n"
294 : "problem columns, or change them to another data type, and restart\n"
295 : "the upgrade.\n"),
296 : .threshold_version = 1100
297 : },
298 : {
299 : .status = gettext_noop("Checking for removed \"tinterval\" data type in user tables"),
300 : .report_filename = "tables_using_tinterval.txt",
301 : .base_query =
302 : "SELECT 'pg_catalog.tinterval'::pg_catalog.regtype AS oid",
303 : .report_text =
304 : gettext_noop("Your installation contains the \"tinterval\" data type in user tables.\n"
305 : "The \"tinterval\" type has been removed in PostgreSQL version 12,\n"
306 : "so this cluster cannot currently be upgraded. You can drop the\n"
307 : "problem columns, or change them to another data type, and restart\n"
308 : "the upgrade.\n"),
309 : .threshold_version = 1100
310 : },
311 :
312 : /* End of checks marker, must remain last */
313 : {
314 : NULL, NULL, NULL, NULL, 0, NULL
315 : }
316 : };
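To make the entry format concrete, a hypothetical additional check (illustrative only; "pg_catalog.widget" is a made-up type) would be added just before the end-of-checks marker and would follow the same shape as the entries above:

	{
		.status = gettext_noop("Checking for incompatible \"widget\" data type"),
		.report_filename = "tables_using_widget.txt",
		.base_query =
			"SELECT 'pg_catalog.widget'::pg_catalog.regtype AS oid",
		.report_text =
			gettext_noop("Your installation contains the \"widget\" data type in user tables.\n"
						 "The internal format of \"widget\" changed, so this cluster cannot\n"
						 "currently be upgraded. You can drop the problem columns and\n"
						 "restart the upgrade.\n"),
		.threshold_version = 1700
	},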
317 :
318 : /*
319 : * Private state for check_for_data_types_usage()'s UpgradeTask.
320 : */
321 : struct data_type_check_state
322 : {
323 : DataTypesUsageChecks *check; /* the check for this step */
324 : bool result; /* true if check failed for any database */
325 : PQExpBuffer *report; /* buffer for report on failed checks */
326 : };
327 :
328 : /*
329 : * Returns a palloc'd query string for the data type check, for use by
330 : * check_for_data_types_usage()'s UpgradeTask.
331 : */
332 : static char *
333 48 : data_type_check_query(int checknum)
334 : {
335 48 : DataTypesUsageChecks *check = &data_types_usage_checks[checknum];
336 :
337 48 : return psprintf("WITH RECURSIVE oids AS ( "
338 : /* start with the type(s) returned by base_query */
339 : " %s "
340 : " UNION ALL "
341 : " SELECT * FROM ( "
342 : /* inner WITH because we can only reference the CTE once */
343 : " WITH x AS (SELECT oid FROM oids) "
344 : /* domains on any type selected so far */
345 : " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typbasetype = x.oid AND typtype = 'd' "
346 : " UNION ALL "
347 : /* arrays over any type selected so far */
348 : " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typelem = x.oid AND typtype = 'b' "
349 : " UNION ALL "
350 : /* composite types containing any type selected so far */
351 : " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_class c, pg_catalog.pg_attribute a, x "
352 : " WHERE t.typtype = 'c' AND "
353 : " t.oid = c.reltype AND "
354 : " c.oid = a.attrelid AND "
355 : " NOT a.attisdropped AND "
356 : " a.atttypid = x.oid "
357 : " UNION ALL "
358 : /* ranges containing any type selected so far */
359 : " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x "
360 : " WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid"
361 : " ) foo "
362 : ") "
363 : /* now look for stored columns of any such type */
364 : "SELECT n.nspname, c.relname, a.attname "
365 : "FROM pg_catalog.pg_class c, "
366 : " pg_catalog.pg_namespace n, "
367 : " pg_catalog.pg_attribute a "
368 : "WHERE c.oid = a.attrelid AND "
369 : " NOT a.attisdropped AND "
370 : " a.atttypid IN (SELECT oid FROM oids) AND "
371 : " c.relkind IN ("
372 : CppAsString2(RELKIND_RELATION) ", "
373 : CppAsString2(RELKIND_MATVIEW) ", "
374 : CppAsString2(RELKIND_INDEX) ") AND "
375 : " c.relnamespace = n.oid AND "
376 : /* exclude possible orphaned temp tables */
377 : " n.nspname !~ '^pg_temp_' AND "
378 : " n.nspname !~ '^pg_toast_temp_' AND "
379 : /* exclude system catalogs, too */
380 : " n.nspname NOT IN ('pg_catalog', 'information_schema')",
381 : check->base_query);
382 : }
383 :
384 : /*
385 : * Callback function for processing results of queries for
386 : * check_for_data_types_usage()'s UpgradeTask. If the query returned any rows
387 : * (i.e., the check failed), write the details to the report file.
388 : */
389 : static void
390 148 : process_data_type_check(DbInfo *dbinfo, PGresult *res, void *arg)
391 : {
392 148 : struct data_type_check_state *state = (struct data_type_check_state *) arg;
393 148 : int ntups = PQntuples(res);
394 : char output_path[MAXPGPATH];
395 148 : int i_nspname = PQfnumber(res, "nspname");
396 148 : int i_relname = PQfnumber(res, "relname");
397 148 : int i_attname = PQfnumber(res, "attname");
398 148 : FILE *script = NULL;
399 :
400 : AssertVariableIsOfType(&process_data_type_check, UpgradeTaskProcessCB);
401 :
402 148 : if (ntups == 0)
403 148 : return;
404 :
405 0 : snprintf(output_path, sizeof(output_path), "%s/%s",
406 : log_opts.basedir,
407 0 : state->check->report_filename);
408 :
409 : /*
410 : * Make sure we have a buffer to save reports to, now that we have
411 : * found a failing check.
412 : */
413 0 : if (*state->report == NULL)
414 0 : *state->report = createPQExpBuffer();
415 :
416 : /*
417 : * If this is the first time we see an error for the check in question
418 : * then print a status message for the failure.
419 : */
420 0 : if (!state->result)
421 : {
422 0 : pg_log(PG_REPORT, "failed check: %s", _(state->check->status));
423 0 : appendPQExpBuffer(*state->report, "\n%s\n%s %s\n",
424 0 : _(state->check->report_text),
425 : _("A list of the problem columns is in the file:"),
426 : output_path);
427 : }
428 0 : state->result = true;
429 :
430 0 : if ((script = fopen_priv(output_path, "a")) == NULL)
431 0 : pg_fatal("could not open file \"%s\": %m", output_path);
432 :
433 0 : fprintf(script, "In database: %s\n", dbinfo->db_name);
434 :
435 0 : for (int rowno = 0; rowno < ntups; rowno++)
436 0 : fprintf(script, " %s.%s.%s\n",
437 : PQgetvalue(res, rowno, i_nspname),
438 : PQgetvalue(res, rowno, i_relname),
439 : PQgetvalue(res, rowno, i_attname));
440 :
441 0 : fclose(script);
442 : }
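For a sense of the report file this callback produces, a failing check against a hypothetical database "appdb" with a single offending column would append lines of the form:

	In database: appdb
	  public.orders.legacy_acl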
443 :
444 : /*
445 : * check_for_data_types_usage()
446 : * Detect whether there are any stored columns depending on given type(s)
447 : *
448 : * If so, write a report to the given file name and signal a failure to the
449 : * user.
450 : *
451 : * The checks to run are defined in a DataTypesUsageChecks structure where
452 : * each check has metadata for explaining errors to the user, a base_query,
453 : * a report filename and a function pointer hook for deciding whether the check
454 : * should be executed given the cluster at hand.
455 : *
456 : * base_query should be a SELECT yielding a single column named "oid",
457 : * containing the pg_type OIDs of one or more types that are known to have
458 : * inconsistent on-disk representations across server versions.
459 : *
460 : * We check for the type(s) in tables, matviews, and indexes, but not views;
461 : * there's no storage involved in a view.
462 : */
463 : static void
464 24 : check_for_data_types_usage(ClusterInfo *cluster)
465 : {
466 24 : PQExpBuffer report = NULL;
467 24 : DataTypesUsageChecks *tmp = data_types_usage_checks;
468 24 : int n_data_types_usage_checks = 0;
469 24 : UpgradeTask *task = upgrade_task_create();
470 24 : char **queries = NULL;
471 : struct data_type_check_state *states;
472 :
473 24 : prep_status("Checking data type usage");
474 :
475 : /* Gather number of checks to perform */
476 264 : while (tmp->status != NULL)
477 : {
478 240 : n_data_types_usage_checks++;
479 240 : tmp++;
480 : }
481 :
482 : /* Allocate memory for queries and for task states */
483 24 : queries = pg_malloc0(sizeof(char *) * n_data_types_usage_checks);
484 24 : states = pg_malloc0(sizeof(struct data_type_check_state) * n_data_types_usage_checks);
485 :
486 264 : for (int i = 0; i < n_data_types_usage_checks; i++)
487 : {
488 240 : DataTypesUsageChecks *check = &data_types_usage_checks[i];
489 :
490 240 : if (check->threshold_version == MANUAL_CHECK)
491 : {
492 : Assert(check->version_hook);
493 :
494 : /*
495 : * Make sure that the check applies to the current cluster version
496 : * and skip it if not.
497 : */
498 24 : if (!check->version_hook(cluster))
499 24 : continue;
500 : }
501 216 : else if (check->threshold_version != ALL_VERSIONS)
502 : {
503 168 : if (GET_MAJOR_VERSION(cluster->major_version) > check->threshold_version)
504 168 : continue;
505 : }
506 : else
507 : Assert(check->threshold_version == ALL_VERSIONS);
508 :
509 48 : queries[i] = data_type_check_query(i);
510 :
511 48 : states[i].check = check;
512 48 : states[i].report = &report;
513 :
514 48 : upgrade_task_add_step(task, queries[i], process_data_type_check,
515 48 : true, &states[i]);
516 : }
517 :
518 : /*
519 : * Connect to each database in the cluster and run all defined checks
520 : * against that database before trying the next one.
521 : */
522 24 : upgrade_task_run(task, cluster);
523 24 : upgrade_task_free(task);
524 :
525 24 : if (report)
526 : {
527 0 : pg_fatal("Data type checks failed: %s", report->data);
528 : destroyPQExpBuffer(report);
529 : }
530 :
531 264 : for (int i = 0; i < n_data_types_usage_checks; i++)
532 : {
533 240 : if (queries[i])
534 48 : pg_free(queries[i]);
535 : }
536 24 : pg_free(queries);
537 24 : pg_free(states);
538 :
539 24 : check_ok();
540 24 : }
541 :
542 : /*
543 : * fix_path_separator
544 : * For non-Windows, just return the argument.
545 : * For Windows, convert any forward slash to a backslash,
546 : * as is suitable for arguments to built-in commands
547 : * like RMDIR and DEL.
548 : */
549 : static char *
550 32 : fix_path_separator(char *path)
551 : {
552 : #ifdef WIN32
553 :
554 : char *result;
555 : char *c;
556 :
557 : result = pg_strdup(path);
558 :
559 : for (c = result; *c != '\0'; c++)
560 : if (*c == '/')
561 : *c = '\\';
562 :
563 : return result;
564 : #else
565 :
566 32 : return path;
567 : #endif
568 : }
569 :
570 : void
571 32 : output_check_banner(void)
572 : {
573 32 : if (user_opts.live_check)
574 : {
575 0 : pg_log(PG_REPORT,
576 : "Performing Consistency Checks on Old Live Server\n"
577 : "------------------------------------------------");
578 : }
579 : else
580 : {
581 32 : pg_log(PG_REPORT,
582 : "Performing Consistency Checks\n"
583 : "-----------------------------");
584 : }
585 32 : }
586 :
587 :
588 : void
589 30 : check_and_dump_old_cluster(void)
590 : {
591 : /* -- OLD -- */
592 :
593 30 : if (!user_opts.live_check)
594 30 : start_postmaster(&old_cluster, true);
595 :
596 : /*
597 : * First check that all databases allow connections since we'll otherwise
598 : * fail in later stages.
599 : */
600 30 : check_for_connection_status(&old_cluster);
601 :
602 : /*
603 : * Extract a list of databases, tables, and logical replication slots from
604 : * the old cluster.
605 : */
606 28 : get_db_rel_and_slot_infos(&old_cluster);
607 :
608 28 : init_tablespaces();
609 :
610 28 : get_loadable_libraries();
611 :
612 :
613 : /*
614 : * Check for various failure cases
615 : */
616 28 : check_is_install_user(&old_cluster);
617 28 : check_for_prepared_transactions(&old_cluster);
618 28 : check_for_isn_and_int8_passing_mismatch(&old_cluster);
619 :
620 28 : if (GET_MAJOR_VERSION(old_cluster.major_version) >= 1700)
621 : {
622 : /*
623 : * Logical replication slots can be migrated since PG17. See comments
624 : * atop get_old_cluster_logical_slot_infos().
625 : */
626 28 : check_old_cluster_for_valid_slots();
627 :
628 : /*
629 : * Subscriptions and their dependencies can be migrated since PG17.
630 : * Before that, logical slots are not upgraded, so we will not be
631 : * able to upgrade the logical replication clusters completely.
632 : */
633 26 : get_subscription_count(&old_cluster);
634 26 : check_old_cluster_subscription_state();
635 : }
636 :
637 24 : check_for_data_types_usage(&old_cluster);
638 :
639 : /*
640 : * Unicode updates can affect some objects that use expressions with
641 : * functions dependent on Unicode.
642 : */
643 24 : check_for_unicode_update(&old_cluster);
644 :
645 : /*
646 : * PG 14 changed the function signature of encoding conversion functions.
647 : * Conversions from older versions cannot be upgraded automatically
648 : * because the user-defined functions used by the encoding conversions
649 : * need to be changed to match the new signature.
650 : */
651 24 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
652 0 : check_for_user_defined_encoding_conversions(&old_cluster);
653 :
654 : /*
655 : * Pre-PG 14 allowed user-defined postfix operators, which are not
656 : * supported anymore. Verify there are none, if applicable.
657 : */
658 24 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
659 0 : check_for_user_defined_postfix_ops(&old_cluster);
660 :
661 : /*
662 : * PG 14 changed several built-in polymorphic functions (array_append() etc.)
663 : * from anyarray/anyelement arguments to the anycompatible variants.
664 : */
665 24 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
666 0 : check_for_incompatible_polymorphics(&old_cluster);
667 :
668 : /*
669 : * Pre-PG 12 allowed tables to be declared WITH OIDS, which is not
670 : * supported anymore. Verify there are none, if applicable.
671 : */
672 24 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1100)
673 0 : check_for_tables_with_oids(&old_cluster);
674 :
675 : /*
676 : * Pre-PG 10 allowed tables with 'unknown' type columns and non-WAL-logged
677 : * hash indexes.
678 : */
679 24 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 906)
680 : {
681 0 : if (user_opts.check)
682 0 : old_9_6_invalidate_hash_indexes(&old_cluster, true);
683 : }
684 :
685 : /* 9.5 and below should not have roles starting with pg_ */
686 24 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 905)
687 0 : check_for_pg_role_prefix(&old_cluster);
688 :
689 : /*
690 : * While this is not a check, we do it now because this is the only time
691 : * the old server is running.
692 : */
693 24 : if (!user_opts.check)
694 20 : generate_old_dump();
695 :
696 24 : if (!user_opts.live_check)
697 24 : stop_postmaster(false);
698 24 : }
699 :
700 :
701 : void
702 24 : check_new_cluster(void)
703 : {
704 24 : get_db_rel_and_slot_infos(&new_cluster);
705 :
706 24 : check_new_cluster_is_empty();
707 :
708 24 : check_loadable_libraries();
709 :
710 24 : switch (user_opts.transfer_mode)
711 : {
712 2 : case TRANSFER_MODE_CLONE:
713 2 : check_file_clone();
714 0 : break;
715 16 : case TRANSFER_MODE_COPY:
716 16 : break;
717 2 : case TRANSFER_MODE_COPY_FILE_RANGE:
718 2 : check_copy_file_range();
719 2 : break;
720 2 : case TRANSFER_MODE_LINK:
721 2 : check_hard_link(TRANSFER_MODE_LINK);
722 2 : break;
723 2 : case TRANSFER_MODE_SWAP:
724 :
725 : /*
726 : * We do the hard link check for --swap, too, since it's an easy
727 : * way to verify the clusters are in the same file system. This
728 : * allows us to take some shortcuts in the file synchronization
729 : * step. With some more effort, we could probably support the
730 : * separate-file-system use case, but this mode is unlikely to
731 : * offer much benefit if we have to copy the files across file
732 : * system boundaries.
733 : */
734 2 : check_hard_link(TRANSFER_MODE_SWAP);
735 :
736 : /*
737 : * There are a few known issues with using --swap to upgrade from
738 : * versions older than 10. For example, the sequence tuple format
739 : * changed in v10, and the visibility map format changed in 9.6.
740 : * While such problems are not insurmountable (and we may have to
741 : * deal with similar problems in the future, anyway), it doesn't
742 : * seem worth the effort to support swap mode for upgrades from
743 : * long-unsupported versions.
744 : */
745 2 : if (GET_MAJOR_VERSION(old_cluster.major_version) < 1000)
746 0 : pg_fatal("Swap mode can only upgrade clusters from PostgreSQL version %s and later.",
747 : "10");
748 :
749 2 : break;
750 : }
751 :
752 22 : check_is_install_user(&new_cluster);
753 :
754 22 : check_for_prepared_transactions(&new_cluster);
755 :
756 22 : check_for_new_tablespace_dir();
757 :
758 22 : check_new_cluster_logical_replication_slots();
759 :
760 20 : check_new_cluster_subscription_configuration();
761 18 : }
762 :
763 :
764 : void
765 18 : report_clusters_compatible(void)
766 : {
767 18 : if (user_opts.check)
768 : {
769 2 : pg_log(PG_REPORT, "\n*Clusters are compatible*");
770 : /* stops new cluster */
771 2 : stop_postmaster(false);
772 :
773 2 : cleanup_output_dirs();
774 2 : exit(0);
775 : }
776 :
777 16 : pg_log(PG_REPORT, "\n"
778 : "If pg_upgrade fails after this point, you must re-initdb the\n"
779 : "new cluster before continuing.");
780 16 : }
781 :
782 :
783 : void
784 16 : issue_warnings_and_set_wal_level(void)
785 : {
786 : /*
787 : * We unconditionally start/stop the new server because the earlier
788 : * pg_resetwal -o call set wal_level to 'minimum'. If the user is
789 : * upgrading standby servers using the rsync instructions, they will need
790 : * pg_upgrade to write its final WAL record showing wal_level as 'replica'.
791 : */
792 16 : start_postmaster(&new_cluster, true);
793 :
794 : /* Reindex hash indexes for old < 10.0 */
795 16 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 906)
796 0 : old_9_6_invalidate_hash_indexes(&new_cluster, false);
797 :
798 16 : report_extension_updates(&new_cluster);
799 :
800 16 : stop_postmaster(false);
801 16 : }
802 :
803 :
804 : void
805 16 : output_completion_banner(char *deletion_script_file_name)
806 : {
807 : PQExpBufferData user_specification;
808 :
809 16 : initPQExpBuffer(&user_specification);
810 16 : if (os_info.user_specified)
811 : {
812 0 : appendPQExpBufferStr(&user_specification, "-U ");
813 0 : appendShellString(&user_specification, os_info.user);
814 0 : appendPQExpBufferChar(&user_specification, ' ');
815 : }
816 :
817 16 : pg_log(PG_REPORT,
818 : "Some statistics are not transferred by pg_upgrade.\n"
819 : "Once you start the new server, consider running these two commands:\n"
820 : " %s/vacuumdb %s--all --analyze-in-stages --missing-stats-only\n"
821 : " %s/vacuumdb %s--all --analyze-only",
822 : new_cluster.bindir, user_specification.data,
823 : new_cluster.bindir, user_specification.data);
824 :
825 16 : if (deletion_script_file_name)
826 16 : pg_log(PG_REPORT,
827 : "Running this script will delete the old cluster's data files:\n"
828 : " %s",
829 : deletion_script_file_name);
830 : else
831 0 : pg_log(PG_REPORT,
832 : "Could not create a script to delete the old cluster's data files\n"
833 : "because user-defined tablespaces or the new cluster's data directory\n"
834 : "exist in the old cluster directory. The old cluster's contents must\n"
835 : "be deleted manually.");
836 :
837 16 : termPQExpBuffer(&user_specification);
838 16 : }
839 :
840 :
841 : void
842 32 : check_cluster_versions(void)
843 : {
844 32 : prep_status("Checking cluster versions");
845 :
846 : /* cluster versions should already have been obtained */
847 : Assert(old_cluster.major_version != 0);
848 : Assert(new_cluster.major_version != 0);
849 :
850 : /*
851 : * We allow upgrades from/to the same major version for alpha/beta
852 : * upgrades
853 : */
854 :
855 32 : if (GET_MAJOR_VERSION(old_cluster.major_version) < 902)
856 0 : pg_fatal("This utility can only upgrade from PostgreSQL version %s and later.",
857 : "9.2");
858 :
859 : /* Only current PG version is supported as a target */
860 32 : if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
861 0 : pg_fatal("This utility can only upgrade to PostgreSQL version %s.",
862 : PG_MAJORVERSION);
863 :
864 : /*
865 : * We can't allow downgrading because we use the target pg_dump, and
866 : * pg_dump cannot operate on newer database versions, only current and
867 : * older versions.
868 : */
869 32 : if (old_cluster.major_version > new_cluster.major_version)
870 0 : pg_fatal("This utility cannot be used to downgrade to older major PostgreSQL versions.");
871 :
872 : /* Ensure binaries match the designated data directories */
873 32 : if (GET_MAJOR_VERSION(old_cluster.major_version) !=
874 32 : GET_MAJOR_VERSION(old_cluster.bin_version))
875 0 : pg_fatal("Old cluster data and binary directories are from different major versions.");
876 32 : if (GET_MAJOR_VERSION(new_cluster.major_version) !=
877 32 : GET_MAJOR_VERSION(new_cluster.bin_version))
878 0 : pg_fatal("New cluster data and binary directories are from different major versions.");
879 :
880 : /*
881 : * Since version 18, newly created database clusters always have
882 : * 'signed' default char signedness, so it makes little sense to use the
883 : * --set-char-signedness option when upgrading from version 18 or later.
884 : * Users who want to change the default char signedness of the new
885 : * cluster can run pg_resetwal manually before the upgrade.
886 : */
887 32 : if (GET_MAJOR_VERSION(old_cluster.major_version) >= 1800 &&
888 32 : user_opts.char_signedness != -1)
889 2 : pg_fatal("The option %s cannot be used for upgrades from PostgreSQL %s and later.",
890 : "--set-char-signedness", "18");
891 :
892 30 : check_ok();
893 30 : }
894 :
895 :
896 : void
897 30 : check_cluster_compatibility(void)
898 : {
899 : /* get/check pg_control data of servers */
900 30 : get_control_data(&old_cluster);
901 30 : get_control_data(&new_cluster);
902 30 : check_control_data(&old_cluster.controldata, &new_cluster.controldata);
903 :
904 30 : if (user_opts.live_check && old_cluster.port == new_cluster.port)
905 0 : pg_fatal("When checking a live server, "
906 : "the old and new port numbers must be different.");
907 30 : }
908 :
909 :
910 : static void
911 24 : check_new_cluster_is_empty(void)
912 : {
913 : int dbnum;
914 :
915 72 : for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
916 : {
917 : int relnum;
918 48 : RelInfoArr *rel_arr = &new_cluster.dbarr.dbs[dbnum].rel_arr;
919 :
920 144 : for (relnum = 0; relnum < rel_arr->nrels;
921 96 : relnum++)
922 : {
923 : /* pg_largeobject and its index should be skipped */
924 96 : if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
925 0 : pg_fatal("New cluster database \"%s\" is not empty: found relation \"%s.%s\"",
926 0 : new_cluster.dbarr.dbs[dbnum].db_name,
927 0 : rel_arr->rels[relnum].nspname,
928 0 : rel_arr->rels[relnum].relname);
929 : }
930 : }
931 24 : }
932 :
933 : /*
934 : * A previous run of pg_upgrade might have failed and the new cluster
935 : * directory been recreated, but the user might have forgotten to remove
936 : * the new cluster's tablespace directories. Therefore, check that
937 : * new cluster tablespace directories do not already exist. If
938 : * they do, they would cause an error while restoring global objects.
939 : * This allows the failure to be detected at check time, rather than
940 : * during schema restore.
941 : */
942 : static void
943 22 : check_for_new_tablespace_dir(void)
944 : {
945 : int tblnum;
946 : char new_tablespace_dir[MAXPGPATH];
947 :
948 22 : prep_status("Checking for new cluster tablespace directories");
949 :
950 22 : for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
951 : {
952 : struct stat statbuf;
953 :
954 0 : snprintf(new_tablespace_dir, MAXPGPATH, "%s%s",
955 0 : os_info.old_tablespaces[tblnum],
956 : new_cluster.tablespace_suffix);
957 :
958 0 : if (stat(new_tablespace_dir, &statbuf) == 0 || errno != ENOENT)
959 0 : pg_fatal("new cluster tablespace directory already exists: \"%s\"",
960 : new_tablespace_dir);
961 : }
962 :
963 22 : check_ok();
964 22 : }
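As an illustration of the path being probed (all values hypothetical, including the suffix, which in reality comes from new_cluster.tablespace_suffix), an old tablespace at /mnt/ts1 would be checked as:

	/* hypothetical values, for illustration only */
	snprintf(new_tablespace_dir, MAXPGPATH, "%s%s",
			 "/mnt/ts1",			/* os_info.old_tablespaces[tblnum] */
			 "/PG_18_202504091");	/* new_cluster.tablespace_suffix (assumed form) */
	/* new_tablespace_dir is now "/mnt/ts1/PG_18_202504091"; if stat() finds it, pg_upgrade aborts */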
965 :
966 : /*
967 : * create_script_for_old_cluster_deletion()
968 : *
969 : * This is particularly useful for tablespace deletion.
970 : */
971 : void
972 16 : create_script_for_old_cluster_deletion(char **deletion_script_file_name)
973 : {
974 16 : FILE *script = NULL;
975 : int tblnum;
976 : char old_cluster_pgdata[MAXPGPATH],
977 : new_cluster_pgdata[MAXPGPATH];
978 : char *old_tblspc_suffix;
979 :
980 16 : *deletion_script_file_name = psprintf("%sdelete_old_cluster.%s",
981 : SCRIPT_PREFIX, SCRIPT_EXT);
982 :
983 16 : strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH);
984 16 : canonicalize_path(old_cluster_pgdata);
985 :
986 16 : strlcpy(new_cluster_pgdata, new_cluster.pgdata, MAXPGPATH);
987 16 : canonicalize_path(new_cluster_pgdata);
988 :
989 : /* Some people put the new data directory inside the old one. */
990 16 : if (path_is_prefix_of_path(old_cluster_pgdata, new_cluster_pgdata))
991 : {
992 0 : pg_log(PG_WARNING,
993 : "\nWARNING: new data directory should not be inside the old data directory, i.e. %s", old_cluster_pgdata);
994 :
995 : /* Unlink file in case it is left over from a previous run. */
996 0 : unlink(*deletion_script_file_name);
997 0 : pg_free(*deletion_script_file_name);
998 0 : *deletion_script_file_name = NULL;
999 0 : return;
1000 : }
1001 :
1002 : /*
1003 : * Some users (oddly) create tablespaces inside the cluster data
1004 : * directory. We can't create a proper old cluster delete script in that
1005 : * case.
1006 : */
1007 16 : for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
1008 : {
1009 : char old_tablespace_dir[MAXPGPATH];
1010 :
1011 0 : strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH);
1012 0 : canonicalize_path(old_tablespace_dir);
1013 0 : if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir))
1014 : {
1015 : /* reproduce warning from CREATE TABLESPACE that is in the log */
1016 0 : pg_log(PG_WARNING,
1017 : "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", old_tablespace_dir);
1018 :
1019 : /* Unlink file in case it is left over from a previous run. */
1020 0 : unlink(*deletion_script_file_name);
1021 0 : pg_free(*deletion_script_file_name);
1022 0 : *deletion_script_file_name = NULL;
1023 0 : return;
1024 : }
1025 : }
1026 :
1027 16 : prep_status("Creating script to delete old cluster");
1028 :
1029 16 : if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL)
1030 0 : pg_fatal("could not open file \"%s\": %m",
1031 : *deletion_script_file_name);
1032 :
1033 : #ifndef WIN32
1034 : /* add shebang header */
1035 16 : fprintf(script, "#!/bin/sh\n\n");
1036 : #endif
1037 :
1038 : /* delete old cluster's default tablespace */
1039 16 : fprintf(script, RMDIR_CMD " %c%s%c\n", PATH_QUOTE,
1040 : fix_path_separator(old_cluster.pgdata), PATH_QUOTE);
1041 :
1042 : /* delete old cluster's alternate tablespaces */
1043 16 : old_tblspc_suffix = pg_strdup(old_cluster.tablespace_suffix);
1044 16 : fix_path_separator(old_tblspc_suffix);
1045 16 : for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
1046 0 : fprintf(script, RMDIR_CMD " %c%s%s%c\n", PATH_QUOTE,
1047 0 : fix_path_separator(os_info.old_tablespaces[tblnum]),
1048 : old_tblspc_suffix, PATH_QUOTE);
1049 16 : pfree(old_tblspc_suffix);
1050 :
1051 16 : fclose(script);
1052 :
1053 : #ifndef WIN32
1054 16 : if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
1055 0 : pg_fatal("could not add execute permission to file \"%s\": %m",
1056 : *deletion_script_file_name);
1057 : #endif
1058 :
1059 16 : check_ok();
1060 : }
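On non-Windows systems the resulting delete_old_cluster.sh is a short shell script; assuming RMDIR_CMD expands to "rm -rf" and PATH_QUOTE to a single quote there, a hypothetical cluster at /srv/pg/olddata with no extra tablespaces would get roughly:

	#!/bin/sh

	rm -rf '/srv/pg/olddata'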
1061 :
1062 :
1063 : /*
1064 : * check_is_install_user()
1065 : *
1066 : * Check we are the install user, and that the new cluster
1067 : * has no other users.
1068 : */
1069 : static void
1070 50 : check_is_install_user(ClusterInfo *cluster)
1071 : {
1072 : PGresult *res;
1073 50 : PGconn *conn = connectToServer(cluster, "template1");
1074 :
1075 50 : prep_status("Checking database user is the install user");
1076 :
1077 : /* Can't use pg_authid because only superusers can view it. */
1078 50 : res = executeQueryOrDie(conn,
1079 : "SELECT rolsuper, oid "
1080 : "FROM pg_catalog.pg_roles "
1081 : "WHERE rolname = current_user "
1082 : "AND rolname !~ '^pg_'");
1083 :
1084 : /*
1085 : * We only allow the install user in the new cluster (see comment below)
1086 : * and we preserve pg_authid.oid, so this must be the install user in the
1087 : * old cluster too.
1088 : */
1089 50 : if (PQntuples(res) != 1 ||
1090 50 : atooid(PQgetvalue(res, 0, 1)) != BOOTSTRAP_SUPERUSERID)
1091 0 : pg_fatal("database user \"%s\" is not the install user",
1092 : os_info.user);
1093 :
1094 50 : PQclear(res);
1095 :
1096 50 : res = executeQueryOrDie(conn,
1097 : "SELECT COUNT(*) "
1098 : "FROM pg_catalog.pg_roles "
1099 : "WHERE rolname !~ '^pg_'");
1100 :
1101 50 : if (PQntuples(res) != 1)
1102 0 : pg_fatal("could not determine the number of users");
1103 :
1104 : /*
1105 : * We only allow the install user in the new cluster because other defined
1106 : * users might match users defined in the old cluster and generate an
1107 : * error during pg_dump restore.
1108 : */
1109 50 : if (cluster == &new_cluster && strcmp(PQgetvalue(res, 0, 0), "1") != 0)
1110 0 : pg_fatal("Only the install user can be defined in the new cluster.");
1111 :
1112 50 : PQclear(res);
1113 :
1114 50 : PQfinish(conn);
1115 :
1116 50 : check_ok();
1117 50 : }
1118 :
1119 :
1120 : /*
1121 : * check_for_connection_status
1122 : *
1123 : * Ensure that all non-template0 databases allow connections since they
1124 : * otherwise won't be restored; and that template0 explicitly doesn't allow
1125 : * connections, since allowing them would make the pg_dumpall --globals restore fail.
1126 : */
1127 : static void
1128 30 : check_for_connection_status(ClusterInfo *cluster)
1129 : {
1130 : int dbnum;
1131 : PGconn *conn_template1;
1132 : PGresult *dbres;
1133 : int ntups;
1134 : int i_datname;
1135 : int i_datallowconn;
1136 : int i_datconnlimit;
1137 30 : FILE *script = NULL;
1138 : char output_path[MAXPGPATH];
1139 :
1140 30 : prep_status("Checking database connection settings");
1141 :
1142 30 : snprintf(output_path, sizeof(output_path), "%s/%s",
1143 : log_opts.basedir,
1144 : "databases_cannot_connect_to.txt");
1145 :
1146 30 : conn_template1 = connectToServer(cluster, "template1");
1147 :
1148 : /* get database names */
1149 30 : dbres = executeQueryOrDie(conn_template1,
1150 : "SELECT datname, datallowconn, datconnlimit "
1151 : "FROM pg_catalog.pg_database");
1152 :
1153 30 : i_datname = PQfnumber(dbres, "datname");
1154 30 : i_datallowconn = PQfnumber(dbres, "datallowconn");
1155 30 : i_datconnlimit = PQfnumber(dbres, "datconnlimit");
1156 :
1157 30 : ntups = PQntuples(dbres);
1158 156 : for (dbnum = 0; dbnum < ntups; dbnum++)
1159 : {
1160 126 : char *datname = PQgetvalue(dbres, dbnum, i_datname);
1161 126 : char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);
1162 126 : char *datconnlimit = PQgetvalue(dbres, dbnum, i_datconnlimit);
1163 :
1164 126 : if (strcmp(datname, "template0") == 0)
1165 : {
1166 : /* avoid restore failure when pg_dumpall tries to create template0 */
1167 30 : if (strcmp(datallowconn, "t") == 0)
1168 0 : pg_fatal("template0 must not allow connections, "
1169 : "i.e. its pg_database.datallowconn must be false");
1170 : }
1171 : else
1172 : {
1173 : /*
1174 : * Prevent databases with datallowconn == false from being skipped on
1175 : * restore, and ensure that no databases are marked invalid with
1176 : * datconnlimit == -2.
1177 : */
1178 96 : if ((strcmp(datallowconn, "f") == 0) || strcmp(datconnlimit, "-2") == 0)
1179 : {
1180 2 : if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
1181 0 : pg_fatal("could not open file \"%s\": %m", output_path);
1182 :
1183 2 : fprintf(script, "%s\n", datname);
1184 : }
1185 : }
1186 : }
1187 :
1188 30 : PQclear(dbres);
1189 :
1190 30 : PQfinish(conn_template1);
1191 :
1192 30 : if (script)
1193 : {
1194 2 : fclose(script);
1195 2 : pg_log(PG_REPORT, "fatal");
1196 2 : pg_fatal("All non-template0 databases must allow connections, i.e. their\n"
1197 : "pg_database.datallowconn must be true and pg_database.datconnlimit\n"
1198 : "must not be -2. Your installation contains non-template0 databases\n"
1199 : "which cannot be connected to. Consider allowing connection for all\n"
1200 : "non-template0 databases or drop the databases which do not allow\n"
1201 : "connections. A list of databases with the problem is in the file:\n"
1202 : " %s", output_path);
1203 : }
1204 : else
1205 28 : check_ok();
1206 28 : }
1207 :
1208 :
1209 : /*
1210 : * check_for_prepared_transactions()
1211 : *
1212 : * Make sure there are no prepared transactions because the storage format
1213 : * might have changed.
1214 : */
1215 : static void
1216 50 : check_for_prepared_transactions(ClusterInfo *cluster)
1217 : {
1218 : PGresult *res;
1219 50 : PGconn *conn = connectToServer(cluster, "template1");
1220 :
1221 50 : prep_status("Checking for prepared transactions");
1222 :
1223 50 : res = executeQueryOrDie(conn,
1224 : "SELECT * "
1225 : "FROM pg_catalog.pg_prepared_xacts");
1226 :
1227 50 : if (PQntuples(res) != 0)
1228 : {
1229 0 : if (cluster == &old_cluster)
1230 0 : pg_fatal("The source cluster contains prepared transactions");
1231 : else
1232 0 : pg_fatal("The target cluster contains prepared transactions");
1233 : }
1234 :
1235 50 : PQclear(res);
1236 :
1237 50 : PQfinish(conn);
1238 :
1239 50 : check_ok();
1240 50 : }
1241 :
1242 : /*
1243 : * Callback function for processing result of query for
1244 : * check_for_isn_and_int8_passing_mismatch()'s UpgradeTask. If the query
1245 : * returned any rows (i.e., the check failed), write the details to the report
1246 : * file.
1247 : */
1248 : static void
1249 0 : process_isn_and_int8_passing_mismatch(DbInfo *dbinfo, PGresult *res, void *arg)
1250 : {
1251 0 : int ntups = PQntuples(res);
1252 0 : int i_nspname = PQfnumber(res, "nspname");
1253 0 : int i_proname = PQfnumber(res, "proname");
1254 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1255 :
1256 : AssertVariableIsOfType(&process_isn_and_int8_passing_mismatch,
1257 : UpgradeTaskProcessCB);
1258 :
1259 0 : if (ntups == 0)
1260 0 : return;
1261 :
1262 0 : if (report->file == NULL &&
1263 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1264 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1265 :
1266 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1267 :
1268 0 : for (int rowno = 0; rowno < ntups; rowno++)
1269 0 : fprintf(report->file, " %s.%s\n",
1270 : PQgetvalue(res, rowno, i_nspname),
1271 : PQgetvalue(res, rowno, i_proname));
1272 : }
1273 :
1274 : /*
1275 : * check_for_isn_and_int8_passing_mismatch()
1276 : *
1277 : * contrib/isn relies on data type int8, and in 8.4 int8 can now be passed
1278 : * by value. The schema dumps the CREATE TYPE PASSEDBYVALUE setting so
1279 : * it must match for the old and new servers.
1280 : */
1281 : static void
1282 28 : check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
1283 : {
1284 : UpgradeTask *task;
1285 : UpgradeTaskReport report;
1286 28 : const char *query = "SELECT n.nspname, p.proname "
1287 : "FROM pg_catalog.pg_proc p, "
1288 : " pg_catalog.pg_namespace n "
1289 : "WHERE p.pronamespace = n.oid AND "
1290 : " p.probin = '$libdir/isn'";
1291 :
1292 28 : prep_status("Checking for contrib/isn with bigint-passing mismatch");
1293 :
1294 28 : if (old_cluster.controldata.float8_pass_by_value ==
1295 28 : new_cluster.controldata.float8_pass_by_value)
1296 : {
1297 : /* no mismatch */
1298 28 : check_ok();
1299 28 : return;
1300 : }
1301 :
1302 0 : report.file = NULL;
1303 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1304 : log_opts.basedir,
1305 : "contrib_isn_and_int8_pass_by_value.txt");
1306 :
1307 0 : task = upgrade_task_create();
1308 0 : upgrade_task_add_step(task, query, process_isn_and_int8_passing_mismatch,
1309 : true, &report);
1310 0 : upgrade_task_run(task, cluster);
1311 0 : upgrade_task_free(task);
1312 :
1313 0 : if (report.file)
1314 : {
1315 0 : fclose(report.file);
1316 0 : pg_log(PG_REPORT, "fatal");
1317 0 : pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n"
1318 : "bigint data type. Your old and new clusters pass bigint values\n"
1319 : "differently so this cluster cannot currently be upgraded. You can\n"
1320 : "manually dump databases in the old cluster that use \"contrib/isn\"\n"
1321 : "facilities, drop them, perform the upgrade, and then restore them. A\n"
1322 : "list of the problem functions is in the file:\n"
1323 : " %s", report.path);
1324 : }
1325 : else
1326 0 : check_ok();
1327 : }
1328 :
1329 : /*
1330 : * Callback function for processing result of query for
1331 : * check_for_user_defined_postfix_ops()'s UpgradeTask. If the query returned
1332 : * any rows (i.e., the check failed), write the details to the report file.
1333 : */
1334 : static void
1335 0 : process_user_defined_postfix_ops(DbInfo *dbinfo, PGresult *res, void *arg)
1336 : {
1337 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1338 0 : int ntups = PQntuples(res);
1339 0 : int i_oproid = PQfnumber(res, "oproid");
1340 0 : int i_oprnsp = PQfnumber(res, "oprnsp");
1341 0 : int i_oprname = PQfnumber(res, "oprname");
1342 0 : int i_typnsp = PQfnumber(res, "typnsp");
1343 0 : int i_typname = PQfnumber(res, "typname");
1344 :
1345 : AssertVariableIsOfType(&process_user_defined_postfix_ops,
1346 : UpgradeTaskProcessCB);
1347 :
1348 0 : if (ntups == 0)
1349 0 : return;
1350 :
1351 0 : if (report->file == NULL &&
1352 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1353 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1354 :
1355 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1356 :
1357 0 : for (int rowno = 0; rowno < ntups; rowno++)
1358 0 : fprintf(report->file, " (oid=%s) %s.%s (%s.%s, NONE)\n",
1359 : PQgetvalue(res, rowno, i_oproid),
1360 : PQgetvalue(res, rowno, i_oprnsp),
1361 : PQgetvalue(res, rowno, i_oprname),
1362 : PQgetvalue(res, rowno, i_typnsp),
1363 : PQgetvalue(res, rowno, i_typname));
1364 : }
1365 :
1366 : /*
1367 : * Verify that no user defined postfix operators exist.
1368 : */
1369 : static void
1370 0 : check_for_user_defined_postfix_ops(ClusterInfo *cluster)
1371 : {
1372 : UpgradeTaskReport report;
1373 0 : UpgradeTask *task = upgrade_task_create();
1374 : const char *query;
1375 :
1376 : /*
1377 : * The query below hardcodes FirstNormalObjectId as 16384 rather than
1378 : * interpolating that C #define into the query because, if that #define is
1379 : * ever changed, the cutoff we want to use is the value used by
1380 : * pre-version 14 servers, not that of some future version.
1381 : */
1382 0 : query = "SELECT o.oid AS oproid, "
1383 : " n.nspname AS oprnsp, "
1384 : " o.oprname, "
1385 : " tn.nspname AS typnsp, "
1386 : " t.typname "
1387 : "FROM pg_catalog.pg_operator o, "
1388 : " pg_catalog.pg_namespace n, "
1389 : " pg_catalog.pg_type t, "
1390 : " pg_catalog.pg_namespace tn "
1391 : "WHERE o.oprnamespace = n.oid AND "
1392 : " o.oprleft = t.oid AND "
1393 : " t.typnamespace = tn.oid AND "
1394 : " o.oprright = 0 AND "
1395 : " o.oid >= 16384";
1396 :
1397 0 : prep_status("Checking for user-defined postfix operators");
1398 :
1399 0 : report.file = NULL;
1400 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1401 : log_opts.basedir,
1402 : "postfix_ops.txt");
1403 :
1404 0 : upgrade_task_add_step(task, query, process_user_defined_postfix_ops,
1405 : true, &report);
1406 0 : upgrade_task_run(task, cluster);
1407 0 : upgrade_task_free(task);
1408 :
1409 0 : if (report.file)
1410 : {
1411 0 : fclose(report.file);
1412 0 : pg_log(PG_REPORT, "fatal");
1413 0 : pg_fatal("Your installation contains user-defined postfix operators, which are not\n"
1414 : "supported anymore. Consider dropping the postfix operators and replacing\n"
1415 : "them with prefix operators or function calls.\n"
1416 : "A list of user-defined postfix operators is in the file:\n"
1417 : " %s", report.path);
1418 : }
1419 : else
1420 0 : check_ok();
1421 0 : }
1422 :
1423 : /*
1424 : * Callback function for processing results of query for
1425 : * check_for_incompatible_polymorphics()'s UpgradeTask. If the query returned
1426 : * any rows (i.e., the check failed), write the details to the report file.
1427 : */
1428 : static void
1429 0 : process_incompat_polymorphics(DbInfo *dbinfo, PGresult *res, void *arg)
1430 : {
1431 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1432 0 : int ntups = PQntuples(res);
1433 0 : int i_objkind = PQfnumber(res, "objkind");
1434 0 : int i_objname = PQfnumber(res, "objname");
1435 :
1436 : AssertVariableIsOfType(&process_incompat_polymorphics,
1437 : UpgradeTaskProcessCB);
1438 :
1439 0 : if (ntups == 0)
1440 0 : return;
1441 :
1442 0 : if (report->file == NULL &&
1443 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1444 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1445 :
1446 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1447 :
1448 0 : for (int rowno = 0; rowno < ntups; rowno++)
1449 0 : fprintf(report->file, " %s: %s\n",
1450 : PQgetvalue(res, rowno, i_objkind),
1451 : PQgetvalue(res, rowno, i_objname));
1452 : }
1453 :
1454 : /*
1455 : * check_for_incompatible_polymorphics()
1456 : *
1457 : * Make sure nothing is using old polymorphic functions with
1458 : * anyarray/anyelement rather than the new anycompatible variants.
1459 : */
1460 : static void
1461 0 : check_for_incompatible_polymorphics(ClusterInfo *cluster)
1462 : {
1463 : PQExpBufferData old_polymorphics;
1464 0 : UpgradeTask *task = upgrade_task_create();
1465 : UpgradeTaskReport report;
1466 : char *query;
1467 :
1468 0 : prep_status("Checking for incompatible polymorphic functions");
1469 :
1470 0 : report.file = NULL;
1471 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1472 : log_opts.basedir,
1473 : "incompatible_polymorphics.txt");
1474 :
1475 : /* The set of problematic functions varies a bit in different versions */
1476 0 : initPQExpBuffer(&old_polymorphics);
1477 :
1478 0 : appendPQExpBufferStr(&old_polymorphics,
1479 : "'array_append(anyarray,anyelement)'"
1480 : ", 'array_cat(anyarray,anyarray)'"
1481 : ", 'array_prepend(anyelement,anyarray)'");
1482 :
1483 0 : if (GET_MAJOR_VERSION(cluster->major_version) >= 903)
1484 0 : appendPQExpBufferStr(&old_polymorphics,
1485 : ", 'array_remove(anyarray,anyelement)'"
1486 : ", 'array_replace(anyarray,anyelement,anyelement)'");
1487 :
1488 0 : if (GET_MAJOR_VERSION(cluster->major_version) >= 905)
1489 0 : appendPQExpBufferStr(&old_polymorphics,
1490 : ", 'array_position(anyarray,anyelement)'"
1491 : ", 'array_position(anyarray,anyelement,integer)'"
1492 : ", 'array_positions(anyarray,anyelement)'"
1493 : ", 'width_bucket(anyelement,anyarray)'");
1494 :
1495 : /*
1496 : * The query below hardcodes FirstNormalObjectId as 16384 rather than
1497 : * interpolating that C #define into the query because, if that #define is
1498 : * ever changed, the cutoff we want to use is the value used by
1499 : * pre-version 14 servers, not that of some future version.
1500 : */
1501 :
1502 : /* Aggregate transition functions */
1503 0 : query = psprintf("SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
1504 : "FROM pg_proc AS p "
1505 : "JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
1506 : "JOIN pg_proc AS transfn ON transfn.oid=a.aggtransfn "
1507 : "WHERE p.oid >= 16384 "
1508 : "AND a.aggtransfn = ANY(ARRAY[%s]::regprocedure[]) "
1509 : "AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
1510 :
1511 : /* Aggregate final functions */
1512 : "UNION ALL "
1513 : "SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
1514 : "FROM pg_proc AS p "
1515 : "JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
1516 : "JOIN pg_proc AS finalfn ON finalfn.oid=a.aggfinalfn "
1517 : "WHERE p.oid >= 16384 "
1518 : "AND a.aggfinalfn = ANY(ARRAY[%s]::regprocedure[]) "
1519 : "AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
1520 :
1521 : /* Operators */
1522 : "UNION ALL "
1523 : "SELECT 'operator' AS objkind, op.oid::regoperator::text AS objname "
1524 : "FROM pg_operator AS op "
1525 : "WHERE op.oid >= 16384 "
1526 : "AND oprcode = ANY(ARRAY[%s]::regprocedure[]) "
1527 : "AND oprleft = ANY(ARRAY['anyarray', 'anyelement']::regtype[])",
1528 : old_polymorphics.data,
1529 : old_polymorphics.data,
1530 : old_polymorphics.data);
1531 :
1532 0 : upgrade_task_add_step(task, query, process_incompat_polymorphics,
1533 : true, &report);
1534 0 : upgrade_task_run(task, cluster);
1535 0 : upgrade_task_free(task);
1536 :
1537 0 : if (report.file)
1538 : {
1539 0 : fclose(report.file);
1540 0 : pg_log(PG_REPORT, "fatal");
1541 0 : pg_fatal("Your installation contains user-defined objects that refer to internal\n"
1542 : "polymorphic functions with arguments of type \"anyarray\" or \"anyelement\".\n"
1543 : "These user-defined objects must be dropped before upgrading and restored\n"
1544 : "afterwards, changing them to refer to the new corresponding functions with\n"
1545 : "arguments of type \"anycompatiblearray\" and \"anycompatible\".\n"
1546 : "A list of the problematic objects is in the file:\n"
1547 : " %s", report.path);
1548 : }
1549 : else
1550 0 : check_ok();
1551 :
1552 0 : termPQExpBuffer(&old_polymorphics);
1553 0 : pg_free(query);
1554 0 : }
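
Not part of pg_upgrade itself, but a minimal sketch of the kind of query an administrator could run by hand on a pre-14 old cluster to spot such objects before starting the upgrade; it mirrors only the operator branch of the check above:

    -- user-defined operators backed by the old anyarray/anyelement polymorphics
    -- (the regprocedure casts below resolve only on pre-14 servers)
    SELECT op.oid::regoperator AS operator
    FROM pg_catalog.pg_operator AS op
    WHERE op.oid >= 16384
      AND op.oprcode = ANY (ARRAY['array_append(anyarray,anyelement)',
                                  'array_cat(anyarray,anyarray)',
                                  'array_prepend(anyelement,anyarray)']::regprocedure[])
      AND op.oprleft = ANY (ARRAY['anyarray', 'anyelement']::regtype[]);

The aggregate branches of the full query work the same way, joining pg_aggregate and testing aggtransfn and aggfinalfn instead of oprcode.
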
1555 :
1556 : /*
1557 : * Callback function for processing results of query for
1558 : * check_for_tables_with_oids()'s UpgradeTask. If the query returned any rows
1559 : * (i.e., the check failed), write the details to the report file.
1560 : */
1561 : static void
1562 0 : process_with_oids_check(DbInfo *dbinfo, PGresult *res, void *arg)
1563 : {
1564 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1565 0 : int ntups = PQntuples(res);
1566 0 : int i_nspname = PQfnumber(res, "nspname");
1567 0 : int i_relname = PQfnumber(res, "relname");
1568 :
1569 : AssertVariableIsOfType(&process_with_oids_check, UpgradeTaskProcessCB);
1570 :
1571 0 : if (ntups == 0)
1572 0 : return;
1573 :
1574 0 : if (report->file == NULL &&
1575 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1576 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1577 :
1578 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1579 :
1580 0 : for (int rowno = 0; rowno < ntups; rowno++)
1581 0 : fprintf(report->file, " %s.%s\n",
1582 : PQgetvalue(res, rowno, i_nspname),
1583 : PQgetvalue(res, rowno, i_relname));
1584 : }
1585 :
1586 : /*
1587 : * Verify that no tables are declared WITH OIDS.
1588 : */
1589 : static void
1590 0 : check_for_tables_with_oids(ClusterInfo *cluster)
1591 : {
1592 : UpgradeTaskReport report;
1593 0 : UpgradeTask *task = upgrade_task_create();
1594 0 : const char *query = "SELECT n.nspname, c.relname "
1595 : "FROM pg_catalog.pg_class c, "
1596 : " pg_catalog.pg_namespace n "
1597 : "WHERE c.relnamespace = n.oid AND "
1598 : " c.relhasoids AND"
1599 : " n.nspname NOT IN ('pg_catalog')";
1600 :
1601 0 : prep_status("Checking for tables WITH OIDS");
1602 :
1603 0 : report.file = NULL;
1604 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1605 : log_opts.basedir,
1606 : "tables_with_oids.txt");
1607 :
1608 0 : upgrade_task_add_step(task, query, process_with_oids_check,
1609 : true, &report);
1610 0 : upgrade_task_run(task, cluster);
1611 0 : upgrade_task_free(task);
1612 :
1613 0 : if (report.file)
1614 : {
1615 0 : fclose(report.file);
1616 0 : pg_log(PG_REPORT, "fatal");
1617 0 : pg_fatal("Your installation contains tables declared WITH OIDS, which is not\n"
1618 : "supported anymore. Consider removing the oid column using\n"
1619 : " ALTER TABLE ... SET WITHOUT OIDS;\n"
1620 : "A list of tables with the problem is in the file:\n"
1621 : " %s", report.path);
1622 : }
1623 : else
1624 0 : check_ok();
1625 0 : }
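
A hedged sketch of the manual remediation on the old (pre-v12) cluster; the table name is a placeholder for an entry from tables_with_oids.txt:

    -- list affected tables, matching the check's query
    SELECT n.nspname, c.relname
    FROM pg_catalog.pg_class c
    JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
    WHERE c.relhasoids AND n.nspname <> 'pg_catalog';

    -- drop the hidden oid column from one of them (placeholder name)
    ALTER TABLE public.some_table SET WITHOUT OIDS;
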
1626 :
1627 :
1628 : /*
1629 : * check_for_pg_role_prefix()
1630 : *
1631 : * Versions older than 9.6 should not have any pg_* roles
 1632             :  *	Versions older than 9.6 did not reserve the "pg_" prefix, so check for roles using it
1633 : static void
1634 0 : check_for_pg_role_prefix(ClusterInfo *cluster)
1635 : {
1636 : PGresult *res;
1637 0 : PGconn *conn = connectToServer(cluster, "template1");
1638 : int ntups;
1639 : int i_roloid;
1640 : int i_rolname;
1641 0 : FILE *script = NULL;
1642 : char output_path[MAXPGPATH];
1643 :
1644 0 : prep_status("Checking for roles starting with \"pg_\"");
1645 :
1646 0 : snprintf(output_path, sizeof(output_path), "%s/%s",
1647 : log_opts.basedir,
1648 : "pg_role_prefix.txt");
1649 :
1650 0 : res = executeQueryOrDie(conn,
1651 : "SELECT oid AS roloid, rolname "
1652 : "FROM pg_catalog.pg_roles "
1653 : "WHERE rolname ~ '^pg_'");
1654 :
1655 0 : ntups = PQntuples(res);
1656 0 : i_roloid = PQfnumber(res, "roloid");
1657 0 : i_rolname = PQfnumber(res, "rolname");
1658 0 : for (int rowno = 0; rowno < ntups; rowno++)
1659 : {
1660 0 : if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
1661 0 : pg_fatal("could not open file \"%s\": %m", output_path);
1662 0 : fprintf(script, "%s (oid=%s)\n",
1663 : PQgetvalue(res, rowno, i_rolname),
1664 : PQgetvalue(res, rowno, i_roloid));
1665 : }
1666 :
1667 0 : PQclear(res);
1668 :
1669 0 : PQfinish(conn);
1670 :
1671 0 : if (script)
1672 : {
1673 0 : fclose(script);
1674 0 : pg_log(PG_REPORT, "fatal");
1675 0 : pg_fatal("Your installation contains roles starting with \"pg_\".\n"
1676 : "\"pg_\" is a reserved prefix for system roles. The cluster\n"
1677 : "cannot be upgraded until these roles are renamed.\n"
1678 : "A list of roles starting with \"pg_\" is in the file:\n"
1679 : " %s", output_path);
1680 : }
1681 : else
1682 0 : check_ok();
1683 0 : }
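
The remedy is to rename the offending roles on the old cluster before retrying; a sketch with placeholder names:

    -- roles colliding with the reserved prefix, as the check reports them
    SELECT oid, rolname FROM pg_catalog.pg_roles WHERE rolname ~ '^pg_';

    -- rename one of them (old and new names are placeholders)
    ALTER ROLE pg_reporting RENAME TO app_reporting;
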
1684 :
1685 : /*
1686 : * Callback function for processing results of query for
1687 : * check_for_user_defined_encoding_conversions()'s UpgradeTask. If the query
1688 : * returned any rows (i.e., the check failed), write the details to the report
1689 : * file.
1690 : */
1691 : static void
1692 0 : process_user_defined_encoding_conversions(DbInfo *dbinfo, PGresult *res, void *arg)
1693 : {
1694 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1695 0 : int ntups = PQntuples(res);
1696 0 : int i_conoid = PQfnumber(res, "conoid");
1697 0 : int i_conname = PQfnumber(res, "conname");
1698 0 : int i_nspname = PQfnumber(res, "nspname");
1699 :
1700 : AssertVariableIsOfType(&process_user_defined_encoding_conversions,
1701 : UpgradeTaskProcessCB);
1702 :
1703 0 : if (ntups == 0)
1704 0 : return;
1705 :
1706 0 : if (report->file == NULL &&
1707 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1708 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1709 :
1710 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1711 :
1712 0 : for (int rowno = 0; rowno < ntups; rowno++)
1713 0 : fprintf(report->file, " (oid=%s) %s.%s\n",
1714 : PQgetvalue(res, rowno, i_conoid),
1715 : PQgetvalue(res, rowno, i_nspname),
1716 : PQgetvalue(res, rowno, i_conname));
1717 : }
1718 :
1719 : /*
1720 : * Verify that no user-defined encoding conversions exist.
1721 : */
1722 : static void
1723 0 : check_for_user_defined_encoding_conversions(ClusterInfo *cluster)
1724 : {
1725 : UpgradeTaskReport report;
1726 0 : UpgradeTask *task = upgrade_task_create();
1727 : const char *query;
1728 :
1729 0 : prep_status("Checking for user-defined encoding conversions");
1730 :
1731 0 : report.file = NULL;
1732 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1733 : log_opts.basedir,
1734 : "encoding_conversions.txt");
1735 :
1736 : /*
1737 : * The query below hardcodes FirstNormalObjectId as 16384 rather than
1738 : * interpolating that C #define into the query because, if that #define is
1739 : * ever changed, the cutoff we want to use is the value used by
1740 : * pre-version 14 servers, not that of some future version.
1741 : */
1742 0 : query = "SELECT c.oid as conoid, c.conname, n.nspname "
1743 : "FROM pg_catalog.pg_conversion c, "
1744 : " pg_catalog.pg_namespace n "
1745 : "WHERE c.connamespace = n.oid AND "
1746 : " c.oid >= 16384";
1747 :
1748 0 : upgrade_task_add_step(task, query,
1749 : process_user_defined_encoding_conversions,
1750 : true, &report);
1751 0 : upgrade_task_run(task, cluster);
1752 0 : upgrade_task_free(task);
1753 :
1754 0 : if (report.file)
1755 : {
1756 0 : fclose(report.file);
1757 0 : pg_log(PG_REPORT, "fatal");
1758 0 : pg_fatal("Your installation contains user-defined encoding conversions.\n"
1759 : "The conversion function parameters changed in PostgreSQL version 14\n"
1760 : "so this cluster cannot currently be upgraded. You can remove the\n"
1761 : "encoding conversions in the old cluster and restart the upgrade.\n"
1762 : "A list of user-defined encoding conversions is in the file:\n"
1763 : " %s", report.path);
1764 : }
1765 : else
1766 0 : check_ok();
1767 0 : }
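
A sketch of the corresponding manual steps on the old cluster; the conversion name is a placeholder, and the object would have to be recreated on the new cluster against the post-14 conversion function signature:

    -- user-defined conversions, as the check finds them
    SELECT c.oid, n.nspname, c.conname
    FROM pg_catalog.pg_conversion c
    JOIN pg_catalog.pg_namespace n ON c.connamespace = n.oid
    WHERE c.oid >= 16384;

    -- drop one before upgrading (placeholder name)
    DROP CONVERSION myschema.my_conversion;
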
1768 :
1769 : /*
1770 : * Callback function for processing results of query for
1771 : * check_for_unicode_update()'s UpgradeTask. If the query returned any rows
1772 : * (i.e., the check failed), write the details to the report file.
1773 : */
1774 : static void
1775 0 : process_unicode_update(DbInfo *dbinfo, PGresult *res, void *arg)
1776 : {
1777 0 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
1778 0 : int ntups = PQntuples(res);
1779 0 : int i_reloid = PQfnumber(res, "reloid");
1780 0 : int i_nspname = PQfnumber(res, "nspname");
1781 0 : int i_relname = PQfnumber(res, "relname");
1782 :
1783 0 : if (ntups == 0)
1784 0 : return;
1785 :
1786 0 : if (report->file == NULL &&
1787 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
1788 0 : pg_fatal("could not open file \"%s\": %m", report->path);
1789 :
1790 0 : fprintf(report->file, "In database: %s\n", dbinfo->db_name);
1791 :
1792 0 : for (int rowno = 0; rowno < ntups; rowno++)
1793 0 : fprintf(report->file, " (oid=%s) %s.%s\n",
1794 : PQgetvalue(res, rowno, i_reloid),
1795 : PQgetvalue(res, rowno, i_nspname),
1796 : PQgetvalue(res, rowno, i_relname));
1797 : }
1798 :
1799 : /*
1800 : * Check if the Unicode version built into Postgres changed between the old
1801 : * cluster and the new cluster.
1802 : */
1803 : static bool
1804 24 : unicode_version_changed(ClusterInfo *cluster)
1805 : {
1806 24 : PGconn *conn_template1 = connectToServer(cluster, "template1");
1807 : PGresult *res;
1808 : char *old_unicode_version;
1809 : bool unicode_updated;
1810 :
1811 24 : res = executeQueryOrDie(conn_template1, "SELECT unicode_version()");
1812 24 : old_unicode_version = PQgetvalue(res, 0, 0);
1813 24 : unicode_updated = (strcmp(old_unicode_version, PG_UNICODE_VERSION) != 0);
1814 :
1815 24 : PQclear(res);
1816 24 : PQfinish(conn_template1);
1817 :
1818 24 : return unicode_updated;
1819 : }
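
The same comparison can be made interactively: run the function (available on v17 and later) on the old and the new server and compare the results, which is effectively what the code above does by checking the old server's value against the new binary's PG_UNICODE_VERSION.

    SELECT unicode_version();   -- e.g. '15.1'; differing results trigger the check below
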
1820 :
1821 : /*
1822 : * check_for_unicode_update()
1823 : *
1824 : * Check if the version of Unicode in the old server and the new server
1825 : * differ. If so, check for indexes, partitioned tables, or constraints that
1826 : * use expressions with functions dependent on Unicode behavior.
1827 : */
1828 : static void
1829 24 : check_for_unicode_update(ClusterInfo *cluster)
1830 : {
1831 : UpgradeTaskReport report;
1832 : UpgradeTask *task;
1833 : const char *query;
1834 :
1835 : /*
 1836             :	 * The builtin provider did not exist prior to version 17.  Some problems,
 1837             :	 * such as an index on NORMALIZE(), could in principle also be caught on
 1838             :	 * earlier versions, but we don't check for them here.
1839 : */
1840 24 : if (GET_MAJOR_VERSION(cluster->major_version) < 1700)
1841 24 : return;
1842 :
1843 24 : prep_status("Checking for objects affected by Unicode update");
1844 :
1845 24 : if (!unicode_version_changed(cluster))
1846 : {
1847 24 : check_ok();
1848 24 : return;
1849 : }
1850 :
1851 0 : report.file = NULL;
1852 0 : snprintf(report.path, sizeof(report.path), "%s/%s",
1853 : log_opts.basedir,
1854 : "unicode_dependent_rels.txt");
1855 :
1856 0 : query =
1857 : /* collations that use built-in Unicode for character semantics */
1858 : "WITH collations(collid) AS ( "
1859 : " SELECT oid FROM pg_collation "
1860 : " WHERE collprovider='b' AND colllocale IN ('C.UTF-8','PG_UNICODE_FAST') "
1861 : /* include default collation, if appropriate */
1862 : " UNION "
1863 : " SELECT 'pg_catalog.default'::regcollation FROM pg_database "
1864 : " WHERE datname = current_database() AND "
1865 : " datlocprovider='b' AND datlocale IN ('C.UTF-8','PG_UNICODE_FAST') "
1866 : "), "
1867 : /* functions that use built-in Unicode */
1868 : "functions(procid) AS ( "
1869 : " SELECT proc.oid FROM pg_proc proc "
1870 : " WHERE proname IN ('normalize','unicode_assigned','unicode_version','is_normalized') AND "
1871 : " pronamespace='pg_catalog'::regnamespace "
1872 : "), "
1873 : /* operators that use the input collation for character semantics */
1874 : "coll_operators(operid, procid, collid) AS ( "
1875 : " SELECT oper.oid, oper.oprcode, collid FROM pg_operator oper, collations "
1876 : " WHERE oprname IN ('~', '~*', '!~', '!~*', '~~*', '!~~*') AND "
1877 : " oprnamespace='pg_catalog'::regnamespace AND "
1878 : " oprright='text'::regtype "
1879 : "), "
1880 : /* functions that use the input collation for character semantics */
1881 : "coll_functions(procid, collid) AS ( "
1882 : " SELECT proc.oid, collid FROM pg_proc proc, collations "
1883 : " WHERE proname IN ('lower','initcap','upper') AND "
1884 : " pronamespace='pg_catalog'::regnamespace AND "
1885 : " proargtypes[0] = 'text'::regtype "
1886 : /* include functions behind the operators listed above */
1887 : " UNION "
1888 : " SELECT procid, collid FROM coll_operators "
1889 : "), "
1890 :
1891 : /*
1892 : * Generate patterns to search a pg_node_tree for the above functions and
1893 : * operators.
1894 : */
1895 : "patterns(p) AS ( "
1896 : " SELECT '{FUNCEXPR :funcid ' || procid::text || '[ }]' FROM functions "
1897 : " UNION "
1898 : " SELECT '{OPEXPR :opno ' || operid::text || ' (:\\w+ \\w+ )*' || "
1899 : " ':inputcollid ' || collid::text || '[ }]' FROM coll_operators "
1900 : " UNION "
1901 : " SELECT '{FUNCEXPR :funcid ' || procid::text || ' (:\\w+ \\w+ )*' || "
1902 : " ':inputcollid ' || collid::text || '[ }]' FROM coll_functions "
1903 : ") "
1904 :
1905 : /*
1906 : * Match the patterns against expressions used for relation contents.
1907 : */
1908 : "SELECT reloid, relkind, nspname, relname "
1909 : " FROM ( "
1910 : " SELECT conrelid "
1911 : " FROM pg_constraint, patterns WHERE conbin::text ~ p "
1912 : " UNION "
1913 : " SELECT indexrelid "
1914 : " FROM pg_index, patterns WHERE indexprs::text ~ p OR indpred::text ~ p "
1915 : " UNION "
1916 : " SELECT partrelid "
1917 : " FROM pg_partitioned_table, patterns WHERE partexprs::text ~ p "
1918 : " UNION "
1919 : " SELECT ev_class "
1920 : " FROM pg_rewrite, pg_class, patterns "
1921 : " WHERE ev_class = pg_class.oid AND relkind = 'm' AND ev_action::text ~ p"
1922 : " ) s(reloid), pg_class c, pg_namespace n, pg_database d "
1923 : " WHERE s.reloid = c.oid AND c.relnamespace = n.oid AND "
1924 : " d.datname = current_database() AND "
1925 : " d.encoding = pg_char_to_encoding('UTF8');";
1926 :
1927 0 : task = upgrade_task_create();
1928 0 : upgrade_task_add_step(task, query,
1929 : process_unicode_update,
1930 : true, &report);
1931 0 : upgrade_task_run(task, cluster);
1932 0 : upgrade_task_free(task);
1933 :
1934 0 : if (report.file)
1935 : {
1936 0 : fclose(report.file);
1937 0 : report_status(PG_WARNING, "warning");
1938 0 : pg_log(PG_WARNING, "Your installation contains relations that might be affected by a new version of Unicode.\n"
1939 : "A list of potentially-affected relations is in the file:\n"
1940 : " %s", report.path);
1941 : }
1942 : else
1943 0 : check_ok();
1944 : }
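
Since this is only a warning, the upgrade proceeds; a hedged sketch of typical follow-up maintenance on the new cluster for relations listed in unicode_dependent_rels.txt (object names are placeholders):

    -- rebuild an expression index that depends on Unicode-sensitive behavior
    REINDEX INDEX CONCURRENTLY myschema.my_expr_idx;

    -- recompute a materialized view whose rules matched the patterns above
    REFRESH MATERIALIZED VIEW myschema.my_matview;
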
1945 :
1946 : /*
1947 : * check_new_cluster_logical_replication_slots()
1948 : *
1949 : * Verify that there are no logical replication slots on the new cluster and
1950 : * that the parameter settings necessary for creating slots are sufficient.
1951 : */
1952 : static void
1953 22 : check_new_cluster_logical_replication_slots(void)
1954 : {
1955 : PGresult *res;
1956 : PGconn *conn;
1957 : int nslots_on_old;
1958 : int nslots_on_new;
1959 : int max_replication_slots;
1960 : char *wal_level;
1961 :
1962 : /* Logical slots can be migrated since PG17. */
1963 22 : if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1600)
1964 0 : return;
1965 :
1966 22 : nslots_on_old = count_old_cluster_logical_slots();
1967 :
1968 : /* Quick return if there are no logical slots to be migrated. */
1969 22 : if (nslots_on_old == 0)
1970 18 : return;
1971 :
1972 4 : conn = connectToServer(&new_cluster, "template1");
1973 :
1974 4 : prep_status("Checking for new cluster logical replication slots");
1975 :
1976 4 : res = executeQueryOrDie(conn, "SELECT count(*) "
1977 : "FROM pg_catalog.pg_replication_slots "
1978 : "WHERE slot_type = 'logical' AND "
1979 : "temporary IS FALSE;");
1980 :
1981 4 : if (PQntuples(res) != 1)
1982 0 : pg_fatal("could not count the number of logical replication slots");
1983 :
1984 4 : nslots_on_new = atoi(PQgetvalue(res, 0, 0));
1985 :
1986 4 : if (nslots_on_new)
1987 0 : pg_fatal("expected 0 logical replication slots but found %d",
1988 : nslots_on_new);
1989 :
1990 4 : PQclear(res);
1991 :
1992 4 : res = executeQueryOrDie(conn, "SELECT setting FROM pg_settings "
1993 : "WHERE name IN ('wal_level', 'max_replication_slots') "
1994 : "ORDER BY name DESC;");
1995 :
1996 4 : if (PQntuples(res) != 2)
1997 0 : pg_fatal("could not determine parameter settings on new cluster");
1998 :
1999 4 : wal_level = PQgetvalue(res, 0, 0);
2000 :
2001 4 : if (strcmp(wal_level, "logical") != 0)
2002 0 : pg_fatal("\"wal_level\" must be \"logical\" but is set to \"%s\"",
2003 : wal_level);
2004 :
2005 4 : max_replication_slots = atoi(PQgetvalue(res, 1, 0));
2006 :
2007 4 : if (nslots_on_old > max_replication_slots)
2008 2 : pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
2009 : "logical replication slots (%d) on the old cluster",
2010 : max_replication_slots, nslots_on_old);
2011 :
2012 2 : PQclear(res);
2013 2 : PQfinish(conn);
2014 :
2015 2 : check_ok();
2016 : }
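
A sketch of how the same preconditions could be verified by hand on the new cluster before running pg_upgrade:

    SHOW wal_level;               -- must be 'logical'
    SHOW max_replication_slots;   -- must cover the old cluster's logical slots

    -- the new cluster must not already hold permanent logical slots
    SELECT count(*) FROM pg_catalog.pg_replication_slots
    WHERE slot_type = 'logical' AND temporary IS FALSE;
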
2017 :
2018 : /*
2019 : * check_new_cluster_subscription_configuration()
2020 : *
 2021             :  * Verify that the max_active_replication_origins setting on the new cluster
 2022             :  * is high enough for the subscriptions being migrated, since a replication
 2023             :  * origin must be created for each subscription.
2024 : */
2025 : static void
2026 20 : check_new_cluster_subscription_configuration(void)
2027 : {
2028 : PGresult *res;
2029 : PGconn *conn;
2030 : int max_active_replication_origins;
2031 :
2032 : /* Subscriptions and their dependencies can be migrated since PG17. */
2033 20 : if (GET_MAJOR_VERSION(old_cluster.major_version) < 1700)
2034 0 : return;
2035 :
2036 : /* Quick return if there are no subscriptions to be migrated. */
2037 20 : if (old_cluster.nsubs == 0)
2038 16 : return;
2039 :
2040 4 : prep_status("Checking for new cluster configuration for subscriptions");
2041 :
2042 4 : conn = connectToServer(&new_cluster, "template1");
2043 :
2044 4 : res = executeQueryOrDie(conn, "SELECT setting FROM pg_settings "
2045 : "WHERE name = 'max_active_replication_origins';");
2046 :
2047 4 : if (PQntuples(res) != 1)
2048 0 : pg_fatal("could not determine parameter settings on new cluster");
2049 :
2050 4 : max_active_replication_origins = atoi(PQgetvalue(res, 0, 0));
2051 4 : if (old_cluster.nsubs > max_active_replication_origins)
2052 2 : pg_fatal("\"max_active_replication_origins\" (%d) must be greater than or equal to the number of "
2053 : "subscriptions (%d) on the old cluster",
2054 : max_active_replication_origins, old_cluster.nsubs);
2055 :
2056 2 : PQclear(res);
2057 2 : PQfinish(conn);
2058 :
2059 2 : check_ok();
2060 : }
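
A sketch of the equivalent manual comparison, assuming the new cluster is a version that has the max_active_replication_origins parameter:

    -- on the old cluster: subscriptions that will each need a replication origin
    SELECT count(*) FROM pg_catalog.pg_subscription;

    -- on the new cluster: the limit that has to cover that count
    SHOW max_active_replication_origins;
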
2061 :
2062 : /*
2063 : * check_old_cluster_for_valid_slots()
2064 : *
2065 : * Verify that all the logical slots are valid and have consumed all the WAL
2066 : * before shutdown.
2067 : */
2068 : static void
2069 28 : check_old_cluster_for_valid_slots(void)
2070 : {
2071 : char output_path[MAXPGPATH];
2072 28 : FILE *script = NULL;
2073 :
2074 28 : prep_status("Checking for valid logical replication slots");
2075 :
2076 28 : snprintf(output_path, sizeof(output_path), "%s/%s",
2077 : log_opts.basedir,
2078 : "invalid_logical_slots.txt");
2079 :
2080 110 : for (int dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
2081 : {
2082 82 : LogicalSlotInfoArr *slot_arr = &old_cluster.dbarr.dbs[dbnum].slot_arr;
2083 :
2084 92 : for (int slotnum = 0; slotnum < slot_arr->nslots; slotnum++)
2085 : {
2086 10 : LogicalSlotInfo *slot = &slot_arr->slots[slotnum];
2087 :
2088 : /* Is the slot usable? */
2089 10 : if (slot->invalid)
2090 : {
2091 0 : if (script == NULL &&
2092 0 : (script = fopen_priv(output_path, "w")) == NULL)
2093 0 : pg_fatal("could not open file \"%s\": %m", output_path);
2094 :
2095 0 : fprintf(script, "The slot \"%s\" is invalid\n",
2096 : slot->slotname);
2097 :
2098 0 : continue;
2099 : }
2100 :
2101 : /*
 2102             :			 * Do an additional check to ensure that all logical replication
2103 : * slots have consumed all the WAL before shutdown.
2104 : *
2105 : * Note: This can be satisfied only when the old cluster has been
2106 : * shut down, so we skip this for live checks.
2107 : */
2108 10 : if (!user_opts.live_check && !slot->caught_up)
2109 : {
2110 6 : if (script == NULL &&
2111 2 : (script = fopen_priv(output_path, "w")) == NULL)
2112 0 : pg_fatal("could not open file \"%s\": %m", output_path);
2113 :
2114 4 : fprintf(script,
2115 : "The slot \"%s\" has not consumed the WAL yet\n",
2116 : slot->slotname);
2117 : }
2118 : }
2119 : }
2120 :
2121 28 : if (script)
2122 : {
2123 2 : fclose(script);
2124 :
2125 2 : pg_log(PG_REPORT, "fatal");
2126 2 : pg_fatal("Your installation contains logical replication slots that cannot be upgraded.\n"
2127 : "You can remove invalid slots and/or consume the pending WAL for other slots,\n"
2128 : "and then restart the upgrade.\n"
2129 : "A list of the problematic slots is in the file:\n"
2130 : " %s", output_path);
2131 : }
2132 :
2133 26 : check_ok();
2134 26 : }
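
The slot->caught_up flag is filled in elsewhere by pg_upgrade; a rough manual approximation on a running v17-or-later old cluster (the only case in which slots are migrated) is to check each slot's invalidation status and how far its confirmed_flush_lsn lags the current WAL insert position:

    SELECT slot_name,
           invalidation_reason,          -- non-NULL means the slot is invalid
           pg_wal_lsn_diff(pg_current_wal_insert_lsn(),
                           confirmed_flush_lsn) AS unconsumed_bytes
    FROM pg_catalog.pg_replication_slots
    WHERE slot_type = 'logical' AND temporary IS FALSE;

Only a clean shutdown can guarantee that all remaining WAL is consumed, which is why the code above skips that part of the check for live checks.
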
2135 :
2136 : /*
2137 : * Callback function for processing results of query for
2138 : * check_old_cluster_subscription_state()'s UpgradeTask. If the query returned
2139 : * any rows (i.e., the check failed), write the details to the report file.
2140 : */
2141 : static void
2142 78 : process_old_sub_state_check(DbInfo *dbinfo, PGresult *res, void *arg)
2143 : {
2144 78 : UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
2145 78 : int ntups = PQntuples(res);
2146 78 : int i_srsubstate = PQfnumber(res, "srsubstate");
2147 78 : int i_subname = PQfnumber(res, "subname");
2148 78 : int i_nspname = PQfnumber(res, "nspname");
2149 78 : int i_relname = PQfnumber(res, "relname");
2150 :
2151 : AssertVariableIsOfType(&process_old_sub_state_check, UpgradeTaskProcessCB);
2152 :
2153 78 : if (ntups == 0)
2154 76 : return;
2155 :
2156 2 : if (report->file == NULL &&
2157 0 : (report->file = fopen_priv(report->path, "w")) == NULL)
2158 0 : pg_fatal("could not open file \"%s\": %m", report->path);
2159 :
2160 4 : for (int i = 0; i < ntups; i++)
2161 2 : fprintf(report->file, "The table sync state \"%s\" is not allowed for database:\"%s\" subscription:\"%s\" schema:\"%s\" relation:\"%s\"\n",
2162 : PQgetvalue(res, i, i_srsubstate),
2163 : dbinfo->db_name,
2164 : PQgetvalue(res, i, i_subname),
2165 : PQgetvalue(res, i, i_nspname),
2166 : PQgetvalue(res, i, i_relname));
2167 : }
2168 :
2169 : /*
2170 : * check_old_cluster_subscription_state()
2171 : *
2172 : * Verify that the replication origin corresponding to each of the
 2173             :  * subscriptions is present and that each of the subscribed tables is in
2174 : * 'i' (initialize) or 'r' (ready) state.
2175 : */
2176 : static void
2177 26 : check_old_cluster_subscription_state(void)
2178 : {
2179 26 : UpgradeTask *task = upgrade_task_create();
2180 : UpgradeTaskReport report;
2181 : const char *query;
2182 : PGresult *res;
2183 : PGconn *conn;
2184 : int ntup;
2185 :
2186 26 : prep_status("Checking for subscription state");
2187 :
2188 26 : report.file = NULL;
2189 26 : snprintf(report.path, sizeof(report.path), "%s/%s",
2190 : log_opts.basedir,
2191 : "subs_invalid.txt");
2192 :
2193 : /*
2194 : * Check that all the subscriptions have their respective replication
2195 : * origin. This check only needs to run once.
2196 : */
2197 26 : conn = connectToServer(&old_cluster, old_cluster.dbarr.dbs[0].db_name);
2198 26 : res = executeQueryOrDie(conn,
2199 : "SELECT d.datname, s.subname "
2200 : "FROM pg_catalog.pg_subscription s "
2201 : "LEFT OUTER JOIN pg_catalog.pg_replication_origin o "
2202 : " ON o.roname = 'pg_' || s.oid "
2203 : "INNER JOIN pg_catalog.pg_database d "
2204 : " ON d.oid = s.subdbid "
2205 : "WHERE o.roname IS NULL;");
2206 26 : ntup = PQntuples(res);
2207 28 : for (int i = 0; i < ntup; i++)
2208 : {
2209 2 : if (report.file == NULL &&
2210 2 : (report.file = fopen_priv(report.path, "w")) == NULL)
2211 0 : pg_fatal("could not open file \"%s\": %m", report.path);
2212 2 : fprintf(report.file, "The replication origin is missing for database:\"%s\" subscription:\"%s\"\n",
2213 : PQgetvalue(res, i, 0),
2214 : PQgetvalue(res, i, 1));
2215 : }
2216 26 : PQclear(res);
2217 26 : PQfinish(conn);
2218 :
2219 : /*
2220 : * We don't allow upgrade if there is a risk of dangling slot or origin
2221 : * corresponding to initial sync after upgrade.
2222 : *
2223 : * A slot/origin not created yet refers to the 'i' (initialize) state,
2224 : * while 'r' (ready) state refers to a slot/origin created previously but
2225 : * already dropped. These states are supported for pg_upgrade. The other
2226 : * states listed below are not supported:
2227 : *
2228 : * a) SUBREL_STATE_DATASYNC: A relation upgraded while in this state would
2229 : * retain a replication slot, which could not be dropped by the sync
2230 : * worker spawned after the upgrade because the subscription ID used for
2231 : * the slot name won't match anymore.
2232 : *
2233 : * b) SUBREL_STATE_SYNCDONE: A relation upgraded while in this state would
2234 : * retain the replication origin when there is a failure in tablesync
2235 : * worker immediately after dropping the replication slot in the
2236 : * publisher.
2237 : *
2238 : * c) SUBREL_STATE_FINISHEDCOPY: A tablesync worker spawned to work on a
2239 : * relation upgraded while in this state would expect an origin ID with
2240 : * the OID of the subscription used before the upgrade, causing it to
2241 : * fail.
2242 : *
2243 : * d) SUBREL_STATE_SYNCWAIT, SUBREL_STATE_CATCHUP and
2244 : * SUBREL_STATE_UNKNOWN: These states are not stored in the catalog, so we
2245 : * need not allow these states.
2246 : */
2247 26 : query = "SELECT r.srsubstate, s.subname, n.nspname, c.relname "
2248 : "FROM pg_catalog.pg_subscription_rel r "
2249 : "LEFT JOIN pg_catalog.pg_subscription s"
2250 : " ON r.srsubid = s.oid "
2251 : "LEFT JOIN pg_catalog.pg_class c"
2252 : " ON r.srrelid = c.oid "
2253 : "LEFT JOIN pg_catalog.pg_namespace n"
2254 : " ON c.relnamespace = n.oid "
2255 : "WHERE r.srsubstate NOT IN ('i', 'r') "
2256 : "ORDER BY s.subname";
2257 :
2258 26 : upgrade_task_add_step(task, query, process_old_sub_state_check,
2259 : true, &report);
2260 :
2261 26 : upgrade_task_run(task, &old_cluster);
2262 26 : upgrade_task_free(task);
2263 :
2264 26 : if (report.file)
2265 : {
2266 2 : fclose(report.file);
2267 2 : pg_log(PG_REPORT, "fatal");
 2268           2 : 		pg_fatal("Your installation contains subscriptions without a replication origin or with relations not in the i (initialize) or r (ready) state.\n"
2269 : "You can allow the initial sync to finish for all relations and then restart the upgrade.\n"
2270 : "A list of the problematic subscriptions is in the file:\n"
2271 : " %s", report.path);
2272 : }
2273 : else
2274 24 : check_ok();
2275 24 : }
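
A sketch of the corresponding manual queries on the old cluster, mirroring the two parts of this check:

    -- subscriptions whose replication origin is missing
    SELECT d.datname, s.subname
    FROM pg_catalog.pg_subscription s
    JOIN pg_catalog.pg_database d ON d.oid = s.subdbid
    LEFT JOIN pg_catalog.pg_replication_origin o ON o.roname = 'pg_' || s.oid
    WHERE o.roname IS NULL;

    -- subscribed tables still in a sync state other than 'i' or 'r'
    SELECT s.subname, sr.srrelid::regclass AS relation, sr.srsubstate
    FROM pg_catalog.pg_subscription_rel sr
    JOIN pg_catalog.pg_subscription s ON s.oid = sr.srsubid
    WHERE sr.srsubstate NOT IN ('i', 'r');

The second query has to be run in every database, which is what the UpgradeTask machinery does for this check.
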
|