Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * pg_dump.c
4 : * pg_dump is a utility for dumping out a postgres database
5 : * into a script file.
6 : *
7 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * pg_dump will read the system catalogs in a database and dump out a
11 : * script that reproduces the schema in terms of SQL that is understood
12 : * by PostgreSQL
13 : *
14 : * Note that pg_dump runs in a transaction-snapshot mode transaction,
15 : * so it sees a consistent snapshot of the database including system
16 : * catalogs. However, it relies in part on various specialized backend
17 : * functions like pg_get_indexdef(), and those things tend to look at
18 : * the currently committed state. So it is possible to get 'cache
19 : * lookup failed' error if someone performs DDL changes while a dump is
20 : * happening. The window for this sort of thing is from the acquisition
21 : * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22 : * AccessShareLock on every table it intends to dump). It isn't very large,
23 : * but it can happen.
24 : *
25 : * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26 : *
27 : * IDENTIFICATION
28 : * src/bin/pg_dump/pg_dump.c
29 : *
30 : *-------------------------------------------------------------------------
31 : */
32 : #include "postgres_fe.h"
33 :
34 : #include <unistd.h>
35 : #include <ctype.h>
36 : #include <limits.h>
37 : #ifdef HAVE_TERMIOS_H
38 : #include <termios.h>
39 : #endif
40 :
41 : #include "access/attnum.h"
42 : #include "access/sysattr.h"
43 : #include "access/transam.h"
44 : #include "catalog/pg_aggregate_d.h"
45 : #include "catalog/pg_am_d.h"
46 : #include "catalog/pg_attribute_d.h"
47 : #include "catalog/pg_authid_d.h"
48 : #include "catalog/pg_cast_d.h"
49 : #include "catalog/pg_class_d.h"
50 : #include "catalog/pg_default_acl_d.h"
51 : #include "catalog/pg_largeobject_d.h"
52 : #include "catalog/pg_largeobject_metadata_d.h"
53 : #include "catalog/pg_proc_d.h"
54 : #include "catalog/pg_subscription.h"
55 : #include "catalog/pg_trigger_d.h"
56 : #include "catalog/pg_type_d.h"
57 : #include "common/connect.h"
58 : #include "common/int.h"
59 : #include "common/relpath.h"
60 : #include "compress_io.h"
61 : #include "dumputils.h"
62 : #include "fe_utils/option_utils.h"
63 : #include "fe_utils/string_utils.h"
64 : #include "filter.h"
65 : #include "getopt_long.h"
66 : #include "libpq/libpq-fs.h"
67 : #include "parallel.h"
68 : #include "pg_backup_db.h"
69 : #include "pg_backup_utils.h"
70 : #include "pg_dump.h"
71 : #include "storage/block.h"
72 :
73 : typedef struct /* entry in the sorted rolenames[] OID->name lookup table */
74 : {
75 : Oid roleoid; /* role's OID */
76 : const char *rolename; /* role's name */
77 : } RoleNameItem;
78 :
79 : typedef struct /* entry in the sorted comments[] table (built by collectComments) */
80 : {
81 : const char *descr; /* comment for an object */
82 : Oid classoid; /* object class (catalog OID) */
83 : Oid objoid; /* object OID */
84 : int objsubid; /* subobject (table column #) */
85 : } CommentItem;
86 :
87 : typedef struct /* entry in the sorted seclabels[] table (built by collectSecLabels) */
88 : {
89 : const char *provider; /* label provider of this security label */
90 : const char *label; /* security label for an object */
91 : Oid classoid; /* object class (catalog OID) */
92 : Oid objoid; /* object OID */
93 : int objsubid; /* subobject (table column #) */
94 : } SecLabelItem;
95 :
96 : typedef struct /* entry in sorted binaryUpgradeClassOids[] (built by collectBinaryUpgradeClassOids) */
97 : {
98 : Oid oid; /* object OID */
99 : char relkind; /* object kind */
100 : RelFileNumber relfilenumber; /* object filenode */
101 : Oid toast_oid; /* toast table OID */
102 : RelFileNumber toast_relfilenumber; /* toast table filenode */
103 : Oid toast_index_oid; /* toast table index OID */
104 : RelFileNumber toast_index_relfilenumber; /* toast table index filenode */
105 : } BinaryUpgradeClassOidItem;
106 :
107 : /* sequence types */
108 : typedef enum SeqType /* underlying SQL data type of a sequence */
109 : {
110 : SEQTYPE_SMALLINT, /* rendered as "smallint" (see SeqTypeNames) */
111 : SEQTYPE_INTEGER, /* rendered as "integer" */
112 : SEQTYPE_BIGINT, /* rendered as "bigint" */
113 : } SeqType;
114 :
115 : static const char *const SeqTypeNames[] = /* SQL type name for each SeqType value */
116 : {
117 : [SEQTYPE_SMALLINT] = "smallint",
118 : [SEQTYPE_INTEGER] = "integer",
119 : [SEQTYPE_BIGINT] = "bigint",
120 : };
121 :
122 : StaticAssertDecl(lengthof(SeqTypeNames) == (SEQTYPE_BIGINT + 1),
123 : "array length mismatch"); /* keep array in sync with the SeqType enum */
124 :
125 : typedef struct /* entry in the sorted sequences[] table (built by collectSequences) */
126 : {
127 : Oid oid; /* sequence OID */
128 : SeqType seqtype; /* data type of sequence */
129 : bool cycled; /* whether sequence cycles */
130 : int64 minv; /* minimum value */
131 : int64 maxv; /* maximum value */
132 : int64 startv; /* start value */
133 : int64 incby; /* increment value */
134 : int64 cache; /* cache size */
135 : int64 last_value; /* last value of sequence */
136 : bool is_called; /* whether nextval advances before returning */
137 : } SequenceItem;
138 :
139 : typedef enum OidOptions /* bit-flags controlling getFormattedTypeName()'s treatment of OID 0 */
140 : {
141 : zeroIsError = 1, /* an InvalidOid (0) is reported as an error */
142 : zeroAsStar = 2, /* presumably renders OID 0 as "*" — confirm in getFormattedTypeName */
143 : zeroAsNone = 4, /* presumably renders OID 0 as "NONE" — confirm in getFormattedTypeName */
144 : } OidOptions;
145 :
146 : /* global decls */
147 : static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
148 :
149 : static Oid g_last_builtin_oid; /* value of the last builtin oid */
150 :
151 : /* The specified names/patterns should match at least one entity */
152 : static int strict_names = 0;
153 :
154 : static pg_compress_algorithm compression_algorithm = PG_COMPRESSION_NONE;
155 :
156 : /*
157 : * Object inclusion/exclusion lists
158 : *
159 : * The string lists record the patterns given by command-line switches,
160 : * which we then convert to lists of OIDs of matching objects.
161 : */
162 : static SimpleStringList schema_include_patterns = {NULL, NULL};
163 : static SimpleOidList schema_include_oids = {NULL, NULL};
164 : static SimpleStringList schema_exclude_patterns = {NULL, NULL};
165 : static SimpleOidList schema_exclude_oids = {NULL, NULL};
166 :
167 : static SimpleStringList table_include_patterns = {NULL, NULL};
168 : static SimpleStringList table_include_patterns_and_children = {NULL, NULL};
169 : static SimpleOidList table_include_oids = {NULL, NULL};
170 : static SimpleStringList table_exclude_patterns = {NULL, NULL};
171 : static SimpleStringList table_exclude_patterns_and_children = {NULL, NULL};
172 : static SimpleOidList table_exclude_oids = {NULL, NULL};
173 : static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
174 : static SimpleStringList tabledata_exclude_patterns_and_children = {NULL, NULL};
175 : static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
176 :
177 : static SimpleStringList foreign_servers_include_patterns = {NULL, NULL};
178 : static SimpleOidList foreign_servers_include_oids = {NULL, NULL};
179 :
180 : static SimpleStringList extension_include_patterns = {NULL, NULL};
181 : static SimpleOidList extension_include_oids = {NULL, NULL};
182 :
183 : static SimpleStringList extension_exclude_patterns = {NULL, NULL};
184 : static SimpleOidList extension_exclude_oids = {NULL, NULL};
185 :
186 : static const CatalogId nilCatalogId = {0, 0};
187 :
188 : /* override for standard extra_float_digits setting */
189 : static bool have_extra_float_digits = false;
190 : static int extra_float_digits;
191 :
192 : /* sorted table of role names */
193 : static RoleNameItem *rolenames = NULL;
194 : static int nrolenames = 0;
195 :
196 : /* sorted table of comments */
197 : static CommentItem *comments = NULL;
198 : static int ncomments = 0;
199 :
200 : /* sorted table of security labels */
201 : static SecLabelItem *seclabels = NULL;
202 : static int nseclabels = 0;
203 :
204 : /* sorted table of pg_class information for binary upgrade */
205 : static BinaryUpgradeClassOidItem *binaryUpgradeClassOids = NULL;
206 : static int nbinaryUpgradeClassOids = 0;
207 :
208 : /* sorted table of sequences */
209 : static SequenceItem *sequences = NULL;
210 : static int nsequences = 0;
211 :
212 : /*
213 : * The default number of rows per INSERT when
214 : * --inserts is specified without --rows-per-insert
215 : */
216 : #define DUMP_DEFAULT_ROWS_PER_INSERT 1
217 :
218 : /*
219 : * Maximum number of large objects to group into a single ArchiveEntry.
220 : * At some point we might want to make this user-controllable, but for now
221 : * a hard-wired setting will suffice.
222 : */
223 : #define MAX_BLOBS_PER_ARCHIVE_ENTRY 1000
224 :
225 : /*
226 : * Macro for producing quoted, schema-qualified name of a dumpable object.
227 : */
228 : #define fmtQualifiedDumpable(obj) \
229 : fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
230 : (obj)->dobj.name)
231 :
232 : static void help(const char *progname);
233 : static void setup_connection(Archive *AH,
234 : const char *dumpencoding, const char *dumpsnapshot,
235 : char *use_role);
236 : static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
237 : static void expand_schema_name_patterns(Archive *fout,
238 : SimpleStringList *patterns,
239 : SimpleOidList *oids,
240 : bool strict_names);
241 : static void expand_extension_name_patterns(Archive *fout,
242 : SimpleStringList *patterns,
243 : SimpleOidList *oids,
244 : bool strict_names);
245 : static void expand_foreign_server_name_patterns(Archive *fout,
246 : SimpleStringList *patterns,
247 : SimpleOidList *oids);
248 : static void expand_table_name_patterns(Archive *fout,
249 : SimpleStringList *patterns,
250 : SimpleOidList *oids,
251 : bool strict_names,
252 : bool with_child_tables);
253 : static void prohibit_crossdb_refs(PGconn *conn, const char *dbname,
254 : const char *pattern);
255 :
256 : static NamespaceInfo *findNamespace(Oid nsoid);
257 : static void dumpTableData(Archive *fout, const TableDataInfo *tdinfo);
258 : static void refreshMatViewData(Archive *fout, const TableDataInfo *tdinfo);
259 : static const char *getRoleName(const char *roleoid_str);
260 : static void collectRoleNames(Archive *fout);
261 : static void getAdditionalACLs(Archive *fout);
262 : static void dumpCommentExtended(Archive *fout, const char *type,
263 : const char *name, const char *namespace,
264 : const char *owner, CatalogId catalogId,
265 : int subid, DumpId dumpId,
266 : const char *initdb_comment);
267 : static inline void dumpComment(Archive *fout, const char *type,
268 : const char *name, const char *namespace,
269 : const char *owner, CatalogId catalogId,
270 : int subid, DumpId dumpId);
271 : static int findComments(Oid classoid, Oid objoid, CommentItem **items);
272 : static void collectComments(Archive *fout);
273 : static void dumpSecLabel(Archive *fout, const char *type, const char *name,
274 : const char *namespace, const char *owner,
275 : CatalogId catalogId, int subid, DumpId dumpId);
276 : static int findSecLabels(Oid classoid, Oid objoid, SecLabelItem **items);
277 : static void collectSecLabels(Archive *fout);
278 : static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
279 : static void dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo);
280 : static void dumpExtension(Archive *fout, const ExtensionInfo *extinfo);
281 : static void dumpType(Archive *fout, const TypeInfo *tyinfo);
282 : static void dumpBaseType(Archive *fout, const TypeInfo *tyinfo);
283 : static void dumpEnumType(Archive *fout, const TypeInfo *tyinfo);
284 : static void dumpRangeType(Archive *fout, const TypeInfo *tyinfo);
285 : static void dumpUndefinedType(Archive *fout, const TypeInfo *tyinfo);
286 : static void dumpDomain(Archive *fout, const TypeInfo *tyinfo);
287 : static void dumpCompositeType(Archive *fout, const TypeInfo *tyinfo);
288 : static void dumpCompositeTypeColComments(Archive *fout, const TypeInfo *tyinfo,
289 : PGresult *res);
290 : static void dumpShellType(Archive *fout, const ShellTypeInfo *stinfo);
291 : static void dumpProcLang(Archive *fout, const ProcLangInfo *plang);
292 : static void dumpFunc(Archive *fout, const FuncInfo *finfo);
293 : static void dumpCast(Archive *fout, const CastInfo *cast);
294 : static void dumpTransform(Archive *fout, const TransformInfo *transform);
295 : static void dumpOpr(Archive *fout, const OprInfo *oprinfo);
296 : static void dumpAccessMethod(Archive *fout, const AccessMethodInfo *aminfo);
297 : static void dumpOpclass(Archive *fout, const OpclassInfo *opcinfo);
298 : static void dumpOpfamily(Archive *fout, const OpfamilyInfo *opfinfo);
299 : static void dumpCollation(Archive *fout, const CollInfo *collinfo);
300 : static void dumpConversion(Archive *fout, const ConvInfo *convinfo);
301 : static void dumpRule(Archive *fout, const RuleInfo *rinfo);
302 : static void dumpAgg(Archive *fout, const AggInfo *agginfo);
303 : static void dumpTrigger(Archive *fout, const TriggerInfo *tginfo);
304 : static void dumpEventTrigger(Archive *fout, const EventTriggerInfo *evtinfo);
305 : static void dumpTable(Archive *fout, const TableInfo *tbinfo);
306 : static void dumpTableSchema(Archive *fout, const TableInfo *tbinfo);
307 : static void dumpTableAttach(Archive *fout, const TableAttachInfo *attachinfo);
308 : static void dumpAttrDef(Archive *fout, const AttrDefInfo *adinfo);
309 : static void collectSequences(Archive *fout);
310 : static void dumpSequence(Archive *fout, const TableInfo *tbinfo);
311 : static void dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo);
312 : static void dumpIndex(Archive *fout, const IndxInfo *indxinfo);
313 : static void dumpIndexAttach(Archive *fout, const IndexAttachInfo *attachinfo);
314 : static void dumpStatisticsExt(Archive *fout, const StatsExtInfo *statsextinfo);
315 : static void dumpConstraint(Archive *fout, const ConstraintInfo *coninfo);
316 : static void dumpTableConstraintComment(Archive *fout, const ConstraintInfo *coninfo);
317 : static void dumpTSParser(Archive *fout, const TSParserInfo *prsinfo);
318 : static void dumpTSDictionary(Archive *fout, const TSDictInfo *dictinfo);
319 : static void dumpTSTemplate(Archive *fout, const TSTemplateInfo *tmplinfo);
320 : static void dumpTSConfig(Archive *fout, const TSConfigInfo *cfginfo);
321 : static void dumpForeignDataWrapper(Archive *fout, const FdwInfo *fdwinfo);
322 : static void dumpForeignServer(Archive *fout, const ForeignServerInfo *srvinfo);
323 : static void dumpUserMappings(Archive *fout,
324 : const char *servername, const char *namespace,
325 : const char *owner, CatalogId catalogId, DumpId dumpId);
326 : static void dumpDefaultACL(Archive *fout, const DefaultACLInfo *daclinfo);
327 :
328 : static DumpId dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
329 : const char *type, const char *name, const char *subname,
330 : const char *nspname, const char *tag, const char *owner,
331 : const DumpableAcl *dacl);
332 :
333 : static void getDependencies(Archive *fout);
334 : static void BuildArchiveDependencies(Archive *fout);
335 : static void findDumpableDependencies(ArchiveHandle *AH, const DumpableObject *dobj,
336 : DumpId **dependencies, int *nDeps, int *allocDeps);
337 :
338 : static DumpableObject *createBoundaryObjects(void);
339 : static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
340 : DumpableObject *boundaryObjs);
341 :
342 : static void addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx);
343 : static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
344 : static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
345 : static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
346 : static void buildMatViewRefreshDependencies(Archive *fout);
347 : static void getTableDataFKConstraints(void);
348 : static char *format_function_arguments(const FuncInfo *finfo, const char *funcargs,
349 : bool is_agg);
350 : static char *format_function_signature(Archive *fout,
351 : const FuncInfo *finfo, bool honor_quotes);
352 : static char *convertRegProcReference(const char *proc);
353 : static char *getFormattedOperatorName(const char *oproid);
354 : static char *convertTSFunction(Archive *fout, Oid funcOid);
355 : static const char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
356 : static void getLOs(Archive *fout);
357 : static void dumpLO(Archive *fout, const LoInfo *loinfo);
358 : static int dumpLOs(Archive *fout, const void *arg);
359 : static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo);
360 : static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo);
361 : static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo);
362 : static void dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo);
363 : static void dumpSubscriptionTable(Archive *fout, const SubRelInfo *subrinfo);
364 : static void dumpDatabase(Archive *fout);
365 : static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
366 : const char *dbname, Oid dboid);
367 : static void dumpEncoding(Archive *AH);
368 : static void dumpStdStrings(Archive *AH);
369 : static void dumpSearchPath(Archive *AH);
370 : static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
371 : PQExpBuffer upgrade_buffer,
372 : Oid pg_type_oid,
373 : bool force_array_type,
374 : bool include_multirange_type);
375 : static void binary_upgrade_set_type_oids_by_rel(Archive *fout,
376 : PQExpBuffer upgrade_buffer,
377 : const TableInfo *tbinfo);
378 : static void collectBinaryUpgradeClassOids(Archive *fout);
379 : static void binary_upgrade_set_pg_class_oids(Archive *fout,
380 : PQExpBuffer upgrade_buffer,
381 : Oid pg_class_oid);
382 : static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
383 : const DumpableObject *dobj,
384 : const char *objtype,
385 : const char *objname,
386 : const char *objnamespace);
387 : static const char *getAttrName(int attrnum, const TableInfo *tblInfo);
388 : static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
389 : static bool nonemptyReloptions(const char *reloptions);
390 : static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
391 : const char *prefix, Archive *fout);
392 : static char *get_synchronized_snapshot(Archive *fout);
393 : static void set_restrict_relation_kind(Archive *AH, const char *value);
394 : static void setupDumpWorker(Archive *AH);
395 : static TableInfo *getRootTableInfo(const TableInfo *tbinfo);
396 : static bool forcePartitionRootLoad(const TableInfo *tbinfo);
397 : static void read_dump_filters(const char *filename, DumpOptions *dopt);
398 :
399 :
400 : int
401 486 : main(int argc, char **argv)
402 : {
403 : int c;
404 486 : const char *filename = NULL;
405 486 : const char *format = "p";
406 : TableInfo *tblinfo;
407 : int numTables;
408 : DumpableObject **dobjs;
409 : int numObjs;
410 : DumpableObject *boundaryObjs;
411 : int i;
412 : int optindex;
413 : RestoreOptions *ropt;
414 : Archive *fout; /* the script file */
415 486 : bool g_verbose = false;
416 486 : const char *dumpencoding = NULL;
417 486 : const char *dumpsnapshot = NULL;
418 486 : char *use_role = NULL;
419 486 : int numWorkers = 1;
420 486 : int plainText = 0;
421 486 : ArchiveFormat archiveFormat = archUnknown;
422 : ArchiveMode archiveMode;
423 486 : pg_compress_specification compression_spec = {0};
424 486 : char *compression_detail = NULL;
425 486 : char *compression_algorithm_str = "none";
426 486 : char *error_detail = NULL;
427 486 : bool user_compression_defined = false;
428 486 : DataDirSyncMethod sync_method = DATA_DIR_SYNC_METHOD_FSYNC;
429 :
430 : static DumpOptions dopt;
431 :
432 : static struct option long_options[] = {
433 : {"data-only", no_argument, NULL, 'a'},
434 : {"blobs", no_argument, NULL, 'b'},
435 : {"large-objects", no_argument, NULL, 'b'},
436 : {"no-blobs", no_argument, NULL, 'B'},
437 : {"no-large-objects", no_argument, NULL, 'B'},
438 : {"clean", no_argument, NULL, 'c'},
439 : {"create", no_argument, NULL, 'C'},
440 : {"dbname", required_argument, NULL, 'd'},
441 : {"extension", required_argument, NULL, 'e'},
442 : {"file", required_argument, NULL, 'f'},
443 : {"format", required_argument, NULL, 'F'},
444 : {"host", required_argument, NULL, 'h'},
445 : {"jobs", 1, NULL, 'j'},
446 : {"no-reconnect", no_argument, NULL, 'R'},
447 : {"no-owner", no_argument, NULL, 'O'},
448 : {"port", required_argument, NULL, 'p'},
449 : {"schema", required_argument, NULL, 'n'},
450 : {"exclude-schema", required_argument, NULL, 'N'},
451 : {"schema-only", no_argument, NULL, 's'},
452 : {"superuser", required_argument, NULL, 'S'},
453 : {"table", required_argument, NULL, 't'},
454 : {"exclude-table", required_argument, NULL, 'T'},
455 : {"no-password", no_argument, NULL, 'w'},
456 : {"password", no_argument, NULL, 'W'},
457 : {"username", required_argument, NULL, 'U'},
458 : {"verbose", no_argument, NULL, 'v'},
459 : {"no-privileges", no_argument, NULL, 'x'},
460 : {"no-acl", no_argument, NULL, 'x'},
461 : {"compress", required_argument, NULL, 'Z'},
462 : {"encoding", required_argument, NULL, 'E'},
463 : {"help", no_argument, NULL, '?'},
464 : {"version", no_argument, NULL, 'V'},
465 :
466 : /*
467 : * the following options don't have an equivalent short option letter
468 : */
469 : {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
470 : {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
471 : {"column-inserts", no_argument, &dopt.column_inserts, 1},
472 : {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
473 : {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
474 : {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
475 : {"exclude-table-data", required_argument, NULL, 4},
476 : {"extra-float-digits", required_argument, NULL, 8},
477 : {"if-exists", no_argument, &dopt.if_exists, 1},
478 : {"inserts", no_argument, NULL, 9},
479 : {"lock-wait-timeout", required_argument, NULL, 2},
480 : {"no-table-access-method", no_argument, &dopt.outputNoTableAm, 1},
481 : {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
482 : {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
483 : {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
484 : {"role", required_argument, NULL, 3},
485 : {"section", required_argument, NULL, 5},
486 : {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
487 : {"snapshot", required_argument, NULL, 6},
488 : {"strict-names", no_argument, &strict_names, 1},
489 : {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
490 : {"no-comments", no_argument, &dopt.no_comments, 1},
491 : {"no-publications", no_argument, &dopt.no_publications, 1},
492 : {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
493 : {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
494 : {"no-toast-compression", no_argument, &dopt.no_toast_compression, 1},
495 : {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
496 : {"no-sync", no_argument, NULL, 7},
497 : {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
498 : {"rows-per-insert", required_argument, NULL, 10},
499 : {"include-foreign-data", required_argument, NULL, 11},
500 : {"table-and-children", required_argument, NULL, 12},
501 : {"exclude-table-and-children", required_argument, NULL, 13},
502 : {"exclude-table-data-and-children", required_argument, NULL, 14},
503 : {"sync-method", required_argument, NULL, 15},
504 : {"filter", required_argument, NULL, 16},
505 : {"exclude-extension", required_argument, NULL, 17},
506 :
507 : {NULL, 0, NULL, 0}
508 : };
509 :
510 486 : pg_logging_init(argv[0]);
511 486 : pg_logging_set_level(PG_LOG_WARNING);
512 486 : set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
513 :
514 : /*
515 : * Initialize what we need for parallel execution, especially for thread
516 : * support on Windows.
517 : */
518 486 : init_parallel_dump_utils();
519 :
520 486 : progname = get_progname(argv[0]);
521 :
522 486 : if (argc > 1)
523 : {
524 486 : if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
525 : {
526 2 : help(progname);
527 2 : exit_nicely(0);
528 : }
529 484 : if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
530 : {
531 94 : puts("pg_dump (PostgreSQL) " PG_VERSION);
532 94 : exit_nicely(0);
533 : }
534 : }
535 :
536 390 : InitDumpOptions(&dopt);
537 :
538 1712 : while ((c = getopt_long(argc, argv, "abBcCd:e:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxZ:",
539 : long_options, &optindex)) != -1)
540 : {
541 1338 : switch (c)
542 : {
543 16 : case 'a': /* Dump data only */
544 16 : dopt.dataOnly = true;
545 16 : break;
546 :
547 2 : case 'b': /* Dump LOs */
548 2 : dopt.outputLOs = true;
549 2 : break;
550 :
551 4 : case 'B': /* Don't dump LOs */
552 4 : dopt.dontOutputLOs = true;
553 4 : break;
554 :
555 12 : case 'c': /* clean (i.e., drop) schema prior to create */
556 12 : dopt.outputClean = 1;
557 12 : break;
558 :
559 58 : case 'C': /* Create DB */
560 58 : dopt.outputCreateDB = 1;
561 58 : break;
562 :
563 10 : case 'd': /* database name */
564 10 : dopt.cparams.dbname = pg_strdup(optarg);
565 10 : break;
566 :
567 8 : case 'e': /* include extension(s) */
568 8 : simple_string_list_append(&extension_include_patterns, optarg);
569 8 : dopt.include_everything = false;
570 8 : break;
571 :
572 4 : case 'E': /* Dump encoding */
573 4 : dumpencoding = pg_strdup(optarg);
574 4 : break;
575 :
576 306 : case 'f':
577 306 : filename = pg_strdup(optarg);
578 306 : break;
579 :
580 170 : case 'F':
581 170 : format = pg_strdup(optarg);
582 170 : break;
583 :
584 24 : case 'h': /* server host */
585 24 : dopt.cparams.pghost = pg_strdup(optarg);
586 24 : break;
587 :
588 22 : case 'j': /* number of dump jobs */
589 22 : if (!option_parse_int(optarg, "-j/--jobs", 1,
590 : PG_MAX_JOBS,
591 : &numWorkers))
592 2 : exit_nicely(1);
593 20 : break;
594 :
595 34 : case 'n': /* include schema(s) */
596 34 : simple_string_list_append(&schema_include_patterns, optarg);
597 34 : dopt.include_everything = false;
598 34 : break;
599 :
600 2 : case 'N': /* exclude schema(s) */
601 2 : simple_string_list_append(&schema_exclude_patterns, optarg);
602 2 : break;
603 :
604 4 : case 'O': /* Don't reconnect to match owner */
605 4 : dopt.outputNoOwner = 1;
606 4 : break;
607 :
608 100 : case 'p': /* server port */
609 100 : dopt.cparams.pgport = pg_strdup(optarg);
610 100 : break;
611 :
612 4 : case 'R':
613 : /* no-op, still accepted for backwards compatibility */
614 4 : break;
615 :
616 36 : case 's': /* dump schema only */
617 36 : dopt.schemaOnly = true;
618 36 : break;
619 :
620 2 : case 'S': /* Username for superuser in plain text output */
621 2 : dopt.outputSuperuser = pg_strdup(optarg);
622 2 : break;
623 :
624 16 : case 't': /* include table(s) */
625 16 : simple_string_list_append(&table_include_patterns, optarg);
626 16 : dopt.include_everything = false;
627 16 : break;
628 :
629 8 : case 'T': /* exclude table(s) */
630 8 : simple_string_list_append(&table_exclude_patterns, optarg);
631 8 : break;
632 :
633 28 : case 'U':
634 28 : dopt.cparams.username = pg_strdup(optarg);
635 28 : break;
636 :
637 12 : case 'v': /* verbose */
638 12 : g_verbose = true;
639 12 : pg_logging_increase_verbosity();
640 12 : break;
641 :
642 2 : case 'w':
643 2 : dopt.cparams.promptPassword = TRI_NO;
644 2 : break;
645 :
646 0 : case 'W':
647 0 : dopt.cparams.promptPassword = TRI_YES;
648 0 : break;
649 :
650 4 : case 'x': /* skip ACL dump */
651 4 : dopt.aclsSkip = true;
652 4 : break;
653 :
654 24 : case 'Z': /* Compression */
655 24 : parse_compress_options(optarg, &compression_algorithm_str,
656 : &compression_detail);
657 24 : user_compression_defined = true;
658 24 : break;
659 :
660 100 : case 0:
661 : /* This covers the long options. */
662 100 : break;
663 :
664 4 : case 2: /* lock-wait-timeout */
665 4 : dopt.lockWaitTimeout = pg_strdup(optarg);
666 4 : break;
667 :
668 6 : case 3: /* SET ROLE */
669 6 : use_role = pg_strdup(optarg);
670 6 : break;
671 :
672 2 : case 4: /* exclude table(s) data */
673 2 : simple_string_list_append(&tabledata_exclude_patterns, optarg);
674 2 : break;
675 :
676 12 : case 5: /* section */
677 12 : set_dump_section(optarg, &dopt.dumpSections);
678 12 : break;
679 :
680 0 : case 6: /* snapshot */
681 0 : dumpsnapshot = pg_strdup(optarg);
682 0 : break;
683 :
684 222 : case 7: /* no-sync */
685 222 : dosync = false;
686 222 : break;
687 :
688 2 : case 8:
689 2 : have_extra_float_digits = true;
690 2 : if (!option_parse_int(optarg, "--extra-float-digits", -15, 3,
691 : &extra_float_digits))
692 2 : exit_nicely(1);
693 0 : break;
694 :
695 4 : case 9: /* inserts */
696 :
697 : /*
698 : * dump_inserts also stores --rows-per-insert, careful not to
699 : * overwrite that.
700 : */
701 4 : if (dopt.dump_inserts == 0)
702 4 : dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
703 4 : break;
704 :
705 4 : case 10: /* rows per insert */
706 4 : if (!option_parse_int(optarg, "--rows-per-insert", 1, INT_MAX,
707 : &dopt.dump_inserts))
708 2 : exit_nicely(1);
709 2 : break;
710 :
711 8 : case 11: /* include foreign data */
712 8 : simple_string_list_append(&foreign_servers_include_patterns,
713 : optarg);
714 8 : break;
715 :
716 2 : case 12: /* include table(s) and their children */
717 2 : simple_string_list_append(&table_include_patterns_and_children,
718 : optarg);
719 2 : dopt.include_everything = false;
720 2 : break;
721 :
722 2 : case 13: /* exclude table(s) and their children */
723 2 : simple_string_list_append(&table_exclude_patterns_and_children,
724 : optarg);
725 2 : break;
726 :
727 2 : case 14: /* exclude data of table(s) and children */
728 2 : simple_string_list_append(&tabledata_exclude_patterns_and_children,
729 : optarg);
730 2 : break;
731 :
732 0 : case 15:
733 0 : if (!parse_sync_method(optarg, &sync_method))
734 0 : exit_nicely(1);
735 0 : break;
736 :
737 52 : case 16: /* read object filters from file */
738 52 : read_dump_filters(optarg, &dopt);
739 44 : break;
740 :
741 2 : case 17: /* exclude extension(s) */
742 2 : simple_string_list_append(&extension_exclude_patterns,
743 : optarg);
744 2 : break;
745 :
746 2 : default:
747 : /* getopt_long already emitted a complaint */
748 2 : pg_log_error_hint("Try \"%s --help\" for more information.", progname);
749 2 : exit_nicely(1);
750 : }
751 : }
752 :
753 : /*
754 : * Non-option argument specifies database name as long as it wasn't
755 : * already specified with -d / --dbname
756 : */
757 374 : if (optind < argc && dopt.cparams.dbname == NULL)
758 310 : dopt.cparams.dbname = argv[optind++];
759 :
760 : /* Complain if any arguments remain */
761 374 : if (optind < argc)
762 : {
763 2 : pg_log_error("too many command-line arguments (first is \"%s\")",
764 : argv[optind]);
765 2 : pg_log_error_hint("Try \"%s --help\" for more information.", progname);
766 2 : exit_nicely(1);
767 : }
768 :
769 : /* --column-inserts implies --inserts */
770 372 : if (dopt.column_inserts && dopt.dump_inserts == 0)
771 2 : dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
772 :
773 : /*
774 : * Binary upgrade mode implies dumping sequence data even in schema-only
775 : * mode. This is not exposed as a separate option, but kept separate
776 : * internally for clarity.
777 : */
778 372 : if (dopt.binary_upgrade)
779 28 : dopt.sequence_data = 1;
780 :
781 372 : if (dopt.dataOnly && dopt.schemaOnly)
782 2 : pg_fatal("options -s/--schema-only and -a/--data-only cannot be used together");
783 :
784 370 : if (dopt.schemaOnly && foreign_servers_include_patterns.head != NULL)
785 2 : pg_fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
786 :
787 368 : if (numWorkers > 1 && foreign_servers_include_patterns.head != NULL)
788 2 : pg_fatal("option --include-foreign-data is not supported with parallel backup");
789 :
790 366 : if (dopt.dataOnly && dopt.outputClean)
791 2 : pg_fatal("options -c/--clean and -a/--data-only cannot be used together");
792 :
793 364 : if (dopt.if_exists && !dopt.outputClean)
794 2 : pg_fatal("option --if-exists requires option -c/--clean");
795 :
796 : /*
797 : * --inserts are already implied above if --column-inserts or
798 : * --rows-per-insert were specified.
799 : */
800 362 : if (dopt.do_nothing && dopt.dump_inserts == 0)
801 2 : pg_fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
802 :
803 : /* Identify archive format to emit */
804 360 : archiveFormat = parseArchiveFormat(format, &archiveMode);
805 :
806 : /* archiveFormat specific setup */
807 358 : if (archiveFormat == archNull)
808 294 : plainText = 1;
809 :
810 : /*
811 : * Custom and directory formats are compressed by default with gzip when
812 : * available, not the others. If gzip is not available, no compression is
813 : * done by default.
814 : */
815 358 : if ((archiveFormat == archCustom || archiveFormat == archDirectory) &&
816 58 : !user_compression_defined)
817 : {
818 : #ifdef HAVE_LIBZ
819 48 : compression_algorithm_str = "gzip";
820 : #else
821 : compression_algorithm_str = "none";
822 : #endif
823 : }
824 :
825 : /*
826 : * Compression options
827 : */
828 358 : if (!parse_compress_algorithm(compression_algorithm_str,
829 : &compression_algorithm))
830 2 : pg_fatal("unrecognized compression algorithm: \"%s\"",
831 : compression_algorithm_str);
832 :
833 356 : parse_compress_specification(compression_algorithm, compression_detail,
834 : &compression_spec);
835 356 : error_detail = validate_compress_specification(&compression_spec);
836 356 : if (error_detail != NULL)
837 6 : pg_fatal("invalid compression specification: %s",
838 : error_detail);
839 :
840 350 : error_detail = supports_compression(compression_spec);
841 350 : if (error_detail != NULL)
842 0 : pg_fatal("%s", error_detail);
843 :
844 : /*
845 : * Disable support for zstd workers for now - these are based on
846 : * threading, and it's unclear how it interacts with parallel dumps on
847 : * platforms where that relies on threads too (e.g. Windows).
848 : */
849 350 : if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS)
850 0 : pg_log_warning("compression option \"%s\" is not currently supported by pg_dump",
851 : "workers");
852 :
853 : /*
854 : * If emitting an archive format, we always want to emit a DATABASE item,
855 : * in case --create is specified at pg_restore time.
856 : */
857 350 : if (!plainText)
858 64 : dopt.outputCreateDB = 1;
859 :
860 : /* Parallel backup only in the directory archive format so far */
861 350 : if (archiveFormat != archDirectory && numWorkers > 1)
862 2 : pg_fatal("parallel backup only supported by the directory format");
863 :
864 : /* Open the output file */
865 348 : fout = CreateArchive(filename, archiveFormat, compression_spec,
866 : dosync, archiveMode, setupDumpWorker, sync_method);
867 :
868 : /* Make dump options accessible right away */
869 346 : SetArchiveOptions(fout, &dopt, NULL);
870 :
871 : /* Register the cleanup hook */
872 346 : on_exit_close_archive(fout);
873 :
874 : /* Let the archiver know how noisy to be */
875 346 : fout->verbose = g_verbose;
876 :
877 :
878 : /*
879 : * We allow the server to be back to 9.2, and up to any minor release of
880 : * our own major version. (See also version check in pg_dumpall.c.)
881 : */
882 346 : fout->minRemoteVersion = 90200;
883 346 : fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
884 :
885 346 : fout->numWorkers = numWorkers;
886 :
887 : /*
888 : * Open the database using the Archiver, so it knows about it. Errors mean
889 : * death.
890 : */
891 346 : ConnectDatabase(fout, &dopt.cparams, false);
892 342 : setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
893 :
894 : /*
895 : * On hot standbys, never try to dump unlogged table data, since it will
896 : * just throw an error.
897 : */
898 342 : if (fout->isStandby)
899 8 : dopt.no_unlogged_table_data = true;
900 :
901 : /*
902 : * Find the last built-in OID, if needed (prior to 8.1)
903 : *
904 : * With 8.1 and above, we can just use FirstNormalObjectId - 1.
905 : */
906 342 : g_last_builtin_oid = FirstNormalObjectId - 1;
907 :
908 342 : pg_log_info("last built-in OID is %u", g_last_builtin_oid);
909 :
910 : /* Expand schema selection patterns into OID lists */
911 342 : if (schema_include_patterns.head != NULL)
912 : {
913 36 : expand_schema_name_patterns(fout, &schema_include_patterns,
914 : &schema_include_oids,
915 : strict_names);
916 24 : if (schema_include_oids.head == NULL)
917 2 : pg_fatal("no matching schemas were found");
918 : }
919 328 : expand_schema_name_patterns(fout, &schema_exclude_patterns,
920 : &schema_exclude_oids,
921 : false);
922 : /* non-matching exclusion patterns aren't an error */
923 :
924 : /* Expand table selection patterns into OID lists */
925 328 : expand_table_name_patterns(fout, &table_include_patterns,
926 : &table_include_oids,
927 : strict_names, false);
928 318 : expand_table_name_patterns(fout, &table_include_patterns_and_children,
929 : &table_include_oids,
930 : strict_names, true);
931 318 : if ((table_include_patterns.head != NULL ||
932 296 : table_include_patterns_and_children.head != NULL) &&
933 26 : table_include_oids.head == NULL)
934 4 : pg_fatal("no matching tables were found");
935 :
936 314 : expand_table_name_patterns(fout, &table_exclude_patterns,
937 : &table_exclude_oids,
938 : false, false);
939 314 : expand_table_name_patterns(fout, &table_exclude_patterns_and_children,
940 : &table_exclude_oids,
941 : false, true);
942 :
943 314 : expand_table_name_patterns(fout, &tabledata_exclude_patterns,
944 : &tabledata_exclude_oids,
945 : false, false);
946 314 : expand_table_name_patterns(fout, &tabledata_exclude_patterns_and_children,
947 : &tabledata_exclude_oids,
948 : false, true);
949 :
950 314 : expand_foreign_server_name_patterns(fout, &foreign_servers_include_patterns,
951 : &foreign_servers_include_oids);
952 :
953 : /* non-matching exclusion patterns aren't an error */
954 :
955 : /* Expand extension selection patterns into OID lists */
956 312 : if (extension_include_patterns.head != NULL)
957 : {
958 10 : expand_extension_name_patterns(fout, &extension_include_patterns,
959 : &extension_include_oids,
960 : strict_names);
961 10 : if (extension_include_oids.head == NULL)
962 2 : pg_fatal("no matching extensions were found");
963 : }
964 310 : expand_extension_name_patterns(fout, &extension_exclude_patterns,
965 : &extension_exclude_oids,
966 : false);
967 : /* non-matching exclusion patterns aren't an error */
968 :
969 : /*
970 : * Dumping LOs is the default for dumps where an inclusion switch is not
971 : * used (an "include everything" dump). -B can be used to exclude LOs
972 : * from those dumps. -b can be used to include LOs even when an inclusion
973 : * switch is used.
974 : *
975 : * -s means "schema only" and LOs are data, not schema, so we never
976 : * include LOs when -s is used.
977 : */
978 310 : if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputLOs)
979 228 : dopt.outputLOs = true;
980 :
981 : /*
982 : * Collect role names so we can map object owner OIDs to names.
983 : */
984 310 : collectRoleNames(fout);
985 :
986 : /*
987 : * Now scan the database and create DumpableObject structs for all the
988 : * objects we intend to dump.
989 : */
990 310 : tblinfo = getSchemaData(fout, &numTables);
991 :
992 308 : if (!dopt.schemaOnly)
993 : {
994 276 : getTableData(&dopt, tblinfo, numTables, 0);
995 276 : buildMatViewRefreshDependencies(fout);
996 276 : if (dopt.dataOnly)
997 12 : getTableDataFKConstraints();
998 : }
999 :
1000 308 : if (dopt.schemaOnly && dopt.sequence_data)
1001 28 : getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
1002 :
1003 : /*
1004 : * In binary-upgrade mode, we do not have to worry about the actual LO
1005 : * data or the associated metadata that resides in the pg_largeobject and
1006 : * pg_largeobject_metadata tables, respectively.
1007 : *
1008 : * However, we do need to collect LO information as there may be comments
1009 : * or other information on LOs that we do need to dump out.
1010 : */
1011 308 : if (dopt.outputLOs || dopt.binary_upgrade)
1012 256 : getLOs(fout);
1013 :
1014 : /*
1015 : * Collect dependency data to assist in ordering the objects.
1016 : */
1017 308 : getDependencies(fout);
1018 :
1019 : /*
1020 : * Collect ACLs, comments, and security labels, if wanted.
1021 : */
1022 308 : if (!dopt.aclsSkip)
1023 304 : getAdditionalACLs(fout);
1024 308 : if (!dopt.no_comments)
1025 308 : collectComments(fout);
1026 308 : if (!dopt.no_security_labels)
1027 308 : collectSecLabels(fout);
1028 :
1029 : /* For binary upgrade mode, collect required pg_class information. */
1030 308 : if (dopt.binary_upgrade)
1031 28 : collectBinaryUpgradeClassOids(fout);
1032 :
1033 : /* Collect sequence information. */
1034 308 : collectSequences(fout);
1035 :
1036 : /* Lastly, create dummy objects to represent the section boundaries */
1037 308 : boundaryObjs = createBoundaryObjects();
1038 :
1039 : /* Get pointers to all the known DumpableObjects */
1040 308 : getDumpableObjects(&dobjs, &numObjs);
1041 :
1042 : /*
1043 : * Add dummy dependencies to enforce the dump section ordering.
1044 : */
1045 308 : addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
1046 :
1047 : /*
1048 : * Sort the objects into a safe dump order (no forward references).
1049 : *
1050 : * We rely on dependency information to help us determine a safe order, so
1051 : * the initial sort is mostly for cosmetic purposes: we sort by name to
1052 : * ensure that logically identical schemas will dump identically.
1053 : */
1054 308 : sortDumpableObjectsByTypeName(dobjs, numObjs);
1055 :
1056 308 : sortDumpableObjects(dobjs, numObjs,
1057 308 : boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
1058 :
1059 : /*
1060 : * Create archive TOC entries for all the objects to be dumped, in a safe
1061 : * order.
1062 : */
1063 :
1064 : /*
1065 : * First the special entries for ENCODING, STDSTRINGS, and SEARCHPATH.
1066 : */
1067 308 : dumpEncoding(fout);
1068 308 : dumpStdStrings(fout);
1069 308 : dumpSearchPath(fout);
1070 :
1071 : /* The database items are always next, unless we don't want them at all */
1072 308 : if (dopt.outputCreateDB)
1073 120 : dumpDatabase(fout);
1074 :
1075 : /* Now the rearrangeable objects. */
1076 1124830 : for (i = 0; i < numObjs; i++)
1077 1124522 : dumpDumpableObject(fout, dobjs[i]);
1078 :
1079 : /*
1080 : * Set up options info to ensure we dump what we want.
1081 : */
1082 308 : ropt = NewRestoreOptions();
1083 308 : ropt->filename = filename;
1084 :
1085 : /* if you change this list, see dumpOptionsFromRestoreOptions */
1086 308 : ropt->cparams.dbname = dopt.cparams.dbname ? pg_strdup(dopt.cparams.dbname) : NULL;
1087 308 : ropt->cparams.pgport = dopt.cparams.pgport ? pg_strdup(dopt.cparams.pgport) : NULL;
1088 308 : ropt->cparams.pghost = dopt.cparams.pghost ? pg_strdup(dopt.cparams.pghost) : NULL;
1089 308 : ropt->cparams.username = dopt.cparams.username ? pg_strdup(dopt.cparams.username) : NULL;
1090 308 : ropt->cparams.promptPassword = dopt.cparams.promptPassword;
1091 308 : ropt->dropSchema = dopt.outputClean;
1092 308 : ropt->dataOnly = dopt.dataOnly;
1093 308 : ropt->schemaOnly = dopt.schemaOnly;
1094 308 : ropt->if_exists = dopt.if_exists;
1095 308 : ropt->column_inserts = dopt.column_inserts;
1096 308 : ropt->dumpSections = dopt.dumpSections;
1097 308 : ropt->aclsSkip = dopt.aclsSkip;
1098 308 : ropt->superuser = dopt.outputSuperuser;
1099 308 : ropt->createDB = dopt.outputCreateDB;
1100 308 : ropt->noOwner = dopt.outputNoOwner;
1101 308 : ropt->noTableAm = dopt.outputNoTableAm;
1102 308 : ropt->noTablespace = dopt.outputNoTablespaces;
1103 308 : ropt->disable_triggers = dopt.disable_triggers;
1104 308 : ropt->use_setsessauth = dopt.use_setsessauth;
1105 308 : ropt->disable_dollar_quoting = dopt.disable_dollar_quoting;
1106 308 : ropt->dump_inserts = dopt.dump_inserts;
1107 308 : ropt->no_comments = dopt.no_comments;
1108 308 : ropt->no_publications = dopt.no_publications;
1109 308 : ropt->no_security_labels = dopt.no_security_labels;
1110 308 : ropt->no_subscriptions = dopt.no_subscriptions;
1111 308 : ropt->lockWaitTimeout = dopt.lockWaitTimeout;
1112 308 : ropt->include_everything = dopt.include_everything;
1113 308 : ropt->enable_row_security = dopt.enable_row_security;
1114 308 : ropt->sequence_data = dopt.sequence_data;
1115 308 : ropt->binary_upgrade = dopt.binary_upgrade;
1116 :
1117 308 : ropt->compression_spec = compression_spec;
1118 :
1119 308 : ropt->suppressDumpWarnings = true; /* We've already shown them */
1120 :
1121 308 : SetArchiveOptions(fout, &dopt, ropt);
1122 :
1123 : /* Mark which entries should be output */
1124 308 : ProcessArchiveRestoreOptions(fout);
1125 :
1126 : /*
1127 : * The archive's TOC entries are now marked as to which ones will actually
1128 : * be output, so we can set up their dependency lists properly. This isn't
1129 : * necessary for plain-text output, though.
1130 : */
1131 308 : if (!plainText)
1132 62 : BuildArchiveDependencies(fout);
1133 :
1134 : /*
1135 : * And finally we can do the actual output.
1136 : *
1137 : * Note: for non-plain-text output formats, the output file is written
1138 : * inside CloseArchive(). This is, um, bizarre; but not worth changing
1139 : * right now.
1140 : */
1141 308 : if (plainText)
1142 246 : RestoreArchive(fout);
1143 :
1144 306 : CloseArchive(fout);
1145 :
1146 306 : exit_nicely(0);
1147 : }
1148 :
1149 :
/*
 * help
 *
 * Print the command-line usage summary for pg_dump on stdout.
 *
 * Keep the option lists here in sync with the options actually accepted
 * in main() and with the pg_dump reference documentation.  All text is
 * wrapped in _() for translation.
 */
static void
help(const char *progname)
{
	printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_("  %s [OPTION]... [DBNAME]\n"), progname);

	printf(_("\nGeneral options:\n"));
	printf(_("  -f, --file=FILENAME          output file or directory name\n"));
	printf(_("  -F, --format=c|d|t|p         output file format (custom, directory, tar,\n"
			 "                               plain text (default))\n"));
	printf(_("  -j, --jobs=NUM               use this many parallel jobs to dump\n"));
	printf(_("  -v, --verbose                verbose mode\n"));
	printf(_("  -V, --version                output version information, then exit\n"));
	printf(_("  -Z, --compress=METHOD[:DETAIL]\n"
			 "                               compress as specified\n"));
	printf(_("  --lock-wait-timeout=TIMEOUT  fail after waiting TIMEOUT for a table lock\n"));
	printf(_("  --no-sync                    do not wait for changes to be written safely to disk\n"));
	printf(_("  --sync-method=METHOD         set method for syncing files to disk\n"));
	printf(_("  -?, --help                   show this help, then exit\n"));

	printf(_("\nOptions controlling the output content:\n"));
	printf(_("  -a, --data-only              dump only the data, not the schema\n"));
	printf(_("  -b, --large-objects          include large objects in dump\n"));
	printf(_("  --blobs                      (same as --large-objects, deprecated)\n"));
	printf(_("  -B, --no-large-objects       exclude large objects in dump\n"));
	printf(_("  --no-blobs                   (same as --no-large-objects, deprecated)\n"));
	printf(_("  -c, --clean                  clean (drop) database objects before recreating\n"));
	printf(_("  -C, --create                 include commands to create database in dump\n"));
	printf(_("  -e, --extension=PATTERN      dump the specified extension(s) only\n"));
	printf(_("  -E, --encoding=ENCODING      dump the data in encoding ENCODING\n"));
	printf(_("  -n, --schema=PATTERN         dump the specified schema(s) only\n"));
	printf(_("  -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
	printf(_("  -O, --no-owner               skip restoration of object ownership in\n"
			 "                               plain-text format\n"));
	printf(_("  -s, --schema-only            dump only the schema, no data\n"));
	printf(_("  -S, --superuser=NAME         superuser user name to use in plain-text format\n"));
	printf(_("  -t, --table=PATTERN          dump only the specified table(s)\n"));
	printf(_("  -T, --exclude-table=PATTERN  do NOT dump the specified table(s)\n"));
	printf(_("  -x, --no-privileges          do not dump privileges (grant/revoke)\n"));
	printf(_("  --binary-upgrade             for use by upgrade utilities only\n"));
	printf(_("  --column-inserts             dump data as INSERT commands with column names\n"));
	printf(_("  --disable-dollar-quoting     disable dollar quoting, use SQL standard quoting\n"));
	printf(_("  --disable-triggers           disable triggers during data-only restore\n"));
	printf(_("  --enable-row-security        enable row security (dump only content user has\n"
			 "                               access to)\n"));
	printf(_("  --exclude-extension=PATTERN  do NOT dump the specified extension(s)\n"));
	printf(_("  --exclude-table-and-children=PATTERN\n"
			 "                               do NOT dump the specified table(s), including\n"
			 "                               child and partition tables\n"));
	printf(_("  --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
	printf(_("  --exclude-table-data-and-children=PATTERN\n"
			 "                               do NOT dump data for the specified table(s),\n"
			 "                               including child and partition tables\n"));
	printf(_("  --extra-float-digits=NUM     override default setting for extra_float_digits\n"));
	printf(_("  --filter=FILENAME            include or exclude objects and data from dump\n"
			 "                               based on expressions in FILENAME\n"));
	printf(_("  --if-exists                  use IF EXISTS when dropping objects\n"));
	printf(_("  --include-foreign-data=PATTERN\n"
			 "                               include data of foreign tables on foreign\n"
			 "                               servers matching PATTERN\n"));
	printf(_("  --inserts                    dump data as INSERT commands, rather than COPY\n"));
	printf(_("  --load-via-partition-root    load partitions via the root table\n"));
	printf(_("  --no-comments                do not dump comments\n"));
	printf(_("  --no-publications            do not dump publications\n"));
	printf(_("  --no-security-labels         do not dump security label assignments\n"));
	printf(_("  --no-subscriptions           do not dump subscriptions\n"));
	printf(_("  --no-table-access-method     do not dump table access methods\n"));
	printf(_("  --no-tablespaces             do not dump tablespace assignments\n"));
	printf(_("  --no-toast-compression       do not dump TOAST compression methods\n"));
	printf(_("  --no-unlogged-table-data     do not dump unlogged table data\n"));
	printf(_("  --on-conflict-do-nothing     add ON CONFLICT DO NOTHING to INSERT commands\n"));
	printf(_("  --quote-all-identifiers      quote all identifiers, even if not key words\n"));
	printf(_("  --rows-per-insert=NROWS      number of rows per INSERT; implies --inserts\n"));
	printf(_("  --section=SECTION            dump named section (pre-data, data, or post-data)\n"));
	printf(_("  --serializable-deferrable    wait until the dump can run without anomalies\n"));
	printf(_("  --snapshot=SNAPSHOT          use given snapshot for the dump\n"));
	printf(_("  --strict-names               require table and/or schema include patterns to\n"
			 "                               match at least one entity each\n"));
	printf(_("  --table-and-children=PATTERN dump only the specified table(s), including\n"
			 "                               child and partition tables\n"));
	printf(_("  --use-set-session-authorization\n"
			 "                               use SET SESSION AUTHORIZATION commands instead of\n"
			 "                               ALTER OWNER commands to set ownership\n"));

	printf(_("\nConnection options:\n"));
	printf(_("  -d, --dbname=DBNAME      database to dump\n"));
	printf(_("  -h, --host=HOSTNAME      database server host or socket directory\n"));
	printf(_("  -p, --port=PORT          database server port number\n"));
	printf(_("  -U, --username=NAME      connect as specified database user\n"));
	printf(_("  -w, --no-password        never prompt for password\n"));
	printf(_("  -W, --password           force password prompt (should happen automatically)\n"));
	printf(_("  --role=ROLENAME          do SET ROLE before dump\n"));

	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
			 "variable value is used.\n\n"));
	printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
	printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
}
1249 :
/*
 * setup_connection
 *
 * Initialize the session state of the archive's database connection for
 * dumping: client encoding, optional SET ROLE, datestyle/intervalstyle,
 * float output precision, timeouts, row security, relation-kind
 * restrictions, and finally the transaction-snapshot mode transaction the
 * whole dump runs in.
 *
 * AH            - the archive whose connection is being configured
 * dumpencoding  - client encoding name requested with -E, or NULL
 * dumpsnapshot  - snapshot ID given with --snapshot, or NULL
 * use_role      - role name given with --role, or NULL
 *
 * In a parallel dump worker, dumpsnapshot and use_role are passed as NULL,
 * and the values already stored in AH (inherited from the leader) are used
 * instead.  The order of the SET statements below is deliberate; do not
 * reorder without checking version and standby interactions.
 */
static void
setup_connection(Archive *AH, const char *dumpencoding,
				 const char *dumpsnapshot, char *use_role)
{
	DumpOptions *dopt = AH->dopt;
	PGconn	   *conn = GetConnection(AH);
	const char *std_strings;

	/* Force a known-secure search_path before issuing anything else */
	PQclear(ExecuteSqlQueryForSingleRow(AH, ALWAYS_SECURE_SEARCH_PATH_SQL));

	/*
	 * Set the client encoding if requested.
	 */
	if (dumpencoding)
	{
		if (PQsetClientEncoding(conn, dumpencoding) < 0)
			pg_fatal("invalid client encoding \"%s\" specified",
					 dumpencoding);
	}

	/*
	 * Get the active encoding and the standard_conforming_strings setting, so
	 * we know how to escape strings.
	 */
	AH->encoding = PQclientEncoding(conn);

	std_strings = PQparameterStatus(conn, "standard_conforming_strings");
	AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);

	/*
	 * Set the role if requested.  In a parallel dump worker, we'll be passed
	 * use_role == NULL, but AH->use_role is already set (if user specified it
	 * originally) and we should use that.
	 */
	if (!use_role && AH->use_role)
		use_role = AH->use_role;

	/* Set the role if requested */
	if (use_role)
	{
		PQExpBuffer query = createPQExpBuffer();

		appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
		ExecuteSqlStatement(AH, query->data);
		destroyPQExpBuffer(query);

		/* save it for possible later use by parallel workers */
		if (!AH->use_role)
			AH->use_role = pg_strdup(use_role);
	}

	/* Set the datestyle to ISO to ensure the dump's portability */
	ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");

	/* Likewise, avoid using sql_standard intervalstyle */
	ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");

	/*
	 * Use an explicitly specified extra_float_digits if it has been provided.
	 * Otherwise, set extra_float_digits so that we can dump float data
	 * exactly (given correctly implemented float I/O code, anyway).
	 */
	if (have_extra_float_digits)
	{
		PQExpBuffer q = createPQExpBuffer();

		appendPQExpBuffer(q, "SET extra_float_digits TO %d",
						  extra_float_digits);
		ExecuteSqlStatement(AH, q->data);
		destroyPQExpBuffer(q);
	}
	else
		ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");

	/*
	 * Disable synchronized scanning, to prevent unpredictable changes in row
	 * ordering across a dump and reload.
	 */
	ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");

	/*
	 * Disable timeouts if supported.
	 */
	ExecuteSqlStatement(AH, "SET statement_timeout = 0");
	if (AH->remoteVersion >= 90300)
		ExecuteSqlStatement(AH, "SET lock_timeout = 0");
	if (AH->remoteVersion >= 90600)
		ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
	if (AH->remoteVersion >= 170000)
		ExecuteSqlStatement(AH, "SET transaction_timeout = 0");

	/*
	 * Quote all identifiers, if requested.
	 */
	if (quote_all_identifiers)
		ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");

	/*
	 * Adjust row-security mode, if supported.
	 */
	if (AH->remoteVersion >= 90500)
	{
		if (dopt->enable_row_security)
			ExecuteSqlStatement(AH, "SET row_security = on");
		else
			ExecuteSqlStatement(AH, "SET row_security = off");
	}

	/*
	 * For security reasons, we restrict the expansion of non-system views and
	 * access to foreign tables during the pg_dump process. This restriction
	 * is adjusted when dumping foreign table data.
	 */
	set_restrict_relation_kind(AH, "view, foreign-table");

	/*
	 * Initialize prepared-query state to "nothing prepared". We do this here
	 * so that a parallel dump worker will have its own state.
	 */
	AH->is_prepared = (bool *) pg_malloc0(NUM_PREP_QUERIES * sizeof(bool));

	/*
	 * Start transaction-snapshot mode transaction to dump consistent data.
	 */
	ExecuteSqlStatement(AH, "BEGIN");

	/*
	 * To support the combination of serializable_deferrable with the jobs
	 * option we use REPEATABLE READ for the worker connections that are
	 * passed a snapshot. As long as the snapshot is acquired in a
	 * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
	 * REPEATABLE READ transaction provides the appropriate integrity
	 * guarantees. This is a kluge, but safe for back-patching.
	 */
	if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
		ExecuteSqlStatement(AH,
							"SET TRANSACTION ISOLATION LEVEL "
							"SERIALIZABLE, READ ONLY, DEFERRABLE");
	else
		ExecuteSqlStatement(AH,
							"SET TRANSACTION ISOLATION LEVEL "
							"REPEATABLE READ, READ ONLY");

	/*
	 * If user specified a snapshot to use, select that. In a parallel dump
	 * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
	 * is already set (if the server can handle it) and we should use that.
	 */
	if (dumpsnapshot)
		AH->sync_snapshot_id = pg_strdup(dumpsnapshot);

	if (AH->sync_snapshot_id)
	{
		PQExpBuffer query = createPQExpBuffer();

		appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
		appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
		ExecuteSqlStatement(AH, query->data);
		destroyPQExpBuffer(query);
	}
	else if (AH->numWorkers > 1)
	{
		if (AH->isStandby && AH->remoteVersion < 100000)
			pg_fatal("parallel dumps from standby servers are not supported by this server version");
		AH->sync_snapshot_id = get_synchronized_snapshot(AH);
	}
}
1417 :
1418 : /* Set up connection for a parallel worker process */
1419 : static void
1420 32 : setupDumpWorker(Archive *AH)
1421 : {
1422 : /*
1423 : * We want to re-select all the same values the leader connection is
1424 : * using. We'll have inherited directly-usable values in
1425 : * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1426 : * inherited encoding value back to a string to pass to setup_connection.
1427 : */
1428 32 : setup_connection(AH,
1429 : pg_encoding_to_char(AH->encoding),
1430 : NULL,
1431 : NULL);
1432 32 : }
1433 :
1434 : static char *
1435 16 : get_synchronized_snapshot(Archive *fout)
1436 : {
1437 16 : char *query = "SELECT pg_catalog.pg_export_snapshot()";
1438 : char *result;
1439 : PGresult *res;
1440 :
1441 16 : res = ExecuteSqlQueryForSingleRow(fout, query);
1442 16 : result = pg_strdup(PQgetvalue(res, 0, 0));
1443 16 : PQclear(res);
1444 :
1445 16 : return result;
1446 : }
1447 :
1448 : static ArchiveFormat
1449 360 : parseArchiveFormat(const char *format, ArchiveMode *mode)
1450 : {
1451 : ArchiveFormat archiveFormat;
1452 :
1453 360 : *mode = archModeWrite;
1454 :
1455 360 : if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1456 : {
1457 : /* This is used by pg_dumpall, and is not documented */
1458 86 : archiveFormat = archNull;
1459 86 : *mode = archModeAppend;
1460 : }
1461 274 : else if (pg_strcasecmp(format, "c") == 0)
1462 8 : archiveFormat = archCustom;
1463 266 : else if (pg_strcasecmp(format, "custom") == 0)
1464 30 : archiveFormat = archCustom;
1465 236 : else if (pg_strcasecmp(format, "d") == 0)
1466 14 : archiveFormat = archDirectory;
1467 222 : else if (pg_strcasecmp(format, "directory") == 0)
1468 6 : archiveFormat = archDirectory;
1469 216 : else if (pg_strcasecmp(format, "p") == 0)
1470 202 : archiveFormat = archNull;
1471 14 : else if (pg_strcasecmp(format, "plain") == 0)
1472 6 : archiveFormat = archNull;
1473 8 : else if (pg_strcasecmp(format, "t") == 0)
1474 4 : archiveFormat = archTar;
1475 4 : else if (pg_strcasecmp(format, "tar") == 0)
1476 2 : archiveFormat = archTar;
1477 : else
1478 2 : pg_fatal("invalid output format \"%s\" specified", format);
1479 358 : return archiveFormat;
1480 : }
1481 :
1482 : /*
1483 : * Find the OIDs of all schemas matching the given list of patterns,
1484 : * and append them to the given OID list.
1485 : */
1486 : static void
1487 364 : expand_schema_name_patterns(Archive *fout,
1488 : SimpleStringList *patterns,
1489 : SimpleOidList *oids,
1490 : bool strict_names)
1491 : {
1492 : PQExpBuffer query;
1493 : PGresult *res;
1494 : SimpleStringListCell *cell;
1495 : int i;
1496 :
1497 364 : if (patterns->head == NULL)
1498 322 : return; /* nothing to do */
1499 :
1500 42 : query = createPQExpBuffer();
1501 :
1502 : /*
1503 : * The loop below runs multiple SELECTs might sometimes result in
1504 : * duplicate entries in the OID list, but we don't care.
1505 : */
1506 :
1507 72 : for (cell = patterns->head; cell; cell = cell->next)
1508 : {
1509 : PQExpBufferData dbbuf;
1510 : int dotcnt;
1511 :
1512 42 : appendPQExpBufferStr(query,
1513 : "SELECT oid FROM pg_catalog.pg_namespace n\n");
1514 42 : initPQExpBuffer(&dbbuf);
1515 42 : processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1516 : false, NULL, "n.nspname", NULL, NULL, &dbbuf,
1517 : &dotcnt);
1518 42 : if (dotcnt > 1)
1519 4 : pg_fatal("improper qualified name (too many dotted names): %s",
1520 : cell->val);
1521 38 : else if (dotcnt == 1)
1522 6 : prohibit_crossdb_refs(GetConnection(fout), dbbuf.data, cell->val);
1523 32 : termPQExpBuffer(&dbbuf);
1524 :
1525 32 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1526 32 : if (strict_names && PQntuples(res) == 0)
1527 2 : pg_fatal("no matching schemas were found for pattern \"%s\"", cell->val);
1528 :
1529 58 : for (i = 0; i < PQntuples(res); i++)
1530 : {
1531 28 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1532 : }
1533 :
1534 30 : PQclear(res);
1535 30 : resetPQExpBuffer(query);
1536 : }
1537 :
1538 30 : destroyPQExpBuffer(query);
1539 : }
1540 :
1541 : /*
1542 : * Find the OIDs of all extensions matching the given list of patterns,
1543 : * and append them to the given OID list.
1544 : */
1545 : static void
1546 320 : expand_extension_name_patterns(Archive *fout,
1547 : SimpleStringList *patterns,
1548 : SimpleOidList *oids,
1549 : bool strict_names)
1550 : {
1551 : PQExpBuffer query;
1552 : PGresult *res;
1553 : SimpleStringListCell *cell;
1554 : int i;
1555 :
1556 320 : if (patterns->head == NULL)
1557 306 : return; /* nothing to do */
1558 :
1559 14 : query = createPQExpBuffer();
1560 :
1561 : /*
1562 : * The loop below runs multiple SELECTs might sometimes result in
1563 : * duplicate entries in the OID list, but we don't care.
1564 : */
1565 28 : for (cell = patterns->head; cell; cell = cell->next)
1566 : {
1567 : int dotcnt;
1568 :
1569 14 : appendPQExpBufferStr(query,
1570 : "SELECT oid FROM pg_catalog.pg_extension e\n");
1571 14 : processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1572 : false, NULL, "e.extname", NULL, NULL, NULL,
1573 : &dotcnt);
1574 14 : if (dotcnt > 0)
1575 0 : pg_fatal("improper qualified name (too many dotted names): %s",
1576 : cell->val);
1577 :
1578 14 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1579 14 : if (strict_names && PQntuples(res) == 0)
1580 0 : pg_fatal("no matching extensions were found for pattern \"%s\"", cell->val);
1581 :
1582 26 : for (i = 0; i < PQntuples(res); i++)
1583 : {
1584 12 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1585 : }
1586 :
1587 14 : PQclear(res);
1588 14 : resetPQExpBuffer(query);
1589 : }
1590 :
1591 14 : destroyPQExpBuffer(query);
1592 : }
1593 :
1594 : /*
1595 : * Find the OIDs of all foreign servers matching the given list of patterns,
1596 : * and append them to the given OID list.
1597 : */
1598 : static void
1599 314 : expand_foreign_server_name_patterns(Archive *fout,
1600 : SimpleStringList *patterns,
1601 : SimpleOidList *oids)
1602 : {
1603 : PQExpBuffer query;
1604 : PGresult *res;
1605 : SimpleStringListCell *cell;
1606 : int i;
1607 :
1608 314 : if (patterns->head == NULL)
1609 308 : return; /* nothing to do */
1610 :
1611 6 : query = createPQExpBuffer();
1612 :
1613 : /*
1614 : * The loop below runs multiple SELECTs might sometimes result in
1615 : * duplicate entries in the OID list, but we don't care.
1616 : */
1617 :
1618 10 : for (cell = patterns->head; cell; cell = cell->next)
1619 : {
1620 : int dotcnt;
1621 :
1622 6 : appendPQExpBufferStr(query,
1623 : "SELECT oid FROM pg_catalog.pg_foreign_server s\n");
1624 6 : processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1625 : false, NULL, "s.srvname", NULL, NULL, NULL,
1626 : &dotcnt);
1627 6 : if (dotcnt > 0)
1628 0 : pg_fatal("improper qualified name (too many dotted names): %s",
1629 : cell->val);
1630 :
1631 6 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1632 6 : if (PQntuples(res) == 0)
1633 2 : pg_fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
1634 :
1635 8 : for (i = 0; i < PQntuples(res); i++)
1636 4 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1637 :
1638 4 : PQclear(res);
1639 4 : resetPQExpBuffer(query);
1640 : }
1641 :
1642 4 : destroyPQExpBuffer(query);
1643 : }
1644 :
1645 : /*
1646 : * Find the OIDs of all tables matching the given list of patterns,
1647 : * and append them to the given OID list. See also expand_dbname_patterns()
1648 : * in pg_dumpall.c
1649 : */
1650 : static void
1651 1902 : expand_table_name_patterns(Archive *fout,
1652 : SimpleStringList *patterns, SimpleOidList *oids,
1653 : bool strict_names, bool with_child_tables)
1654 : {
1655 : PQExpBuffer query;
1656 : PGresult *res;
1657 : SimpleStringListCell *cell;
1658 : int i;
1659 :
1660 1902 : if (patterns->head == NULL)
1661 1844 : return; /* nothing to do */
1662 :
1663 58 : query = createPQExpBuffer();
1664 :
1665 : /*
1666 : * this might sometimes result in duplicate entries in the OID list, but
1667 : * we don't care.
1668 : */
1669 :
1670 118 : for (cell = patterns->head; cell; cell = cell->next)
1671 : {
1672 : PQExpBufferData dbbuf;
1673 : int dotcnt;
1674 :
1675 : /*
1676 : * Query must remain ABSOLUTELY devoid of unqualified names. This
1677 : * would be unnecessary given a pg_table_is_visible() variant taking a
1678 : * search_path argument.
1679 : *
1680 : * For with_child_tables, we start with the basic query's results and
1681 : * recursively search the inheritance tree to add child tables.
1682 : */
1683 70 : if (with_child_tables)
1684 : {
1685 12 : appendPQExpBuffer(query, "WITH RECURSIVE partition_tree (relid) AS (\n");
1686 : }
1687 :
1688 70 : appendPQExpBuffer(query,
1689 : "SELECT c.oid"
1690 : "\nFROM pg_catalog.pg_class c"
1691 : "\n LEFT JOIN pg_catalog.pg_namespace n"
1692 : "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1693 : "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1694 : "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1695 : RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1696 : RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1697 : RELKIND_PARTITIONED_TABLE);
1698 70 : initPQExpBuffer(&dbbuf);
1699 70 : processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1700 : false, "n.nspname", "c.relname", NULL,
1701 : "pg_catalog.pg_table_is_visible(c.oid)", &dbbuf,
1702 : &dotcnt);
1703 70 : if (dotcnt > 2)
1704 2 : pg_fatal("improper relation name (too many dotted names): %s",
1705 : cell->val);
1706 68 : else if (dotcnt == 2)
1707 4 : prohibit_crossdb_refs(GetConnection(fout), dbbuf.data, cell->val);
1708 64 : termPQExpBuffer(&dbbuf);
1709 :
1710 64 : if (with_child_tables)
1711 : {
1712 12 : appendPQExpBuffer(query, "UNION"
1713 : "\nSELECT i.inhrelid"
1714 : "\nFROM partition_tree p"
1715 : "\n JOIN pg_catalog.pg_inherits i"
1716 : "\n ON p.relid OPERATOR(pg_catalog.=) i.inhparent"
1717 : "\n)"
1718 : "\nSELECT relid FROM partition_tree");
1719 : }
1720 :
1721 64 : ExecuteSqlStatement(fout, "RESET search_path");
1722 64 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1723 64 : PQclear(ExecuteSqlQueryForSingleRow(fout,
1724 : ALWAYS_SECURE_SEARCH_PATH_SQL));
1725 64 : if (strict_names && PQntuples(res) == 0)
1726 4 : pg_fatal("no matching tables were found for pattern \"%s\"", cell->val);
1727 :
1728 148 : for (i = 0; i < PQntuples(res); i++)
1729 : {
1730 88 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1731 : }
1732 :
1733 60 : PQclear(res);
1734 60 : resetPQExpBuffer(query);
1735 : }
1736 :
1737 48 : destroyPQExpBuffer(query);
1738 : }
1739 :
1740 : /*
1741 : * Verifies that the connected database name matches the given database name,
1742 : * and if not, dies with an error about the given pattern.
1743 : *
1744 : * The 'dbname' argument should be a literal name parsed from 'pattern'.
1745 : */
1746 : static void
1747 10 : prohibit_crossdb_refs(PGconn *conn, const char *dbname, const char *pattern)
1748 : {
1749 : const char *db;
1750 :
1751 10 : db = PQdb(conn);
1752 10 : if (db == NULL)
1753 0 : pg_fatal("You are currently not connected to a database.");
1754 :
1755 10 : if (strcmp(db, dbname) != 0)
1756 10 : pg_fatal("cross-database references are not implemented: %s",
1757 : pattern);
1758 0 : }
1759 :
1760 : /*
1761 : * checkExtensionMembership
1762 : * Determine whether object is an extension member, and if so,
1763 : * record an appropriate dependency and set the object's dump flag.
1764 : *
1765 : * It's important to call this for each object that could be an extension
1766 : * member. Generally, we integrate this with determining the object's
1767 : * to-be-dumped-ness, since extension membership overrides other rules for that.
1768 : *
1769 : * Returns true if object is an extension member, else false.
1770 : */
1771 : static bool
1772 959036 : checkExtensionMembership(DumpableObject *dobj, Archive *fout)
1773 : {
1774 959036 : ExtensionInfo *ext = findOwningExtension(dobj->catId);
1775 :
1776 959036 : if (ext == NULL)
1777 957644 : return false;
1778 :
1779 1392 : dobj->ext_member = true;
1780 :
1781 : /* Record dependency so that getDependencies needn't deal with that */
1782 1392 : addObjectDependency(dobj, ext->dobj.dumpId);
1783 :
1784 : /*
1785 : * In 9.6 and above, mark the member object to have any non-initial ACLs
1786 : * dumped. (Any initial ACLs will be removed later, using data from
1787 : * pg_init_privs, so that we'll dump only the delta from the extension's
1788 : * initial setup.)
1789 : *
1790 : * Prior to 9.6, we do not include any extension member components.
1791 : *
1792 : * In binary upgrades, we still dump all components of the members
1793 : * individually, since the idea is to exactly reproduce the database
1794 : * contents rather than replace the extension contents with something
1795 : * different.
1796 : *
1797 : * Note: it might be interesting someday to implement storage and delta
1798 : * dumping of extension members' RLS policies and/or security labels.
1799 : * However there is a pitfall for RLS policies: trying to dump them
1800 : * requires getting a lock on their tables, and the calling user might not
1801 : * have privileges for that. We need no lock to examine a table's ACLs,
1802 : * so the current feature doesn't have a problem of that sort.
1803 : */
1804 1392 : if (fout->dopt->binary_upgrade)
1805 152 : dobj->dump = ext->dobj.dump;
1806 : else
1807 : {
1808 1240 : if (fout->remoteVersion < 90600)
1809 0 : dobj->dump = DUMP_COMPONENT_NONE;
1810 : else
1811 1240 : dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL);
1812 : }
1813 :
1814 1392 : return true;
1815 : }
1816 :
1817 : /*
1818 : * selectDumpableNamespace: policy-setting subroutine
1819 : * Mark a namespace as to be dumped or not
1820 : */
1821 : static void
1822 2550 : selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
1823 : {
1824 : /*
1825 : * DUMP_COMPONENT_DEFINITION typically implies a CREATE SCHEMA statement
1826 : * and (for --clean) a DROP SCHEMA statement. (In the absence of
1827 : * DUMP_COMPONENT_DEFINITION, this value is irrelevant.)
1828 : */
1829 2550 : nsinfo->create = true;
1830 :
1831 : /*
1832 : * If specific tables are being dumped, do not dump any complete
1833 : * namespaces. If specific namespaces are being dumped, dump just those
1834 : * namespaces. Otherwise, dump all non-system namespaces.
1835 : */
1836 2550 : if (table_include_oids.head != NULL)
1837 100 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1838 2450 : else if (schema_include_oids.head != NULL)
1839 354 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
1840 354 : simple_oid_list_member(&schema_include_oids,
1841 : nsinfo->dobj.catId.oid) ?
1842 354 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1843 2096 : else if (fout->remoteVersion >= 90600 &&
1844 2096 : strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
1845 : {
1846 : /*
1847 : * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
1848 : * they are interesting (and not the original ACLs which were set at
1849 : * initdb time, see pg_init_privs).
1850 : */
1851 266 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
1852 : }
1853 1830 : else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
1854 820 : strcmp(nsinfo->dobj.name, "information_schema") == 0)
1855 : {
1856 : /* Other system schemas don't get dumped */
1857 1276 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1858 : }
1859 554 : else if (strcmp(nsinfo->dobj.name, "public") == 0)
1860 : {
1861 : /*
1862 : * The public schema is a strange beast that sits in a sort of
1863 : * no-mans-land between being a system object and a user object.
1864 : * CREATE SCHEMA would fail, so its DUMP_COMPONENT_DEFINITION is just
1865 : * a comment and an indication of ownership. If the owner is the
1866 : * default, omit that superfluous DUMP_COMPONENT_DEFINITION. Before
1867 : * v15, the default owner was BOOTSTRAP_SUPERUSERID.
1868 : */
1869 258 : nsinfo->create = false;
1870 258 : nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1871 258 : if (nsinfo->nspowner == ROLE_PG_DATABASE_OWNER)
1872 178 : nsinfo->dobj.dump &= ~DUMP_COMPONENT_DEFINITION;
1873 258 : nsinfo->dobj.dump_contains = DUMP_COMPONENT_ALL;
1874 :
1875 : /*
1876 : * Also, make like it has a comment even if it doesn't; this is so
1877 : * that we'll emit a command to drop the comment, if appropriate.
1878 : * (Without this, we'd not call dumpCommentExtended for it.)
1879 : */
1880 258 : nsinfo->dobj.components |= DUMP_COMPONENT_COMMENT;
1881 : }
1882 : else
1883 296 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
1884 :
1885 : /*
1886 : * In any case, a namespace can be excluded by an exclusion switch
1887 : */
1888 3392 : if (nsinfo->dobj.dump_contains &&
1889 842 : simple_oid_list_member(&schema_exclude_oids,
1890 : nsinfo->dobj.catId.oid))
1891 6 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
1892 :
1893 : /*
1894 : * If the schema belongs to an extension, allow extension membership to
1895 : * override the dump decision for the schema itself. However, this does
1896 : * not change dump_contains, so this won't change what we do with objects
1897 : * within the schema. (If they belong to the extension, they'll get
1898 : * suppressed by it, otherwise not.)
1899 : */
1900 2550 : (void) checkExtensionMembership(&nsinfo->dobj, fout);
1901 2550 : }
1902 :
1903 : /*
1904 : * selectDumpableTable: policy-setting subroutine
1905 : * Mark a table as to be dumped or not
1906 : */
1907 : static void
1908 80826 : selectDumpableTable(TableInfo *tbinfo, Archive *fout)
1909 : {
1910 80826 : if (checkExtensionMembership(&tbinfo->dobj, fout))
1911 450 : return; /* extension membership overrides all else */
1912 :
1913 : /*
1914 : * If specific tables are being dumped, dump just those tables; else, dump
1915 : * according to the parent namespace's dump flag.
1916 : */
1917 80376 : if (table_include_oids.head != NULL)
1918 10104 : tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
1919 : tbinfo->dobj.catId.oid) ?
1920 5052 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
1921 : else
1922 75324 : tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
1923 :
1924 : /*
1925 : * In any case, a table can be excluded by an exclusion switch
1926 : */
1927 130234 : if (tbinfo->dobj.dump &&
1928 49858 : simple_oid_list_member(&table_exclude_oids,
1929 : tbinfo->dobj.catId.oid))
1930 24 : tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
1931 : }
1932 :
1933 : /*
1934 : * selectDumpableType: policy-setting subroutine
1935 : * Mark a type as to be dumped or not
1936 : *
1937 : * If it's a table's rowtype or an autogenerated array type, we also apply a
1938 : * special type code to facilitate sorting into the desired order. (We don't
1939 : * want to consider those to be ordinary types because that would bring tables
1940 : * up into the datatype part of the dump order.) We still set the object's
1941 : * dump flag; that's not going to cause the dummy type to be dumped, but we
1942 : * need it so that casts involving such types will be dumped correctly -- see
1943 : * dumpCast. This means the flag should be set the same as for the underlying
1944 : * object (the table or base type).
1945 : */
1946 : static void
1947 221436 : selectDumpableType(TypeInfo *tyinfo, Archive *fout)
1948 : {
1949 : /* skip complex types, except for standalone composite types */
1950 221436 : if (OidIsValid(tyinfo->typrelid) &&
1951 79486 : tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
1952 : {
1953 79126 : TableInfo *tytable = findTableByOid(tyinfo->typrelid);
1954 :
1955 79126 : tyinfo->dobj.objType = DO_DUMMY_TYPE;
1956 79126 : if (tytable != NULL)
1957 79126 : tyinfo->dobj.dump = tytable->dobj.dump;
1958 : else
1959 0 : tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
1960 79126 : return;
1961 : }
1962 :
1963 : /* skip auto-generated array and multirange types */
1964 142310 : if (tyinfo->isArray || tyinfo->isMultirange)
1965 : {
1966 108278 : tyinfo->dobj.objType = DO_DUMMY_TYPE;
1967 :
1968 : /*
1969 : * Fall through to set the dump flag; we assume that the subsequent
1970 : * rules will do the same thing as they would for the array's base
1971 : * type or multirange's range type. (We cannot reliably look up the
1972 : * base type here, since getTypes may not have processed it yet.)
1973 : */
1974 : }
1975 :
1976 142310 : if (checkExtensionMembership(&tyinfo->dobj, fout))
1977 300 : return; /* extension membership overrides all else */
1978 :
1979 : /* Dump based on if the contents of the namespace are being dumped */
1980 142010 : tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
1981 : }
1982 :
1983 : /*
1984 : * selectDumpableDefaultACL: policy-setting subroutine
1985 : * Mark a default ACL as to be dumped or not
1986 : *
1987 : * For per-schema default ACLs, dump if the schema is to be dumped.
1988 : * Otherwise dump if we are dumping "everything". Note that dataOnly
1989 : * and aclsSkip are checked separately.
1990 : */
1991 : static void
1992 344 : selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
1993 : {
1994 : /* Default ACLs can't be extension members */
1995 :
1996 344 : if (dinfo->dobj.namespace)
1997 : /* default ACLs are considered part of the namespace */
1998 172 : dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
1999 : else
2000 172 : dinfo->dobj.dump = dopt->include_everything ?
2001 172 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2002 344 : }
2003 :
2004 : /*
2005 : * selectDumpableCast: policy-setting subroutine
2006 : * Mark a cast as to be dumped or not
2007 : *
2008 : * Casts do not belong to any particular namespace (since they haven't got
2009 : * names), nor do they have identifiable owners. To distinguish user-defined
2010 : * casts from built-in ones, we must resort to checking whether the cast's
2011 : * OID is in the range reserved for initdb.
2012 : */
2013 : static void
2014 68854 : selectDumpableCast(CastInfo *cast, Archive *fout)
2015 : {
2016 68854 : if (checkExtensionMembership(&cast->dobj, fout))
2017 0 : return; /* extension membership overrides all else */
2018 :
2019 : /*
2020 : * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
2021 : * support ACLs currently.
2022 : */
2023 68854 : if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
2024 68684 : cast->dobj.dump = DUMP_COMPONENT_NONE;
2025 : else
2026 170 : cast->dobj.dump = fout->dopt->include_everything ?
2027 170 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2028 : }
2029 :
2030 : /*
2031 : * selectDumpableProcLang: policy-setting subroutine
2032 : * Mark a procedural language as to be dumped or not
2033 : *
2034 : * Procedural languages do not belong to any particular namespace. To
2035 : * identify built-in languages, we must resort to checking whether the
2036 : * language's OID is in the range reserved for initdb.
2037 : */
2038 : static void
2039 394 : selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
2040 : {
2041 394 : if (checkExtensionMembership(&plang->dobj, fout))
2042 308 : return; /* extension membership overrides all else */
2043 :
2044 : /*
2045 : * Only include procedural languages when we are dumping everything.
2046 : *
2047 : * For from-initdb procedural languages, only include ACLs, as we do for
2048 : * the pg_catalog namespace. We need this because procedural languages do
2049 : * not live in any namespace.
2050 : */
2051 86 : if (!fout->dopt->include_everything)
2052 16 : plang->dobj.dump = DUMP_COMPONENT_NONE;
2053 : else
2054 : {
2055 70 : if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
2056 0 : plang->dobj.dump = fout->remoteVersion < 90600 ?
2057 0 : DUMP_COMPONENT_NONE : DUMP_COMPONENT_ACL;
2058 : else
2059 70 : plang->dobj.dump = DUMP_COMPONENT_ALL;
2060 : }
2061 : }
2062 :
2063 : /*
2064 : * selectDumpableAccessMethod: policy-setting subroutine
2065 : * Mark an access method as to be dumped or not
2066 : *
2067 : * Access methods do not belong to any particular namespace. To identify
2068 : * built-in access methods, we must resort to checking whether the
2069 : * method's OID is in the range reserved for initdb.
2070 : */
2071 : static void
2072 2392 : selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
2073 : {
2074 2392 : if (checkExtensionMembership(&method->dobj, fout))
2075 50 : return; /* extension membership overrides all else */
2076 :
2077 : /*
2078 : * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
2079 : * they do not support ACLs currently.
2080 : */
2081 2342 : if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
2082 2156 : method->dobj.dump = DUMP_COMPONENT_NONE;
2083 : else
2084 186 : method->dobj.dump = fout->dopt->include_everything ?
2085 186 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2086 : }
2087 :
2088 : /*
2089 : * selectDumpableExtension: policy-setting subroutine
2090 : * Mark an extension as to be dumped or not
2091 : *
2092 : * Built-in extensions should be skipped except for checking ACLs, since we
2093 : * assume those will already be installed in the target database. We identify
2094 : * such extensions by their having OIDs in the range reserved for initdb.
2095 : * We dump all user-added extensions by default. No extensions are dumped
2096 : * if include_everything is false (i.e., a --schema or --table switch was
2097 : * given), except if --extension specifies a list of extensions to dump.
2098 : */
2099 : static void
2100 360 : selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
2101 : {
2102 : /*
2103 : * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
2104 : * change permissions on their member objects, if they wish to, and have
2105 : * those changes preserved.
2106 : */
2107 360 : if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
2108 310 : extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
2109 : else
2110 : {
2111 : /* check if there is a list of extensions to dump */
2112 50 : if (extension_include_oids.head != NULL)
2113 8 : extinfo->dobj.dump = extinfo->dobj.dump_contains =
2114 8 : simple_oid_list_member(&extension_include_oids,
2115 : extinfo->dobj.catId.oid) ?
2116 8 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2117 : else
2118 42 : extinfo->dobj.dump = extinfo->dobj.dump_contains =
2119 42 : dopt->include_everything ?
2120 42 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2121 :
2122 : /* check that the extension is not explicitly excluded */
2123 92 : if (extinfo->dobj.dump &&
2124 42 : simple_oid_list_member(&extension_exclude_oids,
2125 : extinfo->dobj.catId.oid))
2126 4 : extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_NONE;
2127 : }
2128 360 : }
2129 :
2130 : /*
2131 : * selectDumpablePublicationObject: policy-setting subroutine
2132 : * Mark a publication object as to be dumped or not
2133 : *
2134 : * A publication can have schemas and tables which have schemas, but those are
2135 : * ignored in decision making, because publications are only dumped when we are
2136 : * dumping everything.
2137 : */
2138 : static void
2139 652 : selectDumpablePublicationObject(DumpableObject *dobj, Archive *fout)
2140 : {
2141 652 : if (checkExtensionMembership(dobj, fout))
2142 0 : return; /* extension membership overrides all else */
2143 :
2144 652 : dobj->dump = fout->dopt->include_everything ?
2145 652 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2146 : }
2147 :
2148 : /*
2149 : * selectDumpableStatisticsObject: policy-setting subroutine
2150 : * Mark an extended statistics object as to be dumped or not
2151 : *
2152 : * We dump an extended statistics object if the schema it's in and the table
2153 : * it's for are being dumped. (This'll need more thought if statistics
2154 : * objects ever support cross-table stats.)
2155 : */
2156 : static void
2157 314 : selectDumpableStatisticsObject(StatsExtInfo *sobj, Archive *fout)
2158 : {
2159 314 : if (checkExtensionMembership(&sobj->dobj, fout))
2160 0 : return; /* extension membership overrides all else */
2161 :
2162 314 : sobj->dobj.dump = sobj->dobj.namespace->dobj.dump_contains;
2163 314 : if (sobj->stattable == NULL ||
2164 314 : !(sobj->stattable->dobj.dump & DUMP_COMPONENT_DEFINITION))
2165 56 : sobj->dobj.dump = DUMP_COMPONENT_NONE;
2166 : }
2167 :
2168 : /*
2169 : * selectDumpableObject: policy-setting subroutine
2170 : * Mark a generic dumpable object as to be dumped or not
2171 : *
2172 : * Use this only for object types without a special-case routine above.
2173 : */
2174 : static void
2175 660744 : selectDumpableObject(DumpableObject *dobj, Archive *fout)
2176 : {
2177 660744 : if (checkExtensionMembership(dobj, fout))
2178 234 : return; /* extension membership overrides all else */
2179 :
2180 : /*
2181 : * Default policy is to dump if parent namespace is dumpable, or for
2182 : * non-namespace-associated items, dump if we're dumping "everything".
2183 : */
2184 660510 : if (dobj->namespace)
2185 659426 : dobj->dump = dobj->namespace->dobj.dump_contains;
2186 : else
2187 1084 : dobj->dump = fout->dopt->include_everything ?
2188 1084 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2189 : }
2190 :
2191 : /*
2192 : * Dump a table's contents for loading using the COPY command
2193 : * - this routine is called by the Archiver when it wants the table
2194 : * to be dumped.
2195 : */
2196 : static int
2197 6890 : dumpTableData_copy(Archive *fout, const void *dcontext)
2198 : {
2199 6890 : TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
2200 6890 : TableInfo *tbinfo = tdinfo->tdtable;
2201 6890 : const char *classname = tbinfo->dobj.name;
2202 6890 : PQExpBuffer q = createPQExpBuffer();
2203 :
2204 : /*
2205 : * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
2206 : * which uses it already.
2207 : */
2208 6890 : PQExpBuffer clistBuf = createPQExpBuffer();
2209 6890 : PGconn *conn = GetConnection(fout);
2210 : PGresult *res;
2211 : int ret;
2212 : char *copybuf;
2213 : const char *column_list;
2214 :
2215 6890 : pg_log_info("dumping contents of table \"%s.%s\"",
2216 : tbinfo->dobj.namespace->dobj.name, classname);
2217 :
2218 : /*
2219 : * Specify the column list explicitly so that we have no possibility of
2220 : * retrieving data in the wrong column order. (The default column
2221 : * ordering of COPY will not be what we want in certain corner cases
2222 : * involving ADD COLUMN and inheritance.)
2223 : */
2224 6890 : column_list = fmtCopyColumnList(tbinfo, clistBuf);
2225 :
2226 : /*
2227 : * Use COPY (SELECT ...) TO when dumping a foreign table's data, and when
2228 : * a filter condition was specified. For other cases a simple COPY
2229 : * suffices.
2230 : */
2231 6890 : if (tdinfo->filtercond || tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2232 : {
2233 : /* Temporary allows to access to foreign tables to dump data */
2234 2 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2235 2 : set_restrict_relation_kind(fout, "view");
2236 :
2237 2 : appendPQExpBufferStr(q, "COPY (SELECT ");
2238 : /* klugery to get rid of parens in column list */
2239 2 : if (strlen(column_list) > 2)
2240 : {
2241 2 : appendPQExpBufferStr(q, column_list + 1);
2242 2 : q->data[q->len - 1] = ' ';
2243 : }
2244 : else
2245 0 : appendPQExpBufferStr(q, "* ");
2246 :
2247 4 : appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
2248 2 : fmtQualifiedDumpable(tbinfo),
2249 2 : tdinfo->filtercond ? tdinfo->filtercond : "");
2250 : }
2251 : else
2252 : {
2253 6888 : appendPQExpBuffer(q, "COPY %s %s TO stdout;",
2254 6888 : fmtQualifiedDumpable(tbinfo),
2255 : column_list);
2256 : }
2257 6890 : res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
2258 6888 : PQclear(res);
2259 6888 : destroyPQExpBuffer(clistBuf);
2260 :
2261 : for (;;)
2262 : {
2263 3597732 : ret = PQgetCopyData(conn, ©buf, 0);
2264 :
2265 3597732 : if (ret < 0)
2266 6888 : break; /* done or error */
2267 :
2268 3590844 : if (copybuf)
2269 : {
2270 3590844 : WriteData(fout, copybuf, ret);
2271 3590844 : PQfreemem(copybuf);
2272 : }
2273 :
2274 : /* ----------
2275 : * THROTTLE:
2276 : *
2277 : * There was considerable discussion in late July, 2000 regarding
2278 : * slowing down pg_dump when backing up large tables. Users with both
2279 : * slow & fast (multi-processor) machines experienced performance
2280 : * degradation when doing a backup.
2281 : *
2282 : * Initial attempts based on sleeping for a number of ms for each ms
2283 : * of work were deemed too complex, then a simple 'sleep in each loop'
2284 : * implementation was suggested. The latter failed because the loop
2285 : * was too tight. Finally, the following was implemented:
2286 : *
2287 : * If throttle is non-zero, then
2288 : * See how long since the last sleep.
2289 : * Work out how long to sleep (based on ratio).
2290 : * If sleep is more than 100ms, then
2291 : * sleep
2292 : * reset timer
2293 : * EndIf
2294 : * EndIf
2295 : *
2296 : * where the throttle value was the number of ms to sleep per ms of
2297 : * work. The calculation was done in each loop.
2298 : *
2299 : * Most of the hard work is done in the backend, and this solution
2300 : * still did not work particularly well: on slow machines, the ratio
2301 : * was 50:1, and on medium paced machines, 1:1, and on fast
2302 : * multi-processor machines, it had little or no effect, for reasons
2303 : * that were unclear.
2304 : *
2305 : * Further discussion ensued, and the proposal was dropped.
2306 : *
2307 : * For those people who want this feature, it can be implemented using
2308 : * gettimeofday in each loop, calculating the time since last sleep,
2309 : * multiplying that by the sleep ratio, then if the result is more
2310 : * than a preset 'minimum sleep time' (say 100ms), call the 'select'
2311 : * function to sleep for a subsecond period ie.
2312 : *
2313 : * select(0, NULL, NULL, NULL, &tvi);
2314 : *
2315 : * This will return after the interval specified in the structure tvi.
2316 : * Finally, call gettimeofday again to save the 'last sleep time'.
2317 : * ----------
2318 : */
2319 : }
2320 6888 : archprintf(fout, "\\.\n\n\n");
2321 :
2322 6888 : if (ret == -2)
2323 : {
2324 : /* copy data transfer failed */
2325 0 : pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
2326 0 : pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
2327 0 : pg_log_error_detail("Command was: %s", q->data);
2328 0 : exit_nicely(1);
2329 : }
2330 :
2331 : /* Check command status and return to normal libpq state */
2332 6888 : res = PQgetResult(conn);
2333 6888 : if (PQresultStatus(res) != PGRES_COMMAND_OK)
2334 : {
2335 0 : pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
2336 0 : pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
2337 0 : pg_log_error_detail("Command was: %s", q->data);
2338 0 : exit_nicely(1);
2339 : }
2340 6888 : PQclear(res);
2341 :
2342 : /* Do this to ensure we've pumped libpq back to idle state */
2343 6888 : if (PQgetResult(conn) != NULL)
2344 0 : pg_log_warning("unexpected extra results during COPY of table \"%s\"",
2345 : classname);
2346 :
2347 6888 : destroyPQExpBuffer(q);
2348 :
2349 : /* Revert back the setting */
2350 6888 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2351 0 : set_restrict_relation_kind(fout, "view, foreign-table");
2352 :
2353 6888 : return 1;
2354 : }
2355 :
2356 : /*
2357 : * Dump table data using INSERT commands.
2358 : *
2359 : * Caution: when we restore from an archive file direct to database, the
2360 : * INSERT commands emitted by this function have to be parsed by
2361 : * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
2362 : * E'' strings, or dollar-quoted strings. So don't emit anything like that.
2363 : */
static int
dumpTableData_insert(Archive *fout, const void *dcontext)
{
	TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
	TableInfo  *tbinfo = tdinfo->tdtable;
	DumpOptions *dopt = fout->dopt;
	PQExpBuffer q = createPQExpBuffer();
	PQExpBuffer insertStmt = NULL;
	char	   *attgenerated;
	PGresult   *res;
	int			nfields,
				i;
	int			rows_per_statement = dopt->dump_inserts;
	int			rows_this_statement = 0;

	/* Temporarily allow access to foreign tables, so their data can be dumped */
	if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
		set_restrict_relation_kind(fout, "view");

	/*
	 * If we're going to emit INSERTs with column names, the most efficient
	 * way to deal with generated columns is to exclude them entirely.  For
	 * INSERTs without column names, we have to emit DEFAULT rather than the
	 * actual column value --- but we can save a few cycles by fetching nulls
	 * rather than the uninteresting-to-us value.
	 */
	attgenerated = (char *) pg_malloc(tbinfo->numatts * sizeof(char));
	appendPQExpBufferStr(q, "DECLARE _pg_dump_cursor CURSOR FOR SELECT ");
	nfields = 0;
	for (i = 0; i < tbinfo->numatts; i++)
	{
		/* Dropped columns are never fetched */
		if (tbinfo->attisdropped[i])
			continue;
		/* With column lists, generated columns are omitted entirely */
		if (tbinfo->attgenerated[i] && dopt->column_inserts)
			continue;
		if (nfields > 0)
			appendPQExpBufferStr(q, ", ");
		if (tbinfo->attgenerated[i])
			appendPQExpBufferStr(q, "NULL");
		else
			appendPQExpBufferStr(q, fmtId(tbinfo->attnames[i]));
		/* Remember per-output-column generated status for DEFAULT emission */
		attgenerated[nfields] = tbinfo->attgenerated[i];
		nfields++;
	}
	/* Servers before 9.4 will complain about zero-column SELECT */
	if (nfields == 0)
		appendPQExpBufferStr(q, "NULL");
	appendPQExpBuffer(q, " FROM ONLY %s",
					  fmtQualifiedDumpable(tbinfo));
	if (tdinfo->filtercond)
		appendPQExpBuffer(q, " %s", tdinfo->filtercond);

	ExecuteSqlStatement(fout, q->data);

	while (1)
	{
		/* Fetch rows in batches of 100 to bound memory usage */
		res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
							  PGRES_TUPLES_OK);

		/* cross-check field count, allowing for dummy NULL if any */
		if (nfields != PQnfields(res) &&
			!(nfields == 0 && PQnfields(res) == 1))
			pg_fatal("wrong number of fields retrieved from table \"%s\"",
					 tbinfo->dobj.name);

		/*
		 * First time through, we build as much of the INSERT statement as
		 * possible in "insertStmt", which we can then just print for each
		 * statement. If the table happens to have zero dumpable columns then
		 * this will be a complete statement, otherwise it will end in
		 * "VALUES" and be ready to have the row's column values printed.
		 */
		if (insertStmt == NULL)
		{
			TableInfo  *targettab;

			insertStmt = createPQExpBuffer();

			/*
			 * When load-via-partition-root is set or forced, get the root
			 * table name for the partition table, so that we can reload data
			 * through the root table.
			 */
			if (tbinfo->ispartition &&
				(dopt->load_via_partition_root ||
				 forcePartitionRootLoad(tbinfo)))
				targettab = getRootTableInfo(tbinfo);
			else
				targettab = tbinfo;

			appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
							  fmtQualifiedDumpable(targettab));

			/* corner case for zero-column table */
			if (nfields == 0)
			{
				appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
			}
			else
			{
				/* append the list of column names if required */
				if (dopt->column_inserts)
				{
					appendPQExpBufferChar(insertStmt, '(');
					for (int field = 0; field < nfields; field++)
					{
						if (field > 0)
							appendPQExpBufferStr(insertStmt, ", ");
						appendPQExpBufferStr(insertStmt,
											 fmtId(PQfname(res, field)));
					}
					appendPQExpBufferStr(insertStmt, ") ");
				}

				/* Needed so identity GENERATED ALWAYS columns can be reloaded */
				if (tbinfo->needs_override)
					appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");

				appendPQExpBufferStr(insertStmt, "VALUES");
			}
		}

		for (int tuple = 0; tuple < PQntuples(res); tuple++)
		{
			/* Write the INSERT if not in the middle of a multi-row INSERT. */
			if (rows_this_statement == 0)
				archputs(insertStmt->data, fout);

			/*
			 * If it is zero-column table then we've already written the
			 * complete statement, which will mean we've disobeyed
			 * --rows-per-insert when it's set greater than 1.  We do support
			 * a way to make this multi-row with: SELECT UNION ALL SELECT
			 * UNION ALL ... but that's non-standard so we should avoid it
			 * given that using INSERTs is mostly only ever needed for
			 * cross-database exports.
			 */
			if (nfields == 0)
				continue;

			/* Emit a row heading */
			if (rows_per_statement == 1)
				archputs(" (", fout);
			else if (rows_this_statement > 0)
				archputs(",\n\t(", fout);
			else
				archputs("\n\t(", fout);

			for (int field = 0; field < nfields; field++)
			{
				if (field > 0)
					archputs(", ", fout);
				/* Generated columns must be reloaded as DEFAULT */
				if (attgenerated[field])
				{
					archputs("DEFAULT", fout);
					continue;
				}
				if (PQgetisnull(res, tuple, field))
				{
					archputs("NULL", fout);
					continue;
				}

				/* XXX This code is partially duplicated in ruleutils.c */
				switch (PQftype(res, field))
				{
					case INT2OID:
					case INT4OID:
					case INT8OID:
					case OIDOID:
					case FLOAT4OID:
					case FLOAT8OID:
					case NUMERICOID:
						{
							/*
							 * These types are printed without quotes unless
							 * they contain values that aren't accepted by the
							 * scanner unquoted (e.g., 'NaN').  Note that
							 * strtod() and friends might accept NaN, so we
							 * can't use that to test.
							 *
							 * In reality we only need to defend against
							 * infinity and NaN, so we need not get too crazy
							 * about pattern matching here.
							 */
							const char *s = PQgetvalue(res, tuple, field);

							if (strspn(s, "0123456789 +-eE.") == strlen(s))
								archputs(s, fout);
							else
								archprintf(fout, "'%s'", s);
						}
						break;

					case BITOID:
					case VARBITOID:
						/* Bit strings get the B'...' literal form */
						archprintf(fout, "B'%s'",
								   PQgetvalue(res, tuple, field));
						break;

					case BOOLOID:
						if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
							archputs("true", fout);
						else
							archputs("false", fout);
						break;

					default:
						/* All other types are printed as string literals. */
						resetPQExpBuffer(q);
						appendStringLiteralAH(q,
											  PQgetvalue(res, tuple, field),
											  fout);
						archputs(q->data, fout);
						break;
				}
			}

			/* Terminate the row ... */
			archputs(")", fout);

			/* ... and the statement, if the target no. of rows is reached */
			if (++rows_this_statement >= rows_per_statement)
			{
				if (dopt->do_nothing)
					archputs(" ON CONFLICT DO NOTHING;\n", fout);
				else
					archputs(";\n", fout);
				/* Reset the row counter */
				rows_this_statement = 0;
			}
		}

		/* An empty batch means the cursor is exhausted */
		if (PQntuples(res) <= 0)
		{
			PQclear(res);
			break;
		}
		PQclear(res);
	}

	/* Terminate any statements that didn't make the row count. */
	if (rows_this_statement > 0)
	{
		if (dopt->do_nothing)
			archputs(" ON CONFLICT DO NOTHING;\n", fout);
		else
			archputs(";\n", fout);
	}

	archputs("\n\n", fout);

	ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");

	destroyPQExpBuffer(q);
	if (insertStmt != NULL)
		destroyPQExpBuffer(insertStmt);
	free(attgenerated);

	/* Revert back the setting */
	if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
		set_restrict_relation_kind(fout, "view, foreign-table");

	return 1;
}
2628 :
2629 : /*
2630 : * getRootTableInfo:
2631 : * get the root TableInfo for the given partition table.
2632 : */
2633 : static TableInfo *
2634 18 : getRootTableInfo(const TableInfo *tbinfo)
2635 : {
2636 : TableInfo *parentTbinfo;
2637 :
2638 : Assert(tbinfo->ispartition);
2639 : Assert(tbinfo->numParents == 1);
2640 :
2641 18 : parentTbinfo = tbinfo->parents[0];
2642 18 : while (parentTbinfo->ispartition)
2643 : {
2644 : Assert(parentTbinfo->numParents == 1);
2645 0 : parentTbinfo = parentTbinfo->parents[0];
2646 : }
2647 :
2648 18 : return parentTbinfo;
2649 : }
2650 :
2651 : /*
2652 : * forcePartitionRootLoad
2653 : * Check if we must force load_via_partition_root for this partition.
2654 : *
2655 : * This is required if any level of ancestral partitioned table has an
2656 : * unsafe partitioning scheme.
2657 : */
2658 : static bool
2659 1876 : forcePartitionRootLoad(const TableInfo *tbinfo)
2660 : {
2661 : TableInfo *parentTbinfo;
2662 :
2663 : Assert(tbinfo->ispartition);
2664 : Assert(tbinfo->numParents == 1);
2665 :
2666 1876 : parentTbinfo = tbinfo->parents[0];
2667 1876 : if (parentTbinfo->unsafe_partitions)
2668 18 : return true;
2669 2290 : while (parentTbinfo->ispartition)
2670 : {
2671 : Assert(parentTbinfo->numParents == 1);
2672 432 : parentTbinfo = parentTbinfo->parents[0];
2673 432 : if (parentTbinfo->unsafe_partitions)
2674 0 : return true;
2675 : }
2676 :
2677 1858 : return false;
2678 : }
2679 :
2680 : /*
2681 : * dumpTableData -
2682 : * dump the contents of a single table
2683 : *
2684 : * Actually, this just makes an ArchiveEntry for the table contents.
2685 : */
static void
dumpTableData(Archive *fout, const TableDataInfo *tdinfo)
{
	DumpOptions *dopt = fout->dopt;
	TableInfo  *tbinfo = tdinfo->tdtable;
	PQExpBuffer copyBuf = createPQExpBuffer();
	PQExpBuffer clistBuf = createPQExpBuffer();
	DataDumperPtr dumpFn;
	char	   *tdDefn = NULL;
	char	   *copyStmt;
	const char *copyFrom;

	/* We had better have loaded per-column details about this table */
	Assert(tbinfo->interesting);

	/*
	 * When load-via-partition-root is set or forced, get the root table name
	 * for the partition table, so that we can reload data through the root
	 * table.  Then construct a comment to be inserted into the TOC entry's
	 * defn field, so that such cases can be identified reliably.
	 */
	if (tbinfo->ispartition &&
		(dopt->load_via_partition_root ||
		 forcePartitionRootLoad(tbinfo)))
	{
		TableInfo  *parentTbinfo;

		parentTbinfo = getRootTableInfo(tbinfo);
		copyFrom = fmtQualifiedDumpable(parentTbinfo);
		printfPQExpBuffer(copyBuf, "-- load via partition root %s",
						  copyFrom);
		tdDefn = pg_strdup(copyBuf->data);
	}
	else
		copyFrom = fmtQualifiedDumpable(tbinfo);

	/* Choose the data dumper: COPY by default, INSERT when requested */
	if (dopt->dump_inserts == 0)
	{
		/* Dump/restore using COPY */
		dumpFn = dumpTableData_copy;
		/* must use 2 steps here 'cause fmtId is nonreentrant */
		printfPQExpBuffer(copyBuf, "COPY %s ",
						  copyFrom);
		appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
						  fmtCopyColumnList(tbinfo, clistBuf));
		copyStmt = copyBuf->data;
	}
	else
	{
		/* Restore using INSERT */
		dumpFn = dumpTableData_insert;
		copyStmt = NULL;
	}

	/*
	 * Note: although the TableDataInfo is a full DumpableObject, we treat its
	 * dependency on its table as "special" and pass it to ArchiveEntry now.
	 * See comments for BuildArchiveDependencies.
	 */
	if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
	{
		TocEntry   *te;

		te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
						  ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
									   .namespace = tbinfo->dobj.namespace->dobj.name,
									   .owner = tbinfo->rolname,
									   .description = "TABLE DATA",
									   .section = SECTION_DATA,
									   .createStmt = tdDefn,
									   .copyStmt = copyStmt,
									   .deps = &(tbinfo->dobj.dumpId),
									   .nDeps = 1,
									   .dumpFn = dumpFn,
									   .dumpArg = tdinfo));

		/*
		 * Set the TocEntry's dataLength in case we are doing a parallel dump
		 * and want to order dump jobs by table size.  We choose to measure
		 * dataLength in table pages (including TOAST pages) during dump, so
		 * no scaling is needed.
		 *
		 * However, relpages is declared as "integer" in pg_class, and hence
		 * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
		 * Cast so that we get the right interpretation of table sizes
		 * exceeding INT_MAX pages.
		 */
		te->dataLength = (BlockNumber) tbinfo->relpages;
		te->dataLength += (BlockNumber) tbinfo->toastpages;

		/*
		 * If pgoff_t is only 32 bits wide, the above refinement is useless,
		 * and instead we'd better worry about integer overflow.  Clamp to
		 * INT_MAX if the correct result exceeds that.
		 */
		if (sizeof(te->dataLength) == 4 &&
			(tbinfo->relpages < 0 || tbinfo->toastpages < 0 ||
			 te->dataLength < 0))
			te->dataLength = INT_MAX;
	}

	destroyPQExpBuffer(copyBuf);
	destroyPQExpBuffer(clistBuf);
}
2790 :
2791 : /*
2792 : * refreshMatViewData -
2793 : * load or refresh the contents of a single materialized view
2794 : *
2795 : * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2796 : * statement.
2797 : */
static void
refreshMatViewData(Archive *fout, const TableDataInfo *tdinfo)
{
	TableInfo  *tbinfo = tdinfo->tdtable;
	PQExpBuffer q;

	/* If the materialized view is not flagged as populated, skip this. */
	if (!tbinfo->relispopulated)
		return;

	q = createPQExpBuffer();

	appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
					  fmtQualifiedDumpable(tbinfo));

	/* Emit in the post-data section so the matview definition exists first */
	if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
		ArchiveEntry(fout,
					 tdinfo->dobj.catId,	/* catalog ID */
					 tdinfo->dobj.dumpId,	/* dump ID */
					 ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
								  .namespace = tbinfo->dobj.namespace->dobj.name,
								  .owner = tbinfo->rolname,
								  .description = "MATERIALIZED VIEW DATA",
								  .section = SECTION_POST_DATA,
								  .createStmt = q->data,
								  .deps = tdinfo->dobj.dependencies,
								  .nDeps = tdinfo->dobj.nDeps));

	destroyPQExpBuffer(q);
}
2828 :
2829 : /*
2830 : * getTableData -
2831 : * set up dumpable objects representing the contents of tables
2832 : */
2833 : static void
2834 304 : getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
2835 : {
2836 : int i;
2837 :
2838 80010 : for (i = 0; i < numTables; i++)
2839 : {
2840 79706 : if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
2841 1600 : (!relkind || tblinfo[i].relkind == relkind))
2842 10406 : makeTableDataInfo(dopt, &(tblinfo[i]));
2843 : }
2844 304 : }
2845 :
2846 : /*
2847 : * Make a dumpable object for the data of this specific table
2848 : *
2849 : * Note: we make a TableDataInfo if and only if we are going to dump the
2850 : * table data; the "dump" field in such objects isn't very interesting.
2851 : */
static void
makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo)
{
	TableDataInfo *tdinfo;

	/*
	 * Nothing to do if we already decided to dump the table.  This will
	 * happen for "config" tables.
	 */
	if (tbinfo->dataObj != NULL)
		return;

	/* Skip VIEWs (no data to dump) */
	if (tbinfo->relkind == RELKIND_VIEW)
		return;
	/* Skip FOREIGN TABLEs (no data to dump) unless requested explicitly */
	if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
		(foreign_servers_include_oids.head == NULL ||
		 !simple_oid_list_member(&foreign_servers_include_oids,
								 tbinfo->foreign_server)))
		return;
	/* Skip partitioned tables (data in partitions) */
	if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
		return;

	/* Don't dump data in unlogged tables, if so requested */
	if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
		dopt->no_unlogged_table_data)
		return;

	/* Check that the data is not explicitly excluded */
	if (simple_oid_list_member(&tabledata_exclude_oids,
							   tbinfo->dobj.catId.oid))
		return;

	/* OK, let's dump it */
	tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));

	/* Matviews and sequences get specialized object types for sorting */
	if (tbinfo->relkind == RELKIND_MATVIEW)
		tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
	else if (tbinfo->relkind == RELKIND_SEQUENCE)
		tdinfo->dobj.objType = DO_SEQUENCE_SET;
	else
		tdinfo->dobj.objType = DO_TABLE_DATA;

	/*
	 * Note: use tableoid 0 so that this object won't be mistaken for
	 * something that pg_depend entries apply to.
	 */
	tdinfo->dobj.catId.tableoid = 0;
	tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
	AssignDumpId(&tdinfo->dobj);
	tdinfo->dobj.name = tbinfo->dobj.name;
	tdinfo->dobj.namespace = tbinfo->dobj.namespace;
	tdinfo->tdtable = tbinfo;
	tdinfo->filtercond = NULL;	/* might get set later */
	addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);

	/* A TableDataInfo contains data, of course */
	tdinfo->dobj.components |= DUMP_COMPONENT_DATA;

	tbinfo->dataObj = tdinfo;

	/* Make sure that we'll collect per-column info for this table. */
	tbinfo->interesting = true;
}
2918 :
2919 : /*
2920 : * The refresh for a materialized view must be dependent on the refresh for
2921 : * any materialized view that this one is dependent on.
2922 : *
2923 : * This must be called after all the objects are created, but before they are
2924 : * sorted.
2925 : */
static void
buildMatViewRefreshDependencies(Archive *fout)
{
	PQExpBuffer query;
	PGresult   *res;
	int			ntups,
				i;
	int			i_classid,
				i_objid,
				i_refobjid;

	/* No Mat Views before 9.3. */
	if (fout->remoteVersion < 90300)
		return;

	query = createPQExpBuffer();

	/*
	 * Recursively walk pg_depend through each matview's rewrite rule to find
	 * the matviews it reads from, looking through any intermediate plain
	 * views; only matview-on-matview pairs come back.
	 */
	appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
						 "( "
						 "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
						 "FROM pg_depend d1 "
						 "JOIN pg_class c1 ON c1.oid = d1.objid "
						 "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
						 " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
						 "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
						 "AND d2.objid = r1.oid "
						 "AND d2.refobjid <> d1.objid "
						 "JOIN pg_class c2 ON c2.oid = d2.refobjid "
						 "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
						 CppAsString2(RELKIND_VIEW) ") "
						 "WHERE d1.classid = 'pg_class'::regclass "
						 "UNION "
						 "SELECT w.objid, d3.refobjid, c3.relkind "
						 "FROM w "
						 "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
						 "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
						 "AND d3.objid = r3.oid "
						 "AND d3.refobjid <> w.refobjid "
						 "JOIN pg_class c3 ON c3.oid = d3.refobjid "
						 "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
						 CppAsString2(RELKIND_VIEW) ") "
						 ") "
						 "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
						 "FROM w "
						 "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));

	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

	ntups = PQntuples(res);

	i_classid = PQfnumber(res, "classid");
	i_objid = PQfnumber(res, "objid");
	i_refobjid = PQfnumber(res, "refobjid");

	for (i = 0; i < ntups; i++)
	{
		CatalogId	objId;
		CatalogId	refobjId;
		DumpableObject *dobj;
		DumpableObject *refdobj;
		TableInfo  *tbinfo;
		TableInfo  *reftbinfo;

		objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
		objId.oid = atooid(PQgetvalue(res, i, i_objid));
		refobjId.tableoid = objId.tableoid;
		refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));

		/* Either table may not have been selected for dumping; skip if so */
		dobj = findObjectByCatalogId(objId);
		if (dobj == NULL)
			continue;

		Assert(dobj->objType == DO_TABLE);
		tbinfo = (TableInfo *) dobj;
		Assert(tbinfo->relkind == RELKIND_MATVIEW);
		dobj = (DumpableObject *) tbinfo->dataObj;
		if (dobj == NULL)
			continue;
		Assert(dobj->objType == DO_REFRESH_MATVIEW);

		refdobj = findObjectByCatalogId(refobjId);
		if (refdobj == NULL)
			continue;

		Assert(refdobj->objType == DO_TABLE);
		reftbinfo = (TableInfo *) refdobj;
		Assert(reftbinfo->relkind == RELKIND_MATVIEW);
		refdobj = (DumpableObject *) reftbinfo->dataObj;
		if (refdobj == NULL)
			continue;
		Assert(refdobj->objType == DO_REFRESH_MATVIEW);

		/* Refresh of this matview must follow refresh of the one it reads */
		addObjectDependency(dobj, refdobj->dumpId);

		/* An unpopulated upstream matview means this one can't be refreshed */
		if (!reftbinfo->relispopulated)
			tbinfo->relispopulated = false;
	}

	PQclear(res);

	destroyPQExpBuffer(query);
}
3028 :
3029 : /*
3030 : * getTableDataFKConstraints -
3031 : * add dump-order dependencies reflecting foreign key constraints
3032 : *
3033 : * This code is executed only in a data-only dump --- in schema+data dumps
3034 : * we handle foreign key issues by not creating the FK constraints until
3035 : * after the data is loaded. In a data-only dump, however, we want to
3036 : * order the table data objects in such a way that a table's referenced
3037 : * tables are restored first. (In the presence of circular references or
3038 : * self-references this may be impossible; we'll detect and complain about
3039 : * that during the dependency sorting step.)
3040 : */
3041 : static void
3042 12 : getTableDataFKConstraints(void)
3043 : {
3044 : DumpableObject **dobjs;
3045 : int numObjs;
3046 : int i;
3047 :
3048 : /* Search through all the dumpable objects for FK constraints */
3049 12 : getDumpableObjects(&dobjs, &numObjs);
3050 42770 : for (i = 0; i < numObjs; i++)
3051 : {
3052 42758 : if (dobjs[i]->objType == DO_FK_CONSTRAINT)
3053 : {
3054 12 : ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
3055 : TableInfo *ftable;
3056 :
3057 : /* Not interesting unless both tables are to be dumped */
3058 12 : if (cinfo->contable == NULL ||
3059 12 : cinfo->contable->dataObj == NULL)
3060 6 : continue;
3061 6 : ftable = findTableByOid(cinfo->confrelid);
3062 6 : if (ftable == NULL ||
3063 6 : ftable->dataObj == NULL)
3064 0 : continue;
3065 :
3066 : /*
3067 : * Okay, make referencing table's TABLE_DATA object depend on the
3068 : * referenced table's TABLE_DATA object.
3069 : */
3070 6 : addObjectDependency(&cinfo->contable->dataObj->dobj,
3071 6 : ftable->dataObj->dobj.dumpId);
3072 : }
3073 : }
3074 12 : free(dobjs);
3075 12 : }
3076 :
3077 :
3078 : /*
3079 : * dumpDatabase:
3080 : * dump the database definition
3081 : */
3082 : static void
3083 120 : dumpDatabase(Archive *fout)
3084 : {
3085 120 : DumpOptions *dopt = fout->dopt;
3086 120 : PQExpBuffer dbQry = createPQExpBuffer();
3087 120 : PQExpBuffer delQry = createPQExpBuffer();
3088 120 : PQExpBuffer creaQry = createPQExpBuffer();
3089 120 : PQExpBuffer labelq = createPQExpBuffer();
3090 120 : PGconn *conn = GetConnection(fout);
3091 : PGresult *res;
3092 : int i_tableoid,
3093 : i_oid,
3094 : i_datname,
3095 : i_datdba,
3096 : i_encoding,
3097 : i_datlocprovider,
3098 : i_collate,
3099 : i_ctype,
3100 : i_datlocale,
3101 : i_daticurules,
3102 : i_frozenxid,
3103 : i_minmxid,
3104 : i_datacl,
3105 : i_acldefault,
3106 : i_datistemplate,
3107 : i_datconnlimit,
3108 : i_datcollversion,
3109 : i_tablespace;
3110 : CatalogId dbCatId;
3111 : DumpId dbDumpId;
3112 : DumpableAcl dbdacl;
3113 : const char *datname,
3114 : *dba,
3115 : *encoding,
3116 : *datlocprovider,
3117 : *collate,
3118 : *ctype,
3119 : *locale,
3120 : *icurules,
3121 : *datistemplate,
3122 : *datconnlimit,
3123 : *tablespace;
3124 : uint32 frozenxid,
3125 : minmxid;
3126 : char *qdatname;
3127 :
3128 120 : pg_log_info("saving database definition");
3129 :
3130 : /*
3131 : * Fetch the database-level properties for this database.
3132 : */
3133 120 : appendPQExpBufferStr(dbQry, "SELECT tableoid, oid, datname, "
3134 : "datdba, "
3135 : "pg_encoding_to_char(encoding) AS encoding, "
3136 : "datcollate, datctype, datfrozenxid, "
3137 : "datacl, acldefault('d', datdba) AS acldefault, "
3138 : "datistemplate, datconnlimit, ");
3139 120 : if (fout->remoteVersion >= 90300)
3140 120 : appendPQExpBufferStr(dbQry, "datminmxid, ");
3141 : else
3142 0 : appendPQExpBufferStr(dbQry, "0 AS datminmxid, ");
3143 120 : if (fout->remoteVersion >= 170000)
3144 120 : appendPQExpBufferStr(dbQry, "datlocprovider, datlocale, datcollversion, ");
3145 0 : else if (fout->remoteVersion >= 150000)
3146 0 : appendPQExpBufferStr(dbQry, "datlocprovider, daticulocale AS datlocale, datcollversion, ");
3147 : else
3148 0 : appendPQExpBufferStr(dbQry, "'c' AS datlocprovider, NULL AS datlocale, NULL AS datcollversion, ");
3149 120 : if (fout->remoteVersion >= 160000)
3150 120 : appendPQExpBufferStr(dbQry, "daticurules, ");
3151 : else
3152 0 : appendPQExpBufferStr(dbQry, "NULL AS daticurules, ");
3153 120 : appendPQExpBufferStr(dbQry,
3154 : "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
3155 : "shobj_description(oid, 'pg_database') AS description "
3156 : "FROM pg_database "
3157 : "WHERE datname = current_database()");
3158 :
3159 120 : res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
3160 :
3161 120 : i_tableoid = PQfnumber(res, "tableoid");
3162 120 : i_oid = PQfnumber(res, "oid");
3163 120 : i_datname = PQfnumber(res, "datname");
3164 120 : i_datdba = PQfnumber(res, "datdba");
3165 120 : i_encoding = PQfnumber(res, "encoding");
3166 120 : i_datlocprovider = PQfnumber(res, "datlocprovider");
3167 120 : i_collate = PQfnumber(res, "datcollate");
3168 120 : i_ctype = PQfnumber(res, "datctype");
3169 120 : i_datlocale = PQfnumber(res, "datlocale");
3170 120 : i_daticurules = PQfnumber(res, "daticurules");
3171 120 : i_frozenxid = PQfnumber(res, "datfrozenxid");
3172 120 : i_minmxid = PQfnumber(res, "datminmxid");
3173 120 : i_datacl = PQfnumber(res, "datacl");
3174 120 : i_acldefault = PQfnumber(res, "acldefault");
3175 120 : i_datistemplate = PQfnumber(res, "datistemplate");
3176 120 : i_datconnlimit = PQfnumber(res, "datconnlimit");
3177 120 : i_datcollversion = PQfnumber(res, "datcollversion");
3178 120 : i_tablespace = PQfnumber(res, "tablespace");
3179 :
3180 120 : dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
3181 120 : dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
3182 120 : datname = PQgetvalue(res, 0, i_datname);
3183 120 : dba = getRoleName(PQgetvalue(res, 0, i_datdba));
3184 120 : encoding = PQgetvalue(res, 0, i_encoding);
3185 120 : datlocprovider = PQgetvalue(res, 0, i_datlocprovider);
3186 120 : collate = PQgetvalue(res, 0, i_collate);
3187 120 : ctype = PQgetvalue(res, 0, i_ctype);
3188 120 : if (!PQgetisnull(res, 0, i_datlocale))
3189 28 : locale = PQgetvalue(res, 0, i_datlocale);
3190 : else
3191 92 : locale = NULL;
3192 120 : if (!PQgetisnull(res, 0, i_daticurules))
3193 0 : icurules = PQgetvalue(res, 0, i_daticurules);
3194 : else
3195 120 : icurules = NULL;
3196 120 : frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
3197 120 : minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
3198 120 : dbdacl.acl = PQgetvalue(res, 0, i_datacl);
3199 120 : dbdacl.acldefault = PQgetvalue(res, 0, i_acldefault);
3200 120 : datistemplate = PQgetvalue(res, 0, i_datistemplate);
3201 120 : datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
3202 120 : tablespace = PQgetvalue(res, 0, i_tablespace);
3203 :
3204 120 : qdatname = pg_strdup(fmtId(datname));
3205 :
3206 : /*
3207 : * Prepare the CREATE DATABASE command. We must specify OID (if we want
3208 : * to preserve that), as well as the encoding, locale, and tablespace
3209 : * since those can't be altered later. Other DB properties are left to
3210 : * the DATABASE PROPERTIES entry, so that they can be applied after
3211 : * reconnecting to the target DB.
3212 : *
3213 : * For binary upgrade, we use the FILE_COPY strategy because testing has
3214 : * shown it to be faster. When the server is in binary upgrade mode, it
3215 : * will also skip the checkpoints this strategy ordinarily performs.
3216 : */
3217 120 : if (dopt->binary_upgrade)
3218 : {
3219 26 : appendPQExpBuffer(creaQry,
3220 : "CREATE DATABASE %s WITH TEMPLATE = template0 "
3221 : "OID = %u STRATEGY = FILE_COPY",
3222 : qdatname, dbCatId.oid);
3223 : }
3224 : else
3225 : {
3226 94 : appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
3227 : qdatname);
3228 : }
3229 120 : if (strlen(encoding) > 0)
3230 : {
3231 120 : appendPQExpBufferStr(creaQry, " ENCODING = ");
3232 120 : appendStringLiteralAH(creaQry, encoding, fout);
3233 : }
3234 :
3235 120 : appendPQExpBufferStr(creaQry, " LOCALE_PROVIDER = ");
3236 120 : if (datlocprovider[0] == 'b')
3237 28 : appendPQExpBufferStr(creaQry, "builtin");
3238 92 : else if (datlocprovider[0] == 'c')
3239 92 : appendPQExpBufferStr(creaQry, "libc");
3240 0 : else if (datlocprovider[0] == 'i')
3241 0 : appendPQExpBufferStr(creaQry, "icu");
3242 : else
3243 0 : pg_fatal("unrecognized locale provider: %s",
3244 : datlocprovider);
3245 :
3246 120 : if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
3247 : {
3248 120 : appendPQExpBufferStr(creaQry, " LOCALE = ");
3249 120 : appendStringLiteralAH(creaQry, collate, fout);
3250 : }
3251 : else
3252 : {
3253 0 : if (strlen(collate) > 0)
3254 : {
3255 0 : appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
3256 0 : appendStringLiteralAH(creaQry, collate, fout);
3257 : }
3258 0 : if (strlen(ctype) > 0)
3259 : {
3260 0 : appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
3261 0 : appendStringLiteralAH(creaQry, ctype, fout);
3262 : }
3263 : }
3264 120 : if (locale)
3265 : {
3266 28 : if (datlocprovider[0] == 'b')
3267 28 : appendPQExpBufferStr(creaQry, " BUILTIN_LOCALE = ");
3268 : else
3269 0 : appendPQExpBufferStr(creaQry, " ICU_LOCALE = ");
3270 :
3271 28 : appendStringLiteralAH(creaQry, locale, fout);
3272 : }
3273 :
3274 120 : if (icurules)
3275 : {
3276 0 : appendPQExpBufferStr(creaQry, " ICU_RULES = ");
3277 0 : appendStringLiteralAH(creaQry, icurules, fout);
3278 : }
3279 :
3280 : /*
3281 : * For binary upgrade, carry over the collation version. For normal
3282 : * dump/restore, omit the version, so that it is computed upon restore.
3283 : */
3284 120 : if (dopt->binary_upgrade)
3285 : {
3286 26 : if (!PQgetisnull(res, 0, i_datcollversion))
3287 : {
3288 26 : appendPQExpBufferStr(creaQry, " COLLATION_VERSION = ");
3289 26 : appendStringLiteralAH(creaQry,
3290 : PQgetvalue(res, 0, i_datcollversion),
3291 : fout);
3292 : }
3293 : }
3294 :
3295 : /*
3296 : * Note: looking at dopt->outputNoTablespaces here is completely the wrong
3297 : * thing; the decision whether to specify a tablespace should be left till
3298 : * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
3299 : * label the DATABASE entry with the tablespace and let the normal
3300 : * tablespace selection logic work ... but CREATE DATABASE doesn't pay
3301 : * attention to default_tablespace, so that won't work.
3302 : */
3303 120 : if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
3304 0 : !dopt->outputNoTablespaces)
3305 0 : appendPQExpBuffer(creaQry, " TABLESPACE = %s",
3306 : fmtId(tablespace));
3307 120 : appendPQExpBufferStr(creaQry, ";\n");
3308 :
3309 120 : appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
3310 : qdatname);
3311 :
3312 120 : dbDumpId = createDumpId();
3313 :
3314 120 : ArchiveEntry(fout,
3315 : dbCatId, /* catalog ID */
3316 : dbDumpId, /* dump ID */
3317 120 : ARCHIVE_OPTS(.tag = datname,
3318 : .owner = dba,
3319 : .description = "DATABASE",
3320 : .section = SECTION_PRE_DATA,
3321 : .createStmt = creaQry->data,
3322 : .dropStmt = delQry->data));
3323 :
3324 : /* Compute correct tag for archive entry */
3325 120 : appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
3326 :
3327 : /* Dump DB comment if any */
3328 : {
3329 : /*
3330 : * 8.2 and up keep comments on shared objects in a shared table, so we
3331 : * cannot use the dumpComment() code used for other database objects.
3332 : * Be careful that the ArchiveEntry parameters match that function.
3333 : */
3334 120 : char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
3335 :
3336 120 : if (comment && *comment && !dopt->no_comments)
3337 : {
3338 50 : resetPQExpBuffer(dbQry);
3339 :
3340 : /*
3341 : * Generates warning when loaded into a differently-named
3342 : * database.
3343 : */
3344 50 : appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
3345 50 : appendStringLiteralAH(dbQry, comment, fout);
3346 50 : appendPQExpBufferStr(dbQry, ";\n");
3347 :
3348 50 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3349 50 : ARCHIVE_OPTS(.tag = labelq->data,
3350 : .owner = dba,
3351 : .description = "COMMENT",
3352 : .section = SECTION_NONE,
3353 : .createStmt = dbQry->data,
3354 : .deps = &dbDumpId,
3355 : .nDeps = 1));
3356 : }
3357 : }
3358 :
3359 : /* Dump DB security label, if enabled */
3360 120 : if (!dopt->no_security_labels)
3361 : {
3362 : PGresult *shres;
3363 : PQExpBuffer seclabelQry;
3364 :
3365 120 : seclabelQry = createPQExpBuffer();
3366 :
3367 120 : buildShSecLabelQuery("pg_database", dbCatId.oid, seclabelQry);
3368 120 : shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
3369 120 : resetPQExpBuffer(seclabelQry);
3370 120 : emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
3371 120 : if (seclabelQry->len > 0)
3372 0 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3373 0 : ARCHIVE_OPTS(.tag = labelq->data,
3374 : .owner = dba,
3375 : .description = "SECURITY LABEL",
3376 : .section = SECTION_NONE,
3377 : .createStmt = seclabelQry->data,
3378 : .deps = &dbDumpId,
3379 : .nDeps = 1));
3380 120 : destroyPQExpBuffer(seclabelQry);
3381 120 : PQclear(shres);
3382 : }
3383 :
3384 : /*
3385 : * Dump ACL if any. Note that we do not support initial privileges
3386 : * (pg_init_privs) on databases.
3387 : */
3388 120 : dbdacl.privtype = 0;
3389 120 : dbdacl.initprivs = NULL;
3390 :
3391 120 : dumpACL(fout, dbDumpId, InvalidDumpId, "DATABASE",
3392 : qdatname, NULL, NULL,
3393 : NULL, dba, &dbdacl);
3394 :
3395 : /*
3396 : * Now construct a DATABASE PROPERTIES archive entry to restore any
3397 : * non-default database-level properties. (The reason this must be
3398 : * separate is that we cannot put any additional commands into the TOC
3399 : * entry that has CREATE DATABASE. pg_restore would execute such a group
3400 : * in an implicit transaction block, and the backend won't allow CREATE
3401 : * DATABASE in that context.)
3402 : */
3403 120 : resetPQExpBuffer(creaQry);
3404 120 : resetPQExpBuffer(delQry);
3405 :
3406 120 : if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
3407 0 : appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
3408 : qdatname, datconnlimit);
3409 :
3410 120 : if (strcmp(datistemplate, "t") == 0)
3411 : {
3412 8 : appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
3413 : qdatname);
3414 :
3415 : /*
3416 : * The backend won't accept DROP DATABASE on a template database. We
3417 : * can deal with that by removing the template marking before the DROP
3418 : * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
3419 : * since no such command is currently supported, fake it with a direct
3420 : * UPDATE on pg_database.
3421 : */
3422 8 : appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
3423 : "SET datistemplate = false WHERE datname = ");
3424 8 : appendStringLiteralAH(delQry, datname, fout);
3425 8 : appendPQExpBufferStr(delQry, ";\n");
3426 : }
3427 :
3428 : /*
3429 : * We do not restore pg_database.dathasloginevt because it is set
3430 : * automatically on login event trigger creation.
3431 : */
3432 :
3433 : /* Add database-specific SET options */
3434 120 : dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
3435 :
3436 : /*
3437 : * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
3438 : * entry, too, for lack of a better place.
3439 : */
3440 120 : if (dopt->binary_upgrade)
3441 : {
3442 26 : appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
3443 26 : appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
3444 : "SET datfrozenxid = '%u', datminmxid = '%u'\n"
3445 : "WHERE datname = ",
3446 : frozenxid, minmxid);
3447 26 : appendStringLiteralAH(creaQry, datname, fout);
3448 26 : appendPQExpBufferStr(creaQry, ";\n");
3449 : }
3450 :
3451 120 : if (creaQry->len > 0)
3452 34 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3453 34 : ARCHIVE_OPTS(.tag = datname,
3454 : .owner = dba,
3455 : .description = "DATABASE PROPERTIES",
3456 : .section = SECTION_PRE_DATA,
3457 : .createStmt = creaQry->data,
3458 : .dropStmt = delQry->data,
3459 : .deps = &dbDumpId));
3460 :
3461 : /*
3462 : * pg_largeobject comes from the old system intact, so set its
3463 : * relfrozenxids, relminmxids and relfilenode.
3464 : */
3465 120 : if (dopt->binary_upgrade)
3466 : {
3467 : PGresult *lo_res;
3468 26 : PQExpBuffer loFrozenQry = createPQExpBuffer();
3469 26 : PQExpBuffer loOutQry = createPQExpBuffer();
3470 26 : PQExpBuffer loHorizonQry = createPQExpBuffer();
3471 : int ii_relfrozenxid,
3472 : ii_relfilenode,
3473 : ii_oid,
3474 : ii_relminmxid;
3475 :
3476 : /*
3477 : * pg_largeobject
3478 : */
3479 26 : if (fout->remoteVersion >= 90300)
3480 26 : appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid, relfilenode, oid\n"
3481 : "FROM pg_catalog.pg_class\n"
3482 : "WHERE oid IN (%u, %u);\n",
3483 : LargeObjectRelationId, LargeObjectLOidPNIndexId);
3484 : else
3485 0 : appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid, relfilenode, oid\n"
3486 : "FROM pg_catalog.pg_class\n"
3487 : "WHERE oid IN (%u, %u);\n",
3488 : LargeObjectRelationId, LargeObjectLOidPNIndexId);
3489 :
3490 26 : lo_res = ExecuteSqlQuery(fout, loFrozenQry->data, PGRES_TUPLES_OK);
3491 :
3492 26 : ii_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3493 26 : ii_relminmxid = PQfnumber(lo_res, "relminmxid");
3494 26 : ii_relfilenode = PQfnumber(lo_res, "relfilenode");
3495 26 : ii_oid = PQfnumber(lo_res, "oid");
3496 :
3497 26 : appendPQExpBufferStr(loHorizonQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3498 26 : appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
3499 78 : for (int i = 0; i < PQntuples(lo_res); ++i)
3500 : {
3501 : Oid oid;
3502 : RelFileNumber relfilenumber;
3503 :
3504 52 : appendPQExpBuffer(loHorizonQry, "UPDATE pg_catalog.pg_class\n"
3505 : "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3506 : "WHERE oid = %u;\n",
3507 52 : atooid(PQgetvalue(lo_res, i, ii_relfrozenxid)),
3508 52 : atooid(PQgetvalue(lo_res, i, ii_relminmxid)),
3509 52 : atooid(PQgetvalue(lo_res, i, ii_oid)));
3510 :
3511 52 : oid = atooid(PQgetvalue(lo_res, i, ii_oid));
3512 52 : relfilenumber = atooid(PQgetvalue(lo_res, i, ii_relfilenode));
3513 :
3514 52 : if (oid == LargeObjectRelationId)
3515 26 : appendPQExpBuffer(loOutQry,
3516 : "SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
3517 : relfilenumber);
3518 26 : else if (oid == LargeObjectLOidPNIndexId)
3519 26 : appendPQExpBuffer(loOutQry,
3520 : "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
3521 : relfilenumber);
3522 : }
3523 :
3524 26 : appendPQExpBufferStr(loOutQry,
3525 : "TRUNCATE pg_catalog.pg_largeobject;\n");
3526 26 : appendPQExpBufferStr(loOutQry, loHorizonQry->data);
3527 :
3528 26 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3529 26 : ARCHIVE_OPTS(.tag = "pg_largeobject",
3530 : .description = "pg_largeobject",
3531 : .section = SECTION_PRE_DATA,
3532 : .createStmt = loOutQry->data));
3533 :
3534 26 : PQclear(lo_res);
3535 :
3536 26 : destroyPQExpBuffer(loFrozenQry);
3537 26 : destroyPQExpBuffer(loHorizonQry);
3538 26 : destroyPQExpBuffer(loOutQry);
3539 : }
3540 :
3541 120 : PQclear(res);
3542 :
3543 120 : free(qdatname);
3544 120 : destroyPQExpBuffer(dbQry);
3545 120 : destroyPQExpBuffer(delQry);
3546 120 : destroyPQExpBuffer(creaQry);
3547 120 : destroyPQExpBuffer(labelq);
3548 120 : }
3549 :
3550 : /*
3551 : * Collect any database-specific or role-and-database-specific SET options
3552 : * for this database, and append them to outbuf.
3553 : */
3554 : static void
3555 120 : dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
3556 : const char *dbname, Oid dboid)
3557 : {
3558 120 : PGconn *conn = GetConnection(AH);
3559 120 : PQExpBuffer buf = createPQExpBuffer();
3560 : PGresult *res;
3561 :
3562 : /* First collect database-specific options */
3563 120 : printfPQExpBuffer(buf, "SELECT unnest(setconfig) FROM pg_db_role_setting "
3564 : "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3565 : dboid);
3566 :
3567 120 : res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3568 :
3569 180 : for (int i = 0; i < PQntuples(res); i++)
3570 60 : makeAlterConfigCommand(conn, PQgetvalue(res, i, 0),
3571 : "DATABASE", dbname, NULL, NULL,
3572 : outbuf);
3573 :
3574 120 : PQclear(res);
3575 :
3576 : /* Now look for role-and-database-specific options */
3577 120 : printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3578 : "FROM pg_db_role_setting s, pg_roles r "
3579 : "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3580 : dboid);
3581 :
3582 120 : res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3583 :
3584 120 : for (int i = 0; i < PQntuples(res); i++)
3585 0 : makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
3586 0 : "ROLE", PQgetvalue(res, i, 0),
3587 : "DATABASE", dbname,
3588 : outbuf);
3589 :
3590 120 : PQclear(res);
3591 :
3592 120 : destroyPQExpBuffer(buf);
3593 120 : }
3594 :
3595 : /*
3596 : * dumpEncoding: put the correct encoding into the archive
3597 : */
3598 : static void
3599 308 : dumpEncoding(Archive *AH)
3600 : {
3601 308 : const char *encname = pg_encoding_to_char(AH->encoding);
3602 308 : PQExpBuffer qry = createPQExpBuffer();
3603 :
3604 308 : pg_log_info("saving encoding = %s", encname);
3605 :
3606 308 : appendPQExpBufferStr(qry, "SET client_encoding = ");
3607 308 : appendStringLiteralAH(qry, encname, AH);
3608 308 : appendPQExpBufferStr(qry, ";\n");
3609 :
3610 308 : ArchiveEntry(AH, nilCatalogId, createDumpId(),
3611 308 : ARCHIVE_OPTS(.tag = "ENCODING",
3612 : .description = "ENCODING",
3613 : .section = SECTION_PRE_DATA,
3614 : .createStmt = qry->data));
3615 :
3616 308 : destroyPQExpBuffer(qry);
3617 308 : }
3618 :
3619 :
3620 : /*
3621 : * dumpStdStrings: put the correct escape string behavior into the archive
3622 : */
3623 : static void
3624 308 : dumpStdStrings(Archive *AH)
3625 : {
3626 308 : const char *stdstrings = AH->std_strings ? "on" : "off";
3627 308 : PQExpBuffer qry = createPQExpBuffer();
3628 :
3629 308 : pg_log_info("saving \"standard_conforming_strings = %s\"",
3630 : stdstrings);
3631 :
3632 308 : appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3633 : stdstrings);
3634 :
3635 308 : ArchiveEntry(AH, nilCatalogId, createDumpId(),
3636 308 : ARCHIVE_OPTS(.tag = "STDSTRINGS",
3637 : .description = "STDSTRINGS",
3638 : .section = SECTION_PRE_DATA,
3639 : .createStmt = qry->data));
3640 :
3641 308 : destroyPQExpBuffer(qry);
3642 308 : }
3643 :
3644 : /*
3645 : * dumpSearchPath: record the active search_path in the archive
3646 : */
3647 : static void
3648 308 : dumpSearchPath(Archive *AH)
3649 : {
3650 308 : PQExpBuffer qry = createPQExpBuffer();
3651 308 : PQExpBuffer path = createPQExpBuffer();
3652 : PGresult *res;
3653 308 : char **schemanames = NULL;
3654 308 : int nschemanames = 0;
3655 : int i;
3656 :
3657 : /*
3658 : * We use the result of current_schemas(), not the search_path GUC,
3659 : * because that might contain wildcards such as "$user", which won't
3660 : * necessarily have the same value during restore. Also, this way avoids
3661 : * listing schemas that may appear in search_path but not actually exist,
3662 : * which seems like a prudent exclusion.
3663 : */
3664 308 : res = ExecuteSqlQueryForSingleRow(AH,
3665 : "SELECT pg_catalog.current_schemas(false)");
3666 :
3667 308 : if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3668 0 : pg_fatal("could not parse result of current_schemas()");
3669 :
3670 : /*
3671 : * We use set_config(), not a simple "SET search_path" command, because
3672 : * the latter has less-clean behavior if the search path is empty. While
3673 : * that's likely to get fixed at some point, it seems like a good idea to
3674 : * be as backwards-compatible as possible in what we put into archives.
3675 : */
3676 308 : for (i = 0; i < nschemanames; i++)
3677 : {
3678 0 : if (i > 0)
3679 0 : appendPQExpBufferStr(path, ", ");
3680 0 : appendPQExpBufferStr(path, fmtId(schemanames[i]));
3681 : }
3682 :
3683 308 : appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3684 308 : appendStringLiteralAH(qry, path->data, AH);
3685 308 : appendPQExpBufferStr(qry, ", false);\n");
3686 :
3687 308 : pg_log_info("saving \"search_path = %s\"", path->data);
3688 :
3689 308 : ArchiveEntry(AH, nilCatalogId, createDumpId(),
3690 308 : ARCHIVE_OPTS(.tag = "SEARCHPATH",
3691 : .description = "SEARCHPATH",
3692 : .section = SECTION_PRE_DATA,
3693 : .createStmt = qry->data));
3694 :
3695 : /* Also save it in AH->searchpath, in case we're doing plain text dump */
3696 308 : AH->searchpath = pg_strdup(qry->data);
3697 :
3698 308 : free(schemanames);
3699 308 : PQclear(res);
3700 308 : destroyPQExpBuffer(qry);
3701 308 : destroyPQExpBuffer(path);
3702 308 : }
3703 :
3704 :
3705 : /*
3706 : * getLOs:
3707 : * Collect schema-level data about large objects
3708 : */
3709 : static void
3710 256 : getLOs(Archive *fout)
3711 : {
3712 256 : DumpOptions *dopt = fout->dopt;
3713 256 : PQExpBuffer loQry = createPQExpBuffer();
3714 : PGresult *res;
3715 : int ntups;
3716 : int i;
3717 : int n;
3718 : int i_oid;
3719 : int i_lomowner;
3720 : int i_lomacl;
3721 : int i_acldefault;
3722 :
3723 256 : pg_log_info("reading large objects");
3724 :
3725 : /*
3726 : * Fetch LO OIDs and owner/ACL data. Order the data so that all the blobs
3727 : * with the same owner/ACL appear together.
3728 : */
3729 256 : appendPQExpBufferStr(loQry,
3730 : "SELECT oid, lomowner, lomacl, "
3731 : "acldefault('L', lomowner) AS acldefault "
3732 : "FROM pg_largeobject_metadata "
3733 : "ORDER BY lomowner, lomacl::pg_catalog.text, oid");
3734 :
3735 256 : res = ExecuteSqlQuery(fout, loQry->data, PGRES_TUPLES_OK);
3736 :
3737 256 : i_oid = PQfnumber(res, "oid");
3738 256 : i_lomowner = PQfnumber(res, "lomowner");
3739 256 : i_lomacl = PQfnumber(res, "lomacl");
3740 256 : i_acldefault = PQfnumber(res, "acldefault");
3741 :
3742 256 : ntups = PQntuples(res);
3743 :
3744 : /*
3745 : * Group the blobs into suitably-sized groups that have the same owner and
3746 : * ACL setting, and build a metadata and a data DumpableObject for each
3747 : * group. (If we supported initprivs for blobs, we'd have to insist that
3748 : * groups also share initprivs settings, since the DumpableObject only has
3749 : * room for one.) i is the index of the first tuple in the current group,
3750 : * and n is the number of tuples we include in the group.
3751 : */
3752 402 : for (i = 0; i < ntups; i += n)
3753 : {
3754 146 : Oid thisoid = atooid(PQgetvalue(res, i, i_oid));
3755 146 : char *thisowner = PQgetvalue(res, i, i_lomowner);
3756 146 : char *thisacl = PQgetvalue(res, i, i_lomacl);
3757 : LoInfo *loinfo;
3758 : DumpableObject *lodata;
3759 : char namebuf[64];
3760 :
3761 : /* Scan to find first tuple not to be included in group */
3762 146 : n = 1;
3763 166 : while (n < MAX_BLOBS_PER_ARCHIVE_ENTRY && i + n < ntups)
3764 : {
3765 88 : if (strcmp(thisowner, PQgetvalue(res, i + n, i_lomowner)) != 0 ||
3766 88 : strcmp(thisacl, PQgetvalue(res, i + n, i_lomacl)) != 0)
3767 : break;
3768 20 : n++;
3769 : }
3770 :
3771 : /* Build the metadata DumpableObject */
3772 146 : loinfo = (LoInfo *) pg_malloc(offsetof(LoInfo, looids) + n * sizeof(Oid));
3773 :
3774 146 : loinfo->dobj.objType = DO_LARGE_OBJECT;
3775 146 : loinfo->dobj.catId.tableoid = LargeObjectRelationId;
3776 146 : loinfo->dobj.catId.oid = thisoid;
3777 146 : AssignDumpId(&loinfo->dobj);
3778 :
3779 146 : if (n > 1)
3780 10 : snprintf(namebuf, sizeof(namebuf), "%u..%u", thisoid,
3781 10 : atooid(PQgetvalue(res, i + n - 1, i_oid)));
3782 : else
3783 136 : snprintf(namebuf, sizeof(namebuf), "%u", thisoid);
3784 146 : loinfo->dobj.name = pg_strdup(namebuf);
3785 146 : loinfo->dacl.acl = pg_strdup(thisacl);
3786 146 : loinfo->dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
3787 146 : loinfo->dacl.privtype = 0;
3788 146 : loinfo->dacl.initprivs = NULL;
3789 146 : loinfo->rolname = getRoleName(thisowner);
3790 146 : loinfo->numlos = n;
3791 146 : loinfo->looids[0] = thisoid;
3792 : /* Collect OIDs of the remaining blobs in this group */
3793 166 : for (int k = 1; k < n; k++)
3794 : {
3795 : CatalogId extraID;
3796 :
3797 20 : loinfo->looids[k] = atooid(PQgetvalue(res, i + k, i_oid));
3798 :
3799 : /* Make sure we can look up loinfo by any of the blobs' OIDs */
3800 20 : extraID.tableoid = LargeObjectRelationId;
3801 20 : extraID.oid = loinfo->looids[k];
3802 20 : recordAdditionalCatalogID(extraID, &loinfo->dobj);
3803 : }
3804 :
3805 : /* LOs have data */
3806 146 : loinfo->dobj.components |= DUMP_COMPONENT_DATA;
3807 :
3808 : /* Mark whether LO group has a non-empty ACL */
3809 146 : if (!PQgetisnull(res, i, i_lomacl))
3810 68 : loinfo->dobj.components |= DUMP_COMPONENT_ACL;
3811 :
3812 : /*
3813 : * In binary-upgrade mode for LOs, we do *not* dump out the LO data,
3814 : * as it will be copied by pg_upgrade, which simply copies the
3815 : * pg_largeobject table. We *do* however dump out anything but the
3816 : * data, as pg_upgrade copies just pg_largeobject, but not
3817 : * pg_largeobject_metadata, after the dump is restored.
3818 : */
3819 146 : if (dopt->binary_upgrade)
3820 6 : loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
3821 :
3822 : /*
3823 : * Create a "BLOBS" data item for the group, too. This is just a
3824 : * placeholder for sorting; it carries no data now.
3825 : */
3826 146 : lodata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
3827 146 : lodata->objType = DO_LARGE_OBJECT_DATA;
3828 146 : lodata->catId = nilCatalogId;
3829 146 : AssignDumpId(lodata);
3830 146 : lodata->name = pg_strdup(namebuf);
3831 146 : lodata->components |= DUMP_COMPONENT_DATA;
3832 : /* Set up explicit dependency from data to metadata */
3833 146 : lodata->dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
3834 146 : lodata->dependencies[0] = loinfo->dobj.dumpId;
3835 146 : lodata->nDeps = lodata->allocDeps = 1;
3836 : }
3837 :
3838 256 : PQclear(res);
3839 256 : destroyPQExpBuffer(loQry);
3840 256 : }
3841 :
3842 : /*
3843 : * dumpLO
3844 : *
3845 : * dump the definition (metadata) of the given large object group
3846 : */
3847 : static void
3848 146 : dumpLO(Archive *fout, const LoInfo *loinfo)
3849 : {
3850 146 : PQExpBuffer cquery = createPQExpBuffer();
3851 :
3852 : /*
3853 : * The "definition" is just a newline-separated list of OIDs. We need to
3854 : * put something into the dropStmt too, but it can just be a comment.
3855 : */
3856 312 : for (int i = 0; i < loinfo->numlos; i++)
3857 166 : appendPQExpBuffer(cquery, "%u\n", loinfo->looids[i]);
3858 :
3859 146 : if (loinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
3860 146 : ArchiveEntry(fout, loinfo->dobj.catId, loinfo->dobj.dumpId,
3861 146 : ARCHIVE_OPTS(.tag = loinfo->dobj.name,
3862 : .owner = loinfo->rolname,
3863 : .description = "BLOB METADATA",
3864 : .section = SECTION_DATA,
3865 : .createStmt = cquery->data,
3866 : .dropStmt = "-- dummy"));
3867 :
3868 : /*
3869 : * Dump per-blob comments and seclabels if any. We assume these are rare
3870 : * enough that it's okay to generate retail TOC entries for them.
3871 : */
3872 146 : if (loinfo->dobj.dump & (DUMP_COMPONENT_COMMENT |
3873 : DUMP_COMPONENT_SECLABEL))
3874 : {
3875 176 : for (int i = 0; i < loinfo->numlos; i++)
3876 : {
3877 : CatalogId catId;
3878 : char namebuf[32];
3879 :
3880 : /* Build identifying info for this blob */
3881 98 : catId.tableoid = loinfo->dobj.catId.tableoid;
3882 98 : catId.oid = loinfo->looids[i];
3883 98 : snprintf(namebuf, sizeof(namebuf), "%u", loinfo->looids[i]);
3884 :
3885 98 : if (loinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
3886 98 : dumpComment(fout, "LARGE OBJECT", namebuf,
3887 : NULL, loinfo->rolname,
3888 : catId, 0, loinfo->dobj.dumpId);
3889 :
3890 98 : if (loinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
3891 0 : dumpSecLabel(fout, "LARGE OBJECT", namebuf,
3892 : NULL, loinfo->rolname,
3893 : catId, 0, loinfo->dobj.dumpId);
3894 : }
3895 : }
3896 :
3897 : /*
3898 : * Dump the ACLs if any (remember that all blobs in the group will have
3899 : * the same ACL). If there's just one blob, dump a simple ACL entry; if
3900 : * there's more, make a "LARGE OBJECTS" entry that really contains only
3901 : * the ACL for the first blob. _printTocEntry() will be cued by the tag
3902 : * string to emit a mutated version for each blob.
3903 : */
3904 146 : if (loinfo->dobj.dump & DUMP_COMPONENT_ACL)
3905 : {
3906 : char namebuf[32];
3907 :
3908 : /* Build identifying info for the first blob */
3909 68 : snprintf(namebuf, sizeof(namebuf), "%u", loinfo->looids[0]);
3910 :
3911 68 : if (loinfo->numlos > 1)
3912 : {
3913 : char tagbuf[64];
3914 :
3915 0 : snprintf(tagbuf, sizeof(tagbuf), "LARGE OBJECTS %u..%u",
3916 0 : loinfo->looids[0], loinfo->looids[loinfo->numlos - 1]);
3917 :
3918 0 : dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId,
3919 : "LARGE OBJECT", namebuf, NULL, NULL,
3920 : tagbuf, loinfo->rolname, &loinfo->dacl);
3921 : }
3922 : else
3923 : {
3924 68 : dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId,
3925 : "LARGE OBJECT", namebuf, NULL, NULL,
3926 : NULL, loinfo->rolname, &loinfo->dacl);
3927 : }
3928 : }
3929 :
3930 146 : destroyPQExpBuffer(cquery);
3931 146 : }
3932 :
3933 : /*
3934 : * dumpLOs:
3935 : * dump the data contents of the large objects in the given group
3936 : */
3937 : static int
3938 132 : dumpLOs(Archive *fout, const void *arg)
3939 : {
3940 132 : const LoInfo *loinfo = (const LoInfo *) arg;
3941 132 : PGconn *conn = GetConnection(fout);
3942 : char buf[LOBBUFSIZE];
3943 :
3944 132 : pg_log_info("saving large objects \"%s\"", loinfo->dobj.name);
3945 :
3946 280 : for (int i = 0; i < loinfo->numlos; i++)
3947 : {
3948 148 : Oid loOid = loinfo->looids[i];
3949 : int loFd;
3950 : int cnt;
3951 :
3952 : /* Open the LO */
3953 148 : loFd = lo_open(conn, loOid, INV_READ);
3954 148 : if (loFd == -1)
3955 0 : pg_fatal("could not open large object %u: %s",
3956 : loOid, PQerrorMessage(conn));
3957 :
3958 148 : StartLO(fout, loOid);
3959 :
3960 : /* Now read it in chunks, sending data to archive */
3961 : do
3962 : {
3963 226 : cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
3964 226 : if (cnt < 0)
3965 0 : pg_fatal("error reading large object %u: %s",
3966 : loOid, PQerrorMessage(conn));
3967 :
3968 226 : WriteData(fout, buf, cnt);
3969 226 : } while (cnt > 0);
3970 :
3971 148 : lo_close(conn, loFd);
3972 :
3973 148 : EndLO(fout, loOid);
3974 : }
3975 :
3976 132 : return 1;
3977 : }
3978 :
3979 : /*
3980 : * getPolicies
3981 : * get information about all RLS policies on dumpable tables.
3982 : */
3983 : void
3984 308 : getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
3985 : {
3986 : PQExpBuffer query;
3987 : PQExpBuffer tbloids;
3988 : PGresult *res;
3989 : PolicyInfo *polinfo;
3990 : int i_oid;
3991 : int i_tableoid;
3992 : int i_polrelid;
3993 : int i_polname;
3994 : int i_polcmd;
3995 : int i_polpermissive;
3996 : int i_polroles;
3997 : int i_polqual;
3998 : int i_polwithcheck;
3999 : int i,
4000 : j,
4001 : ntups;
4002 :
4003 : /* No policies before 9.5 */
4004 308 : if (fout->remoteVersion < 90500)
4005 0 : return;
4006 :
4007 308 : query = createPQExpBuffer();
4008 308 : tbloids = createPQExpBuffer();
4009 :
4010 : /*
4011 : * Identify tables of interest, and check which ones have RLS enabled.
4012 : */
4013 308 : appendPQExpBufferChar(tbloids, '{');
4014 80978 : for (i = 0; i < numTables; i++)
4015 : {
4016 80670 : TableInfo *tbinfo = &tblinfo[i];
4017 :
4018 : /* Ignore row security on tables not to be dumped */
4019 80670 : if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
4020 68678 : continue;
4021 :
4022 : /* It can't have RLS or policies if it's not a table */
4023 11992 : if (tbinfo->relkind != RELKIND_RELATION &&
4024 3588 : tbinfo->relkind != RELKIND_PARTITIONED_TABLE)
4025 2544 : continue;
4026 :
4027 : /* Add it to the list of table OIDs to be probed below */
4028 9448 : if (tbloids->len > 1) /* do we have more than the '{'? */
4029 9252 : appendPQExpBufferChar(tbloids, ',');
4030 9448 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
4031 :
4032 : /* Is RLS enabled? (That's separate from whether it has policies) */
4033 9448 : if (tbinfo->rowsec)
4034 : {
4035 104 : tbinfo->dobj.components |= DUMP_COMPONENT_POLICY;
4036 :
4037 : /*
4038 : * We represent RLS being enabled on a table by creating a
4039 : * PolicyInfo object with null polname.
4040 : *
4041 : * Note: use tableoid 0 so that this object won't be mistaken for
4042 : * something that pg_depend entries apply to.
4043 : */
4044 104 : polinfo = pg_malloc(sizeof(PolicyInfo));
4045 104 : polinfo->dobj.objType = DO_POLICY;
4046 104 : polinfo->dobj.catId.tableoid = 0;
4047 104 : polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
4048 104 : AssignDumpId(&polinfo->dobj);
4049 104 : polinfo->dobj.namespace = tbinfo->dobj.namespace;
4050 104 : polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
4051 104 : polinfo->poltable = tbinfo;
4052 104 : polinfo->polname = NULL;
4053 104 : polinfo->polcmd = '\0';
4054 104 : polinfo->polpermissive = 0;
4055 104 : polinfo->polroles = NULL;
4056 104 : polinfo->polqual = NULL;
4057 104 : polinfo->polwithcheck = NULL;
4058 : }
4059 : }
4060 308 : appendPQExpBufferChar(tbloids, '}');
4061 :
4062 : /*
4063 : * Now, read all RLS policies belonging to the tables of interest, and
4064 : * create PolicyInfo objects for them. (Note that we must filter the
4065 : * results server-side not locally, because we dare not apply pg_get_expr
4066 : * to tables we don't have lock on.)
4067 : */
4068 308 : pg_log_info("reading row-level security policies");
4069 :
4070 308 : printfPQExpBuffer(query,
4071 : "SELECT pol.oid, pol.tableoid, pol.polrelid, pol.polname, pol.polcmd, ");
4072 308 : if (fout->remoteVersion >= 100000)
4073 308 : appendPQExpBufferStr(query, "pol.polpermissive, ");
4074 : else
4075 0 : appendPQExpBufferStr(query, "'t' as polpermissive, ");
4076 308 : appendPQExpBuffer(query,
4077 : "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
4078 : " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
4079 : "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
4080 : "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
4081 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
4082 : "JOIN pg_catalog.pg_policy pol ON (src.tbloid = pol.polrelid)",
4083 : tbloids->data);
4084 :
4085 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4086 :
4087 308 : ntups = PQntuples(res);
4088 308 : if (ntups > 0)
4089 : {
4090 84 : i_oid = PQfnumber(res, "oid");
4091 84 : i_tableoid = PQfnumber(res, "tableoid");
4092 84 : i_polrelid = PQfnumber(res, "polrelid");
4093 84 : i_polname = PQfnumber(res, "polname");
4094 84 : i_polcmd = PQfnumber(res, "polcmd");
4095 84 : i_polpermissive = PQfnumber(res, "polpermissive");
4096 84 : i_polroles = PQfnumber(res, "polroles");
4097 84 : i_polqual = PQfnumber(res, "polqual");
4098 84 : i_polwithcheck = PQfnumber(res, "polwithcheck");
4099 :
4100 84 : polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
4101 :
4102 618 : for (j = 0; j < ntups; j++)
4103 : {
4104 534 : Oid polrelid = atooid(PQgetvalue(res, j, i_polrelid));
4105 534 : TableInfo *tbinfo = findTableByOid(polrelid);
4106 :
4107 534 : tbinfo->dobj.components |= DUMP_COMPONENT_POLICY;
4108 :
4109 534 : polinfo[j].dobj.objType = DO_POLICY;
4110 534 : polinfo[j].dobj.catId.tableoid =
4111 534 : atooid(PQgetvalue(res, j, i_tableoid));
4112 534 : polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
4113 534 : AssignDumpId(&polinfo[j].dobj);
4114 534 : polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4115 534 : polinfo[j].poltable = tbinfo;
4116 534 : polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
4117 534 : polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
4118 :
4119 534 : polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
4120 534 : polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
4121 :
4122 534 : if (PQgetisnull(res, j, i_polroles))
4123 238 : polinfo[j].polroles = NULL;
4124 : else
4125 296 : polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
4126 :
4127 534 : if (PQgetisnull(res, j, i_polqual))
4128 74 : polinfo[j].polqual = NULL;
4129 : else
4130 460 : polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
4131 :
4132 534 : if (PQgetisnull(res, j, i_polwithcheck))
4133 282 : polinfo[j].polwithcheck = NULL;
4134 : else
4135 252 : polinfo[j].polwithcheck
4136 252 : = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
4137 : }
4138 : }
4139 :
4140 308 : PQclear(res);
4141 :
4142 308 : destroyPQExpBuffer(query);
4143 308 : destroyPQExpBuffer(tbloids);
4144 : }
4145 :
4146 : /*
4147 : * dumpPolicy
4148 : * dump the definition of the given policy
4149 : */
4150 : static void
4151 638 : dumpPolicy(Archive *fout, const PolicyInfo *polinfo)
4152 : {
4153 638 : DumpOptions *dopt = fout->dopt;
4154 638 : TableInfo *tbinfo = polinfo->poltable;
4155 : PQExpBuffer query;
4156 : PQExpBuffer delqry;
4157 : PQExpBuffer polprefix;
4158 : char *qtabname;
4159 : const char *cmd;
4160 : char *tag;
4161 :
4162 : /* Do nothing in data-only dump */
4163 638 : if (dopt->dataOnly)
4164 56 : return;
4165 :
4166 : /*
4167 : * If polname is NULL, then this record is just indicating that ROW LEVEL
4168 : * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
4169 : * ROW LEVEL SECURITY.
4170 : */
4171 582 : if (polinfo->polname == NULL)
4172 : {
4173 96 : query = createPQExpBuffer();
4174 :
4175 96 : appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
4176 96 : fmtQualifiedDumpable(tbinfo));
4177 :
4178 : /*
4179 : * We must emit the ROW SECURITY object's dependency on its table
4180 : * explicitly, because it will not match anything in pg_depend (unlike
4181 : * the case for other PolicyInfo objects).
4182 : */
4183 96 : if (polinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4184 96 : ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
4185 96 : ARCHIVE_OPTS(.tag = polinfo->dobj.name,
4186 : .namespace = polinfo->dobj.namespace->dobj.name,
4187 : .owner = tbinfo->rolname,
4188 : .description = "ROW SECURITY",
4189 : .section = SECTION_POST_DATA,
4190 : .createStmt = query->data,
4191 : .deps = &(tbinfo->dobj.dumpId),
4192 : .nDeps = 1));
4193 :
4194 96 : destroyPQExpBuffer(query);
4195 96 : return;
4196 : }
4197 :
4198 486 : if (polinfo->polcmd == '*')
4199 162 : cmd = "";
4200 324 : else if (polinfo->polcmd == 'r')
4201 86 : cmd = " FOR SELECT";
4202 238 : else if (polinfo->polcmd == 'a')
4203 66 : cmd = " FOR INSERT";
4204 172 : else if (polinfo->polcmd == 'w')
4205 86 : cmd = " FOR UPDATE";
4206 86 : else if (polinfo->polcmd == 'd')
4207 86 : cmd = " FOR DELETE";
4208 : else
4209 0 : pg_fatal("unexpected policy command type: %c",
4210 : polinfo->polcmd);
4211 :
4212 486 : query = createPQExpBuffer();
4213 486 : delqry = createPQExpBuffer();
4214 486 : polprefix = createPQExpBuffer();
4215 :
4216 486 : qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
4217 :
4218 486 : appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
4219 :
4220 486 : appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
4221 486 : !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
4222 :
4223 486 : if (polinfo->polroles != NULL)
4224 264 : appendPQExpBuffer(query, " TO %s", polinfo->polroles);
4225 :
4226 486 : if (polinfo->polqual != NULL)
4227 420 : appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
4228 :
4229 486 : if (polinfo->polwithcheck != NULL)
4230 228 : appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
4231 :
4232 486 : appendPQExpBufferStr(query, ";\n");
4233 :
4234 486 : appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
4235 486 : appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
4236 :
4237 486 : appendPQExpBuffer(polprefix, "POLICY %s ON",
4238 486 : fmtId(polinfo->polname));
4239 :
4240 486 : tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
4241 :
4242 486 : if (polinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4243 486 : ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
4244 486 : ARCHIVE_OPTS(.tag = tag,
4245 : .namespace = polinfo->dobj.namespace->dobj.name,
4246 : .owner = tbinfo->rolname,
4247 : .description = "POLICY",
4248 : .section = SECTION_POST_DATA,
4249 : .createStmt = query->data,
4250 : .dropStmt = delqry->data));
4251 :
4252 486 : if (polinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4253 0 : dumpComment(fout, polprefix->data, qtabname,
4254 0 : tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
4255 : polinfo->dobj.catId, 0, polinfo->dobj.dumpId);
4256 :
4257 486 : free(tag);
4258 486 : destroyPQExpBuffer(query);
4259 486 : destroyPQExpBuffer(delqry);
4260 486 : destroyPQExpBuffer(polprefix);
4261 486 : free(qtabname);
4262 : }
4263 :
4264 : /*
4265 : * getPublications
4266 : * get information about publications
4267 : */
4268 : void
4269 308 : getPublications(Archive *fout)
4270 : {
4271 308 : DumpOptions *dopt = fout->dopt;
4272 : PQExpBuffer query;
4273 : PGresult *res;
4274 : PublicationInfo *pubinfo;
4275 : int i_tableoid;
4276 : int i_oid;
4277 : int i_pubname;
4278 : int i_pubowner;
4279 : int i_puballtables;
4280 : int i_pubinsert;
4281 : int i_pubupdate;
4282 : int i_pubdelete;
4283 : int i_pubtruncate;
4284 : int i_pubviaroot;
4285 : int i,
4286 : ntups;
4287 :
4288 308 : if (dopt->no_publications || fout->remoteVersion < 100000)
4289 0 : return;
4290 :
4291 308 : query = createPQExpBuffer();
4292 :
4293 : /* Get the publications. */
4294 308 : if (fout->remoteVersion >= 130000)
4295 308 : appendPQExpBufferStr(query,
4296 : "SELECT p.tableoid, p.oid, p.pubname, "
4297 : "p.pubowner, "
4298 : "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, p.pubviaroot "
4299 : "FROM pg_publication p");
4300 0 : else if (fout->remoteVersion >= 110000)
4301 0 : appendPQExpBufferStr(query,
4302 : "SELECT p.tableoid, p.oid, p.pubname, "
4303 : "p.pubowner, "
4304 : "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, p.pubtruncate, false AS pubviaroot "
4305 : "FROM pg_publication p");
4306 : else
4307 0 : appendPQExpBufferStr(query,
4308 : "SELECT p.tableoid, p.oid, p.pubname, "
4309 : "p.pubowner, "
4310 : "p.puballtables, p.pubinsert, p.pubupdate, p.pubdelete, false AS pubtruncate, false AS pubviaroot "
4311 : "FROM pg_publication p");
4312 :
4313 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4314 :
4315 308 : ntups = PQntuples(res);
4316 :
4317 308 : if (ntups == 0)
4318 220 : goto cleanup;
4319 :
4320 88 : i_tableoid = PQfnumber(res, "tableoid");
4321 88 : i_oid = PQfnumber(res, "oid");
4322 88 : i_pubname = PQfnumber(res, "pubname");
4323 88 : i_pubowner = PQfnumber(res, "pubowner");
4324 88 : i_puballtables = PQfnumber(res, "puballtables");
4325 88 : i_pubinsert = PQfnumber(res, "pubinsert");
4326 88 : i_pubupdate = PQfnumber(res, "pubupdate");
4327 88 : i_pubdelete = PQfnumber(res, "pubdelete");
4328 88 : i_pubtruncate = PQfnumber(res, "pubtruncate");
4329 88 : i_pubviaroot = PQfnumber(res, "pubviaroot");
4330 :
4331 88 : pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
4332 :
4333 434 : for (i = 0; i < ntups; i++)
4334 : {
4335 346 : pubinfo[i].dobj.objType = DO_PUBLICATION;
4336 346 : pubinfo[i].dobj.catId.tableoid =
4337 346 : atooid(PQgetvalue(res, i, i_tableoid));
4338 346 : pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4339 346 : AssignDumpId(&pubinfo[i].dobj);
4340 346 : pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
4341 346 : pubinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_pubowner));
4342 346 : pubinfo[i].puballtables =
4343 346 : (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
4344 346 : pubinfo[i].pubinsert =
4345 346 : (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
4346 346 : pubinfo[i].pubupdate =
4347 346 : (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
4348 346 : pubinfo[i].pubdelete =
4349 346 : (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
4350 346 : pubinfo[i].pubtruncate =
4351 346 : (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
4352 346 : pubinfo[i].pubviaroot =
4353 346 : (strcmp(PQgetvalue(res, i, i_pubviaroot), "t") == 0);
4354 :
4355 : /* Decide whether we want to dump it */
4356 346 : selectDumpableObject(&(pubinfo[i].dobj), fout);
4357 : }
4358 :
4359 88 : cleanup:
4360 308 : PQclear(res);
4361 :
4362 308 : destroyPQExpBuffer(query);
4363 : }
4364 :
4365 : /*
4366 : * dumpPublication
4367 : * dump the definition of the given publication
4368 : */
4369 : static void
4370 282 : dumpPublication(Archive *fout, const PublicationInfo *pubinfo)
4371 : {
4372 282 : DumpOptions *dopt = fout->dopt;
4373 : PQExpBuffer delq;
4374 : PQExpBuffer query;
4375 : char *qpubname;
4376 282 : bool first = true;
4377 :
4378 : /* Do nothing in data-only dump */
4379 282 : if (dopt->dataOnly)
4380 24 : return;
4381 :
4382 258 : delq = createPQExpBuffer();
4383 258 : query = createPQExpBuffer();
4384 :
4385 258 : qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
4386 :
4387 258 : appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
4388 : qpubname);
4389 :
4390 258 : appendPQExpBuffer(query, "CREATE PUBLICATION %s",
4391 : qpubname);
4392 :
4393 258 : if (pubinfo->puballtables)
4394 66 : appendPQExpBufferStr(query, " FOR ALL TABLES");
4395 :
4396 258 : appendPQExpBufferStr(query, " WITH (publish = '");
4397 258 : if (pubinfo->pubinsert)
4398 : {
4399 194 : appendPQExpBufferStr(query, "insert");
4400 194 : first = false;
4401 : }
4402 :
4403 258 : if (pubinfo->pubupdate)
4404 : {
4405 194 : if (!first)
4406 194 : appendPQExpBufferStr(query, ", ");
4407 :
4408 194 : appendPQExpBufferStr(query, "update");
4409 194 : first = false;
4410 : }
4411 :
4412 258 : if (pubinfo->pubdelete)
4413 : {
4414 194 : if (!first)
4415 194 : appendPQExpBufferStr(query, ", ");
4416 :
4417 194 : appendPQExpBufferStr(query, "delete");
4418 194 : first = false;
4419 : }
4420 :
4421 258 : if (pubinfo->pubtruncate)
4422 : {
4423 194 : if (!first)
4424 194 : appendPQExpBufferStr(query, ", ");
4425 :
4426 194 : appendPQExpBufferStr(query, "truncate");
4427 194 : first = false;
4428 : }
4429 :
4430 258 : appendPQExpBufferChar(query, '\'');
4431 :
4432 258 : if (pubinfo->pubviaroot)
4433 0 : appendPQExpBufferStr(query, ", publish_via_partition_root = true");
4434 :
4435 258 : appendPQExpBufferStr(query, ");\n");
4436 :
4437 258 : if (pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4438 258 : ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
4439 258 : ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
4440 : .owner = pubinfo->rolname,
4441 : .description = "PUBLICATION",
4442 : .section = SECTION_POST_DATA,
4443 : .createStmt = query->data,
4444 : .dropStmt = delq->data));
4445 :
4446 258 : if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4447 64 : dumpComment(fout, "PUBLICATION", qpubname,
4448 : NULL, pubinfo->rolname,
4449 : pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4450 :
4451 258 : if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4452 0 : dumpSecLabel(fout, "PUBLICATION", qpubname,
4453 : NULL, pubinfo->rolname,
4454 : pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4455 :
4456 258 : destroyPQExpBuffer(delq);
4457 258 : destroyPQExpBuffer(query);
4458 258 : free(qpubname);
4459 : }
4460 :
4461 : /*
4462 : * getPublicationNamespaces
4463 : * get information about publication membership for dumpable schemas.
4464 : */
4465 : void
4466 308 : getPublicationNamespaces(Archive *fout)
4467 : {
4468 : PQExpBuffer query;
4469 : PGresult *res;
4470 : PublicationSchemaInfo *pubsinfo;
4471 308 : DumpOptions *dopt = fout->dopt;
4472 : int i_tableoid;
4473 : int i_oid;
4474 : int i_pnpubid;
4475 : int i_pnnspid;
4476 : int i,
4477 : j,
4478 : ntups;
4479 :
4480 308 : if (dopt->no_publications || fout->remoteVersion < 150000)
4481 0 : return;
4482 :
4483 308 : query = createPQExpBuffer();
4484 :
4485 : /* Collect all publication membership info. */
4486 308 : appendPQExpBufferStr(query,
4487 : "SELECT tableoid, oid, pnpubid, pnnspid "
4488 : "FROM pg_catalog.pg_publication_namespace");
4489 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4490 :
4491 308 : ntups = PQntuples(res);
4492 :
4493 308 : i_tableoid = PQfnumber(res, "tableoid");
4494 308 : i_oid = PQfnumber(res, "oid");
4495 308 : i_pnpubid = PQfnumber(res, "pnpubid");
4496 308 : i_pnnspid = PQfnumber(res, "pnnspid");
4497 :
4498 : /* this allocation may be more than we need */
4499 308 : pubsinfo = pg_malloc(ntups * sizeof(PublicationSchemaInfo));
4500 308 : j = 0;
4501 :
4502 480 : for (i = 0; i < ntups; i++)
4503 : {
4504 172 : Oid pnpubid = atooid(PQgetvalue(res, i, i_pnpubid));
4505 172 : Oid pnnspid = atooid(PQgetvalue(res, i, i_pnnspid));
4506 : PublicationInfo *pubinfo;
4507 : NamespaceInfo *nspinfo;
4508 :
4509 : /*
4510 : * Ignore any entries for which we aren't interested in either the
4511 : * publication or the rel.
4512 : */
4513 172 : pubinfo = findPublicationByOid(pnpubid);
4514 172 : if (pubinfo == NULL)
4515 0 : continue;
4516 172 : nspinfo = findNamespaceByOid(pnnspid);
4517 172 : if (nspinfo == NULL)
4518 0 : continue;
4519 :
4520 : /*
4521 : * We always dump publication namespaces unless the corresponding
4522 : * namespace is excluded from the dump.
4523 : */
4524 172 : if (nspinfo->dobj.dump == DUMP_COMPONENT_NONE)
4525 30 : continue;
4526 :
4527 : /* OK, make a DumpableObject for this relationship */
4528 142 : pubsinfo[j].dobj.objType = DO_PUBLICATION_TABLE_IN_SCHEMA;
4529 142 : pubsinfo[j].dobj.catId.tableoid =
4530 142 : atooid(PQgetvalue(res, i, i_tableoid));
4531 142 : pubsinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4532 142 : AssignDumpId(&pubsinfo[j].dobj);
4533 142 : pubsinfo[j].dobj.namespace = nspinfo->dobj.namespace;
4534 142 : pubsinfo[j].dobj.name = nspinfo->dobj.name;
4535 142 : pubsinfo[j].publication = pubinfo;
4536 142 : pubsinfo[j].pubschema = nspinfo;
4537 :
4538 : /* Decide whether we want to dump it */
4539 142 : selectDumpablePublicationObject(&(pubsinfo[j].dobj), fout);
4540 :
4541 142 : j++;
4542 : }
4543 :
4544 308 : PQclear(res);
4545 308 : destroyPQExpBuffer(query);
4546 : }
4547 :
4548 : /*
4549 : * getPublicationTables
4550 : * get information about publication membership for dumpable tables.
4551 : */
4552 : void
4553 308 : getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
4554 : {
4555 : PQExpBuffer query;
4556 : PGresult *res;
4557 : PublicationRelInfo *pubrinfo;
4558 308 : DumpOptions *dopt = fout->dopt;
4559 : int i_tableoid;
4560 : int i_oid;
4561 : int i_prpubid;
4562 : int i_prrelid;
4563 : int i_prrelqual;
4564 : int i_prattrs;
4565 : int i,
4566 : j,
4567 : ntups;
4568 :
4569 308 : if (dopt->no_publications || fout->remoteVersion < 100000)
4570 0 : return;
4571 :
4572 308 : query = createPQExpBuffer();
4573 :
4574 : /* Collect all publication membership info. */
4575 308 : if (fout->remoteVersion >= 150000)
4576 308 : appendPQExpBufferStr(query,
4577 : "SELECT tableoid, oid, prpubid, prrelid, "
4578 : "pg_catalog.pg_get_expr(prqual, prrelid) AS prrelqual, "
4579 : "(CASE\n"
4580 : " WHEN pr.prattrs IS NOT NULL THEN\n"
4581 : " (SELECT array_agg(attname)\n"
4582 : " FROM\n"
4583 : " pg_catalog.generate_series(0, pg_catalog.array_upper(pr.prattrs::pg_catalog.int2[], 1)) s,\n"
4584 : " pg_catalog.pg_attribute\n"
4585 : " WHERE attrelid = pr.prrelid AND attnum = prattrs[s])\n"
4586 : " ELSE NULL END) prattrs "
4587 : "FROM pg_catalog.pg_publication_rel pr");
4588 : else
4589 0 : appendPQExpBufferStr(query,
4590 : "SELECT tableoid, oid, prpubid, prrelid, "
4591 : "NULL AS prrelqual, NULL AS prattrs "
4592 : "FROM pg_catalog.pg_publication_rel");
4593 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4594 :
4595 308 : ntups = PQntuples(res);
4596 :
4597 308 : i_tableoid = PQfnumber(res, "tableoid");
4598 308 : i_oid = PQfnumber(res, "oid");
4599 308 : i_prpubid = PQfnumber(res, "prpubid");
4600 308 : i_prrelid = PQfnumber(res, "prrelid");
4601 308 : i_prrelqual = PQfnumber(res, "prrelqual");
4602 308 : i_prattrs = PQfnumber(res, "prattrs");
4603 :
4604 : /* this allocation may be more than we need */
4605 308 : pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
4606 308 : j = 0;
4607 :
4608 910 : for (i = 0; i < ntups; i++)
4609 : {
4610 602 : Oid prpubid = atooid(PQgetvalue(res, i, i_prpubid));
4611 602 : Oid prrelid = atooid(PQgetvalue(res, i, i_prrelid));
4612 : PublicationInfo *pubinfo;
4613 : TableInfo *tbinfo;
4614 :
4615 : /*
4616 : * Ignore any entries for which we aren't interested in either the
4617 : * publication or the rel.
4618 : */
4619 602 : pubinfo = findPublicationByOid(prpubid);
4620 602 : if (pubinfo == NULL)
4621 0 : continue;
4622 602 : tbinfo = findTableByOid(prrelid);
4623 602 : if (tbinfo == NULL)
4624 0 : continue;
4625 :
4626 : /*
4627 : * Ignore publication membership of tables whose definitions are not
4628 : * to be dumped.
4629 : */
4630 602 : if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
4631 92 : continue;
4632 :
4633 : /* OK, make a DumpableObject for this relationship */
4634 510 : pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
4635 510 : pubrinfo[j].dobj.catId.tableoid =
4636 510 : atooid(PQgetvalue(res, i, i_tableoid));
4637 510 : pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4638 510 : AssignDumpId(&pubrinfo[j].dobj);
4639 510 : pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4640 510 : pubrinfo[j].dobj.name = tbinfo->dobj.name;
4641 510 : pubrinfo[j].publication = pubinfo;
4642 510 : pubrinfo[j].pubtable = tbinfo;
4643 510 : if (PQgetisnull(res, i, i_prrelqual))
4644 292 : pubrinfo[j].pubrelqual = NULL;
4645 : else
4646 218 : pubrinfo[j].pubrelqual = pg_strdup(PQgetvalue(res, i, i_prrelqual));
4647 :
4648 510 : if (!PQgetisnull(res, i, i_prattrs))
4649 : {
4650 : char **attnames;
4651 : int nattnames;
4652 : PQExpBuffer attribs;
4653 :
4654 144 : if (!parsePGArray(PQgetvalue(res, i, i_prattrs),
4655 : &attnames, &nattnames))
4656 0 : pg_fatal("could not parse %s array", "prattrs");
4657 144 : attribs = createPQExpBuffer();
4658 432 : for (int k = 0; k < nattnames; k++)
4659 : {
4660 288 : if (k > 0)
4661 144 : appendPQExpBufferStr(attribs, ", ");
4662 :
4663 288 : appendPQExpBufferStr(attribs, fmtId(attnames[k]));
4664 : }
4665 144 : pubrinfo[j].pubrattrs = attribs->data;
4666 : }
4667 : else
4668 366 : pubrinfo[j].pubrattrs = NULL;
4669 :
4670 : /* Decide whether we want to dump it */
4671 510 : selectDumpablePublicationObject(&(pubrinfo[j].dobj), fout);
4672 :
4673 510 : j++;
4674 : }
4675 :
4676 308 : PQclear(res);
4677 308 : destroyPQExpBuffer(query);
4678 : }
4679 :
4680 : /*
4681 : * dumpPublicationNamespace
4682 : * dump the definition of the given publication schema mapping.
4683 : */
4684 : static void
4685 138 : dumpPublicationNamespace(Archive *fout, const PublicationSchemaInfo *pubsinfo)
4686 : {
4687 138 : DumpOptions *dopt = fout->dopt;
4688 138 : NamespaceInfo *schemainfo = pubsinfo->pubschema;
4689 138 : PublicationInfo *pubinfo = pubsinfo->publication;
4690 : PQExpBuffer query;
4691 : char *tag;
4692 :
4693 : /* Do nothing in data-only dump */
4694 138 : if (dopt->dataOnly)
4695 12 : return;
4696 :
4697 126 : tag = psprintf("%s %s", pubinfo->dobj.name, schemainfo->dobj.name);
4698 :
4699 126 : query = createPQExpBuffer();
4700 :
4701 126 : appendPQExpBuffer(query, "ALTER PUBLICATION %s ", fmtId(pubinfo->dobj.name));
4702 126 : appendPQExpBuffer(query, "ADD TABLES IN SCHEMA %s;\n", fmtId(schemainfo->dobj.name));
4703 :
4704 : /*
4705 : * There is no point in creating drop query as the drop is done by schema
4706 : * drop.
4707 : */
4708 126 : if (pubsinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4709 126 : ArchiveEntry(fout, pubsinfo->dobj.catId, pubsinfo->dobj.dumpId,
4710 126 : ARCHIVE_OPTS(.tag = tag,
4711 : .namespace = schemainfo->dobj.name,
4712 : .owner = pubinfo->rolname,
4713 : .description = "PUBLICATION TABLES IN SCHEMA",
4714 : .section = SECTION_POST_DATA,
4715 : .createStmt = query->data));
4716 :
4717 : /* These objects can't currently have comments or seclabels */
4718 :
4719 126 : free(tag);
4720 126 : destroyPQExpBuffer(query);
4721 : }
4722 :
4723 : /*
4724 : * dumpPublicationTable
4725 : * dump the definition of the given publication table mapping
4726 : */
4727 : static void
4728 470 : dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo)
4729 : {
4730 470 : DumpOptions *dopt = fout->dopt;
4731 470 : PublicationInfo *pubinfo = pubrinfo->publication;
4732 470 : TableInfo *tbinfo = pubrinfo->pubtable;
4733 : PQExpBuffer query;
4734 : char *tag;
4735 :
4736 : /* Do nothing in data-only dump */
4737 470 : if (dopt->dataOnly)
4738 42 : return;
4739 :
4740 428 : tag = psprintf("%s %s", pubinfo->dobj.name, tbinfo->dobj.name);
4741 :
4742 428 : query = createPQExpBuffer();
4743 :
4744 428 : appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
4745 428 : fmtId(pubinfo->dobj.name));
4746 428 : appendPQExpBuffer(query, " %s",
4747 428 : fmtQualifiedDumpable(tbinfo));
4748 :
4749 428 : if (pubrinfo->pubrattrs)
4750 124 : appendPQExpBuffer(query, " (%s)", pubrinfo->pubrattrs);
4751 :
4752 428 : if (pubrinfo->pubrelqual)
4753 : {
4754 : /*
4755 : * It's necessary to add parentheses around the expression because
4756 : * pg_get_expr won't supply the parentheses for things like WHERE
4757 : * TRUE.
4758 : */
4759 184 : appendPQExpBuffer(query, " WHERE (%s)", pubrinfo->pubrelqual);
4760 : }
4761 428 : appendPQExpBufferStr(query, ";\n");
4762 :
4763 : /*
4764 : * There is no point in creating a drop query as the drop is done by table
4765 : * drop. (If you think to change this, see also _printTocEntry().)
4766 : * Although this object doesn't really have ownership as such, set the
4767 : * owner field anyway to ensure that the command is run by the correct
4768 : * role at restore time.
4769 : */
4770 428 : if (pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4771 428 : ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
4772 428 : ARCHIVE_OPTS(.tag = tag,
4773 : .namespace = tbinfo->dobj.namespace->dobj.name,
4774 : .owner = pubinfo->rolname,
4775 : .description = "PUBLICATION TABLE",
4776 : .section = SECTION_POST_DATA,
4777 : .createStmt = query->data));
4778 :
4779 : /* These objects can't currently have comments or seclabels */
4780 :
4781 428 : free(tag);
4782 428 : destroyPQExpBuffer(query);
4783 : }
4784 :
4785 : /*
4786 : * Is the currently connected user a superuser?
4787 : */
4788 : static bool
4789 308 : is_superuser(Archive *fout)
4790 : {
4791 308 : ArchiveHandle *AH = (ArchiveHandle *) fout;
4792 : const char *val;
4793 :
4794 308 : val = PQparameterStatus(AH->connection, "is_superuser");
4795 :
4796 308 : if (val && strcmp(val, "on") == 0)
4797 302 : return true;
4798 :
4799 6 : return false;
4800 : }
4801 :
4802 : /*
4803 : * Set the given value to restrict_nonsystem_relation_kind value. Since
4804 : * restrict_nonsystem_relation_kind is introduced in minor version releases,
4805 : * the setting query is effective only where available.
4806 : */
4807 : static void
4808 376 : set_restrict_relation_kind(Archive *AH, const char *value)
4809 : {
4810 376 : PQExpBuffer query = createPQExpBuffer();
4811 : PGresult *res;
4812 :
4813 376 : appendPQExpBuffer(query,
4814 : "SELECT set_config(name, '%s', false) "
4815 : "FROM pg_settings "
4816 : "WHERE name = 'restrict_nonsystem_relation_kind'",
4817 : value);
4818 376 : res = ExecuteSqlQuery(AH, query->data, PGRES_TUPLES_OK);
4819 :
4820 376 : PQclear(res);
4821 376 : destroyPQExpBuffer(query);
4822 376 : }
4823 :
4824 : /*
4825 : * getSubscriptions
4826 : * get information about subscriptions
4827 : */
4828 : void
4829 308 : getSubscriptions(Archive *fout)
4830 : {
4831 308 : DumpOptions *dopt = fout->dopt;
4832 : PQExpBuffer query;
4833 : PGresult *res;
4834 : SubscriptionInfo *subinfo;
4835 : int i_tableoid;
4836 : int i_oid;
4837 : int i_subname;
4838 : int i_subowner;
4839 : int i_subbinary;
4840 : int i_substream;
4841 : int i_subtwophasestate;
4842 : int i_subdisableonerr;
4843 : int i_subpasswordrequired;
4844 : int i_subrunasowner;
4845 : int i_subconninfo;
4846 : int i_subslotname;
4847 : int i_subsynccommit;
4848 : int i_subpublications;
4849 : int i_suborigin;
4850 : int i_suboriginremotelsn;
4851 : int i_subenabled;
4852 : int i_subfailover;
4853 : int i,
4854 : ntups;
4855 :
4856 308 : if (dopt->no_subscriptions || fout->remoteVersion < 100000)
4857 0 : return;
4858 :
4859 308 : if (!is_superuser(fout))
4860 : {
4861 : int n;
4862 :
4863 6 : res = ExecuteSqlQuery(fout,
4864 : "SELECT count(*) FROM pg_subscription "
4865 : "WHERE subdbid = (SELECT oid FROM pg_database"
4866 : " WHERE datname = current_database())",
4867 : PGRES_TUPLES_OK);
4868 6 : n = atoi(PQgetvalue(res, 0, 0));
4869 6 : if (n > 0)
4870 4 : pg_log_warning("subscriptions not dumped because current user is not a superuser");
4871 6 : PQclear(res);
4872 6 : return;
4873 : }
4874 :
4875 302 : query = createPQExpBuffer();
4876 :
4877 : /* Get the subscriptions in current database. */
4878 302 : appendPQExpBufferStr(query,
4879 : "SELECT s.tableoid, s.oid, s.subname,\n"
4880 : " s.subowner,\n"
4881 : " s.subconninfo, s.subslotname, s.subsynccommit,\n"
4882 : " s.subpublications,\n");
4883 :
4884 302 : if (fout->remoteVersion >= 140000)
4885 302 : appendPQExpBufferStr(query, " s.subbinary,\n");
4886 : else
4887 0 : appendPQExpBufferStr(query, " false AS subbinary,\n");
4888 :
4889 302 : if (fout->remoteVersion >= 140000)
4890 302 : appendPQExpBufferStr(query, " s.substream,\n");
4891 : else
4892 0 : appendPQExpBufferStr(query, " 'f' AS substream,\n");
4893 :
4894 302 : if (fout->remoteVersion >= 150000)
4895 302 : appendPQExpBufferStr(query,
4896 : " s.subtwophasestate,\n"
4897 : " s.subdisableonerr,\n");
4898 : else
4899 0 : appendPQExpBuffer(query,
4900 : " '%c' AS subtwophasestate,\n"
4901 : " false AS subdisableonerr,\n",
4902 : LOGICALREP_TWOPHASE_STATE_DISABLED);
4903 :
4904 302 : if (fout->remoteVersion >= 160000)
4905 302 : appendPQExpBufferStr(query,
4906 : " s.subpasswordrequired,\n"
4907 : " s.subrunasowner,\n"
4908 : " s.suborigin,\n");
4909 : else
4910 0 : appendPQExpBuffer(query,
4911 : " 't' AS subpasswordrequired,\n"
4912 : " 't' AS subrunasowner,\n"
4913 : " '%s' AS suborigin,\n",
4914 : LOGICALREP_ORIGIN_ANY);
4915 :
4916 302 : if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
4917 28 : appendPQExpBufferStr(query, " o.remote_lsn AS suboriginremotelsn,\n"
4918 : " s.subenabled,\n");
4919 : else
4920 274 : appendPQExpBufferStr(query, " NULL AS suboriginremotelsn,\n"
4921 : " false AS subenabled,\n");
4922 :
4923 302 : if (fout->remoteVersion >= 170000)
4924 302 : appendPQExpBufferStr(query,
4925 : " s.subfailover\n");
4926 : else
4927 0 : appendPQExpBuffer(query,
4928 : " false AS subfailover\n");
4929 :
4930 302 : appendPQExpBufferStr(query,
4931 : "FROM pg_subscription s\n");
4932 :
4933 302 : if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
4934 28 : appendPQExpBufferStr(query,
4935 : "LEFT JOIN pg_catalog.pg_replication_origin_status o \n"
4936 : " ON o.external_id = 'pg_' || s.oid::text \n");
4937 :
4938 302 : appendPQExpBufferStr(query,
4939 : "WHERE s.subdbid = (SELECT oid FROM pg_database\n"
4940 : " WHERE datname = current_database())");
4941 :
4942 302 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4943 :
4944 302 : ntups = PQntuples(res);
4945 :
4946 : /*
4947 : * Get subscription fields. We don't include subskiplsn in the dump as
4948 : * after restoring the dump this value may no longer be relevant.
4949 : */
4950 302 : i_tableoid = PQfnumber(res, "tableoid");
4951 302 : i_oid = PQfnumber(res, "oid");
4952 302 : i_subname = PQfnumber(res, "subname");
4953 302 : i_subowner = PQfnumber(res, "subowner");
4954 302 : i_subbinary = PQfnumber(res, "subbinary");
4955 302 : i_substream = PQfnumber(res, "substream");
4956 302 : i_subtwophasestate = PQfnumber(res, "subtwophasestate");
4957 302 : i_subdisableonerr = PQfnumber(res, "subdisableonerr");
4958 302 : i_subpasswordrequired = PQfnumber(res, "subpasswordrequired");
4959 302 : i_subrunasowner = PQfnumber(res, "subrunasowner");
4960 302 : i_subconninfo = PQfnumber(res, "subconninfo");
4961 302 : i_subslotname = PQfnumber(res, "subslotname");
4962 302 : i_subsynccommit = PQfnumber(res, "subsynccommit");
4963 302 : i_subpublications = PQfnumber(res, "subpublications");
4964 302 : i_suborigin = PQfnumber(res, "suborigin");
4965 302 : i_suboriginremotelsn = PQfnumber(res, "suboriginremotelsn");
4966 302 : i_subenabled = PQfnumber(res, "subenabled");
4967 302 : i_subfailover = PQfnumber(res, "subfailover");
4968 :
4969 302 : subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
4970 :
4971 552 : for (i = 0; i < ntups; i++)
4972 : {
4973 250 : subinfo[i].dobj.objType = DO_SUBSCRIPTION;
4974 250 : subinfo[i].dobj.catId.tableoid =
4975 250 : atooid(PQgetvalue(res, i, i_tableoid));
4976 250 : subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4977 250 : AssignDumpId(&subinfo[i].dobj);
4978 250 : subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
4979 250 : subinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_subowner));
4980 :
4981 500 : subinfo[i].subbinary =
4982 250 : pg_strdup(PQgetvalue(res, i, i_subbinary));
4983 500 : subinfo[i].substream =
4984 250 : pg_strdup(PQgetvalue(res, i, i_substream));
4985 500 : subinfo[i].subtwophasestate =
4986 250 : pg_strdup(PQgetvalue(res, i, i_subtwophasestate));
4987 500 : subinfo[i].subdisableonerr =
4988 250 : pg_strdup(PQgetvalue(res, i, i_subdisableonerr));
4989 500 : subinfo[i].subpasswordrequired =
4990 250 : pg_strdup(PQgetvalue(res, i, i_subpasswordrequired));
4991 500 : subinfo[i].subrunasowner =
4992 250 : pg_strdup(PQgetvalue(res, i, i_subrunasowner));
4993 500 : subinfo[i].subconninfo =
4994 250 : pg_strdup(PQgetvalue(res, i, i_subconninfo));
4995 250 : if (PQgetisnull(res, i, i_subslotname))
4996 0 : subinfo[i].subslotname = NULL;
4997 : else
4998 250 : subinfo[i].subslotname =
4999 250 : pg_strdup(PQgetvalue(res, i, i_subslotname));
5000 500 : subinfo[i].subsynccommit =
5001 250 : pg_strdup(PQgetvalue(res, i, i_subsynccommit));
5002 500 : subinfo[i].subpublications =
5003 250 : pg_strdup(PQgetvalue(res, i, i_subpublications));
5004 250 : subinfo[i].suborigin = pg_strdup(PQgetvalue(res, i, i_suborigin));
5005 250 : if (PQgetisnull(res, i, i_suboriginremotelsn))
5006 248 : subinfo[i].suboriginremotelsn = NULL;
5007 : else
5008 2 : subinfo[i].suboriginremotelsn =
5009 2 : pg_strdup(PQgetvalue(res, i, i_suboriginremotelsn));
5010 500 : subinfo[i].subenabled =
5011 250 : pg_strdup(PQgetvalue(res, i, i_subenabled));
5012 500 : subinfo[i].subfailover =
5013 250 : pg_strdup(PQgetvalue(res, i, i_subfailover));
5014 :
5015 : /* Decide whether we want to dump it */
5016 250 : selectDumpableObject(&(subinfo[i].dobj), fout);
5017 : }
5018 302 : PQclear(res);
5019 :
5020 302 : destroyPQExpBuffer(query);
5021 : }
5022 :
5023 : /*
5024 : * getSubscriptionTables
5025 : * Get information about subscription membership for dumpable tables. This
5026 : * will be used only in binary-upgrade mode for PG17 or later versions.
5027 : */
5028 : void
5029 308 : getSubscriptionTables(Archive *fout)
5030 : {
5031 308 : DumpOptions *dopt = fout->dopt;
5032 308 : SubscriptionInfo *subinfo = NULL;
5033 : SubRelInfo *subrinfo;
5034 : PGresult *res;
5035 : int i_srsubid;
5036 : int i_srrelid;
5037 : int i_srsubstate;
5038 : int i_srsublsn;
5039 : int ntups;
5040 308 : Oid last_srsubid = InvalidOid;
5041 :
5042 308 : if (dopt->no_subscriptions || !dopt->binary_upgrade ||
5043 28 : fout->remoteVersion < 170000)
5044 280 : return;
5045 :
5046 28 : res = ExecuteSqlQuery(fout,
5047 : "SELECT srsubid, srrelid, srsubstate, srsublsn "
5048 : "FROM pg_catalog.pg_subscription_rel "
5049 : "ORDER BY srsubid",
5050 : PGRES_TUPLES_OK);
5051 28 : ntups = PQntuples(res);
5052 28 : if (ntups == 0)
5053 26 : goto cleanup;
5054 :
5055 : /* Get pg_subscription_rel attributes */
5056 2 : i_srsubid = PQfnumber(res, "srsubid");
5057 2 : i_srrelid = PQfnumber(res, "srrelid");
5058 2 : i_srsubstate = PQfnumber(res, "srsubstate");
5059 2 : i_srsublsn = PQfnumber(res, "srsublsn");
5060 :
5061 2 : subrinfo = pg_malloc(ntups * sizeof(SubRelInfo));
5062 6 : for (int i = 0; i < ntups; i++)
5063 : {
5064 4 : Oid cur_srsubid = atooid(PQgetvalue(res, i, i_srsubid));
5065 4 : Oid relid = atooid(PQgetvalue(res, i, i_srrelid));
5066 : TableInfo *tblinfo;
5067 :
5068 : /*
5069 : * If we switched to a new subscription, check if the subscription
5070 : * exists.
5071 : */
5072 4 : if (cur_srsubid != last_srsubid)
5073 : {
5074 4 : subinfo = findSubscriptionByOid(cur_srsubid);
5075 4 : if (subinfo == NULL)
5076 0 : pg_fatal("subscription with OID %u does not exist", cur_srsubid);
5077 :
5078 4 : last_srsubid = cur_srsubid;
5079 : }
5080 :
5081 4 : tblinfo = findTableByOid(relid);
5082 4 : if (tblinfo == NULL)
5083 0 : pg_fatal("failed sanity check, table with OID %u not found",
5084 : relid);
5085 :
5086 : /* OK, make a DumpableObject for this relationship */
5087 4 : subrinfo[i].dobj.objType = DO_SUBSCRIPTION_REL;
5088 4 : subrinfo[i].dobj.catId.tableoid = relid;
5089 4 : subrinfo[i].dobj.catId.oid = cur_srsubid;
5090 4 : AssignDumpId(&subrinfo[i].dobj);
5091 4 : subrinfo[i].dobj.name = pg_strdup(subinfo->dobj.name);
5092 4 : subrinfo[i].tblinfo = tblinfo;
5093 4 : subrinfo[i].srsubstate = PQgetvalue(res, i, i_srsubstate)[0];
5094 4 : if (PQgetisnull(res, i, i_srsublsn))
5095 2 : subrinfo[i].srsublsn = NULL;
5096 : else
5097 2 : subrinfo[i].srsublsn = pg_strdup(PQgetvalue(res, i, i_srsublsn));
5098 :
5099 4 : subrinfo[i].subinfo = subinfo;
5100 :
5101 : /* Decide whether we want to dump it */
5102 4 : selectDumpableObject(&(subrinfo[i].dobj), fout);
5103 : }
5104 :
5105 2 : cleanup:
5106 28 : PQclear(res);
5107 : }
5108 :
5109 : /*
5110 : * dumpSubscriptionTable
5111 : * Dump the definition of the given subscription table mapping. This will be
5112 : * used only in binary-upgrade mode for PG17 or later versions.
5113 : */
5114 : static void
5115 4 : dumpSubscriptionTable(Archive *fout, const SubRelInfo *subrinfo)
5116 : {
5117 4 : DumpOptions *dopt = fout->dopt;
5118 4 : SubscriptionInfo *subinfo = subrinfo->subinfo;
5119 : PQExpBuffer query;
5120 : char *tag;
5121 :
5122 : /* Do nothing in data-only dump */
5123 4 : if (dopt->dataOnly)
5124 0 : return;
5125 :
5126 : Assert(fout->dopt->binary_upgrade && fout->remoteVersion >= 170000);
5127 :
5128 4 : tag = psprintf("%s %s", subinfo->dobj.name, subrinfo->dobj.name);
5129 :
5130 4 : query = createPQExpBuffer();
5131 :
5132 4 : if (subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5133 : {
5134 : /*
5135 : * binary_upgrade_add_sub_rel_state will add the subscription relation
5136 : * to pg_subscription_rel table. This will be used only in
5137 : * binary-upgrade mode.
5138 : */
5139 4 : appendPQExpBufferStr(query,
5140 : "\n-- For binary upgrade, must preserve the subscriber table.\n");
5141 4 : appendPQExpBufferStr(query,
5142 : "SELECT pg_catalog.binary_upgrade_add_sub_rel_state(");
5143 4 : appendStringLiteralAH(query, subrinfo->dobj.name, fout);
5144 4 : appendPQExpBuffer(query,
5145 : ", %u, '%c'",
5146 4 : subrinfo->tblinfo->dobj.catId.oid,
5147 4 : subrinfo->srsubstate);
5148 :
5149 4 : if (subrinfo->srsublsn && subrinfo->srsublsn[0] != '\0')
5150 2 : appendPQExpBuffer(query, ", '%s'", subrinfo->srsublsn);
5151 : else
5152 2 : appendPQExpBuffer(query, ", NULL");
5153 :
5154 4 : appendPQExpBufferStr(query, ");\n");
5155 : }
5156 :
5157 : /*
5158 : * There is no point in creating a drop query as the drop is done by table
5159 : * drop. (If you think to change this, see also _printTocEntry().)
5160 : * Although this object doesn't really have ownership as such, set the
5161 : * owner field anyway to ensure that the command is run by the correct
5162 : * role at restore time.
5163 : */
5164 4 : if (subrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5165 4 : ArchiveEntry(fout, subrinfo->dobj.catId, subrinfo->dobj.dumpId,
5166 4 : ARCHIVE_OPTS(.tag = tag,
5167 : .namespace = subrinfo->tblinfo->dobj.namespace->dobj.name,
5168 : .owner = subinfo->rolname,
5169 : .description = "SUBSCRIPTION TABLE",
5170 : .section = SECTION_POST_DATA,
5171 : .createStmt = query->data));
5172 :
5173 : /* These objects can't currently have comments or seclabels */
5174 :
5175 4 : free(tag);
5176 4 : destroyPQExpBuffer(query);
5177 : }
5178 :
5179 : /*
5180 : * dumpSubscription
5181 : * dump the definition of the given subscription
5182 : */
5183 : static void
5184 214 : dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo)
5185 : {
5186 214 : DumpOptions *dopt = fout->dopt;
5187 : PQExpBuffer delq;
5188 : PQExpBuffer query;
5189 : PQExpBuffer publications;
5190 : char *qsubname;
5191 214 : char **pubnames = NULL;
5192 214 : int npubnames = 0;
5193 : int i;
5194 214 : char two_phase_disabled[] = {LOGICALREP_TWOPHASE_STATE_DISABLED, '\0'};
5195 :
5196 : /* Do nothing in data-only dump */
5197 214 : if (dopt->dataOnly)
5198 18 : return;
5199 :
5200 196 : delq = createPQExpBuffer();
5201 196 : query = createPQExpBuffer();
5202 :
5203 196 : qsubname = pg_strdup(fmtId(subinfo->dobj.name));
5204 :
5205 196 : appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
5206 : qsubname);
5207 :
5208 196 : appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
5209 : qsubname);
5210 196 : appendStringLiteralAH(query, subinfo->subconninfo, fout);
5211 :
5212 : /* Build list of quoted publications and append them to query. */
5213 196 : if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
5214 0 : pg_fatal("could not parse %s array", "subpublications");
5215 :
5216 196 : publications = createPQExpBuffer();
5217 392 : for (i = 0; i < npubnames; i++)
5218 : {
5219 196 : if (i > 0)
5220 0 : appendPQExpBufferStr(publications, ", ");
5221 :
5222 196 : appendPQExpBufferStr(publications, fmtId(pubnames[i]));
5223 : }
5224 :
5225 196 : appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
5226 196 : if (subinfo->subslotname)
5227 196 : appendStringLiteralAH(query, subinfo->subslotname, fout);
5228 : else
5229 0 : appendPQExpBufferStr(query, "NONE");
5230 :
5231 196 : if (strcmp(subinfo->subbinary, "t") == 0)
5232 0 : appendPQExpBufferStr(query, ", binary = true");
5233 :
5234 196 : if (strcmp(subinfo->substream, "t") == 0)
5235 0 : appendPQExpBufferStr(query, ", streaming = on");
5236 196 : else if (strcmp(subinfo->substream, "p") == 0)
5237 0 : appendPQExpBufferStr(query, ", streaming = parallel");
5238 :
5239 196 : if (strcmp(subinfo->subtwophasestate, two_phase_disabled) != 0)
5240 0 : appendPQExpBufferStr(query, ", two_phase = on");
5241 :
5242 196 : if (strcmp(subinfo->subdisableonerr, "t") == 0)
5243 0 : appendPQExpBufferStr(query, ", disable_on_error = true");
5244 :
5245 196 : if (strcmp(subinfo->subpasswordrequired, "t") != 0)
5246 0 : appendPQExpBuffer(query, ", password_required = false");
5247 :
5248 196 : if (strcmp(subinfo->subrunasowner, "t") == 0)
5249 0 : appendPQExpBufferStr(query, ", run_as_owner = true");
5250 :
5251 196 : if (strcmp(subinfo->subfailover, "t") == 0)
5252 2 : appendPQExpBufferStr(query, ", failover = true");
5253 :
5254 196 : if (strcmp(subinfo->subsynccommit, "off") != 0)
5255 0 : appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
5256 :
5257 196 : if (pg_strcasecmp(subinfo->suborigin, LOGICALREP_ORIGIN_ANY) != 0)
5258 64 : appendPQExpBuffer(query, ", origin = %s", subinfo->suborigin);
5259 :
5260 196 : appendPQExpBufferStr(query, ");\n");
5261 :
5262 : /*
5263 : * In binary-upgrade mode, we allow the replication to continue after the
5264 : * upgrade.
5265 : */
5266 196 : if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5267 : {
5268 10 : if (subinfo->suboriginremotelsn)
5269 : {
5270 : /*
5271 : * Preserve the remote_lsn for the subscriber's replication
5272 : * origin. This value is required to start the replication from
5273 : * the position before the upgrade. This value will be stale if
5274 : * the publisher gets upgraded before the subscriber node.
5275 : * However, this shouldn't be a problem as the upgrade of the
5276 : * publisher ensures that all the transactions were replicated
5277 : * before upgrading it.
5278 : */
5279 2 : appendPQExpBufferStr(query,
5280 : "\n-- For binary upgrade, must preserve the remote_lsn for the subscriber's replication origin.\n");
5281 2 : appendPQExpBufferStr(query,
5282 : "SELECT pg_catalog.binary_upgrade_replorigin_advance(");
5283 2 : appendStringLiteralAH(query, subinfo->dobj.name, fout);
5284 2 : appendPQExpBuffer(query, ", '%s');\n", subinfo->suboriginremotelsn);
5285 : }
5286 :
5287 10 : if (strcmp(subinfo->subenabled, "t") == 0)
5288 : {
5289 : /*
5290 : * Enable the subscription to allow the replication to continue
5291 : * after the upgrade.
5292 : */
5293 2 : appendPQExpBufferStr(query,
5294 : "\n-- For binary upgrade, must preserve the subscriber's running state.\n");
5295 2 : appendPQExpBuffer(query, "ALTER SUBSCRIPTION %s ENABLE;\n", qsubname);
5296 : }
5297 : }
5298 :
5299 196 : if (subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5300 196 : ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
5301 196 : ARCHIVE_OPTS(.tag = subinfo->dobj.name,
5302 : .owner = subinfo->rolname,
5303 : .description = "SUBSCRIPTION",
5304 : .section = SECTION_POST_DATA,
5305 : .createStmt = query->data,
5306 : .dropStmt = delq->data));
5307 :
5308 196 : if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
5309 64 : dumpComment(fout, "SUBSCRIPTION", qsubname,
5310 : NULL, subinfo->rolname,
5311 : subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
5312 :
5313 196 : if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
5314 0 : dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
5315 : NULL, subinfo->rolname,
5316 : subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
5317 :
5318 196 : destroyPQExpBuffer(publications);
5319 196 : free(pubnames);
5320 :
5321 196 : destroyPQExpBuffer(delq);
5322 196 : destroyPQExpBuffer(query);
5323 196 : free(qsubname);
5324 : }
5325 :
5326 : /*
5327 : * Given a "create query", append as many ALTER ... DEPENDS ON EXTENSION as
5328 : * the object needs.
5329 : */
5330 : static void
5331 9676 : append_depends_on_extension(Archive *fout,
5332 : PQExpBuffer create,
5333 : const DumpableObject *dobj,
5334 : const char *catalog,
5335 : const char *keyword,
5336 : const char *objname)
5337 : {
5338 9676 : if (dobj->depends_on_ext)
5339 : {
5340 : char *nm;
5341 : PGresult *res;
5342 : PQExpBuffer query;
5343 : int ntups;
5344 : int i_extname;
5345 : int i;
5346 :
5347 : /* dodge fmtId() non-reentrancy */
5348 84 : nm = pg_strdup(objname);
5349 :
5350 84 : query = createPQExpBuffer();
5351 84 : appendPQExpBuffer(query,
5352 : "SELECT e.extname "
5353 : "FROM pg_catalog.pg_depend d, pg_catalog.pg_extension e "
5354 : "WHERE d.refobjid = e.oid AND classid = '%s'::pg_catalog.regclass "
5355 : "AND objid = '%u'::pg_catalog.oid AND deptype = 'x' "
5356 : "AND refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass",
5357 : catalog,
5358 : dobj->catId.oid);
5359 84 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5360 84 : ntups = PQntuples(res);
5361 84 : i_extname = PQfnumber(res, "extname");
5362 168 : for (i = 0; i < ntups; i++)
5363 : {
5364 84 : appendPQExpBuffer(create, "\nALTER %s %s DEPENDS ON EXTENSION %s;",
5365 : keyword, nm,
5366 84 : fmtId(PQgetvalue(res, i, i_extname)));
5367 : }
5368 :
5369 84 : PQclear(res);
5370 84 : destroyPQExpBuffer(query);
5371 84 : pg_free(nm);
5372 : }
5373 9676 : }
5374 :
5375 : static Oid
5376 0 : get_next_possible_free_pg_type_oid(Archive *fout, PQExpBuffer upgrade_query)
5377 : {
5378 : /*
5379 : * If the old version didn't assign an array type, but the new version
5380 : * does, we must select an unused type OID to assign. This currently only
5381 : * happens for domains, when upgrading pre-v11 to v11 and up.
5382 : *
5383 : * Note: local state here is kind of ugly, but we must have some, since we
5384 : * mustn't choose the same unused OID more than once.
5385 : */
5386 : static Oid next_possible_free_oid = FirstNormalObjectId;
5387 : PGresult *res;
5388 : bool is_dup;
5389 :
5390 : do
5391 : {
5392 0 : ++next_possible_free_oid;
5393 0 : printfPQExpBuffer(upgrade_query,
5394 : "SELECT EXISTS(SELECT 1 "
5395 : "FROM pg_catalog.pg_type "
5396 : "WHERE oid = '%u'::pg_catalog.oid);",
5397 : next_possible_free_oid);
5398 0 : res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
5399 0 : is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
5400 0 : PQclear(res);
5401 0 : } while (is_dup);
5402 :
5403 0 : return next_possible_free_oid;
5404 : }
5405 :
/*
 * binary_upgrade_set_type_oids_by_type_oid
 *
 * Append to upgrade_buffer the binary_upgrade_set_next_*_pg_type_oid()
 * calls needed so that the new cluster assigns the same pg_type OIDs as
 * the old one: the type itself, its array type (if any, or a freshly
 * chosen OID when force_array_type is set), and, when
 * include_multirange_type is true, the multirange type and its array type.
 */
static void
binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
										 PQExpBuffer upgrade_buffer,
										 Oid pg_type_oid,
										 bool force_array_type,
										 bool include_multirange_type)
{
	PQExpBuffer upgrade_query = createPQExpBuffer();
	PGresult   *res;
	Oid			pg_type_array_oid;
	Oid			pg_type_multirange_oid;
	Oid			pg_type_multirange_array_oid;
	TypeInfo   *tinfo;

	appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
	appendPQExpBuffer(upgrade_buffer,
					  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
					  pg_type_oid);

	/* Look up the array type from the data collected by getTypes() */
	tinfo = findTypeByOid(pg_type_oid);
	if (tinfo)
		pg_type_array_oid = tinfo->typarray;
	else
		pg_type_array_oid = InvalidOid;

	/* No array type in the old cluster: pick an unused OID if forced to */
	if (!OidIsValid(pg_type_array_oid) && force_array_type)
		pg_type_array_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);

	if (OidIsValid(pg_type_array_oid))
	{
		appendPQExpBufferStr(upgrade_buffer,
							 "\n-- For binary upgrade, must preserve pg_type array oid\n");
		appendPQExpBuffer(upgrade_buffer,
						  "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
						  pg_type_array_oid);
	}

	/*
	 * Pre-set the multirange type oid and its own array type oid.
	 */
	if (include_multirange_type)
	{
		if (fout->remoteVersion >= 140000)
		{
			/* Multiranges exist in the old cluster; fetch their real OIDs */
			printfPQExpBuffer(upgrade_query,
							  "SELECT t.oid, t.typarray "
							  "FROM pg_catalog.pg_type t "
							  "JOIN pg_catalog.pg_range r "
							  "ON t.oid = r.rngmultitypid "
							  "WHERE r.rngtypid = '%u'::pg_catalog.oid;",
							  pg_type_oid);

			res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);

			pg_type_multirange_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "oid")));
			pg_type_multirange_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));

			PQclear(res);
		}
		else
		{
			/* Pre-v14 source: multiranges don't exist, pick fresh OIDs */
			pg_type_multirange_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
			pg_type_multirange_array_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
		}

		appendPQExpBufferStr(upgrade_buffer,
							 "\n-- For binary upgrade, must preserve multirange pg_type oid\n");
		appendPQExpBuffer(upgrade_buffer,
						  "SELECT pg_catalog.binary_upgrade_set_next_multirange_pg_type_oid('%u'::pg_catalog.oid);\n\n",
						  pg_type_multirange_oid);
		appendPQExpBufferStr(upgrade_buffer,
							 "\n-- For binary upgrade, must preserve multirange pg_type array oid\n");
		appendPQExpBuffer(upgrade_buffer,
						  "SELECT pg_catalog.binary_upgrade_set_next_multirange_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
						  pg_type_multirange_array_oid);
	}

	destroyPQExpBuffer(upgrade_query);
}
5485 :
5486 : static void
5487 1488 : binary_upgrade_set_type_oids_by_rel(Archive *fout,
5488 : PQExpBuffer upgrade_buffer,
5489 : const TableInfo *tbinfo)
5490 : {
5491 1488 : Oid pg_type_oid = tbinfo->reltype;
5492 :
5493 1488 : if (OidIsValid(pg_type_oid))
5494 1488 : binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
5495 : pg_type_oid, false, false);
5496 1488 : }
5497 :
5498 : /*
5499 : * bsearch() comparator for BinaryUpgradeClassOidItem
5500 : */
5501 : static int
5502 21320 : BinaryUpgradeClassOidItemCmp(const void *p1, const void *p2)
5503 : {
5504 21320 : BinaryUpgradeClassOidItem v1 = *((const BinaryUpgradeClassOidItem *) p1);
5505 21320 : BinaryUpgradeClassOidItem v2 = *((const BinaryUpgradeClassOidItem *) p2);
5506 :
5507 21320 : return pg_cmp_u32(v1.oid, v2.oid);
5508 : }
5509 :
5510 : /*
5511 : * collectBinaryUpgradeClassOids
5512 : *
5513 : * Construct a table of pg_class information required for
5514 : * binary_upgrade_set_pg_class_oids(). The table is sorted by OID for speed in
5515 : * lookup.
5516 : */
5517 : static void
5518 28 : collectBinaryUpgradeClassOids(Archive *fout)
5519 : {
5520 : PGresult *res;
5521 : const char *query;
5522 :
5523 28 : query = "SELECT c.oid, c.relkind, c.relfilenode, c.reltoastrelid, "
5524 : "ct.relfilenode, i.indexrelid, cti.relfilenode "
5525 : "FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_index i "
5526 : "ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
5527 : "LEFT JOIN pg_catalog.pg_class ct ON (c.reltoastrelid = ct.oid) "
5528 : "LEFT JOIN pg_catalog.pg_class AS cti ON (i.indexrelid = cti.oid) "
5529 : "ORDER BY c.oid;";
5530 :
5531 28 : res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
5532 :
5533 28 : nbinaryUpgradeClassOids = PQntuples(res);
5534 28 : binaryUpgradeClassOids = (BinaryUpgradeClassOidItem *)
5535 28 : pg_malloc(nbinaryUpgradeClassOids * sizeof(BinaryUpgradeClassOidItem));
5536 :
5537 14908 : for (int i = 0; i < nbinaryUpgradeClassOids; i++)
5538 : {
5539 14880 : binaryUpgradeClassOids[i].oid = atooid(PQgetvalue(res, i, 0));
5540 14880 : binaryUpgradeClassOids[i].relkind = *PQgetvalue(res, i, 1);
5541 14880 : binaryUpgradeClassOids[i].relfilenumber = atooid(PQgetvalue(res, i, 2));
5542 14880 : binaryUpgradeClassOids[i].toast_oid = atooid(PQgetvalue(res, i, 3));
5543 14880 : binaryUpgradeClassOids[i].toast_relfilenumber = atooid(PQgetvalue(res, i, 4));
5544 14880 : binaryUpgradeClassOids[i].toast_index_oid = atooid(PQgetvalue(res, i, 5));
5545 14880 : binaryUpgradeClassOids[i].toast_index_relfilenumber = atooid(PQgetvalue(res, i, 6));
5546 : }
5547 :
5548 28 : PQclear(res);
5549 28 : }
5550 :
5551 : static void
5552 2172 : binary_upgrade_set_pg_class_oids(Archive *fout,
5553 : PQExpBuffer upgrade_buffer, Oid pg_class_oid)
5554 : {
5555 2172 : BinaryUpgradeClassOidItem key = {0};
5556 : BinaryUpgradeClassOidItem *entry;
5557 :
5558 : Assert(binaryUpgradeClassOids);
5559 :
5560 : /*
5561 : * Preserve the OID and relfilenumber of the table, table's index, table's
5562 : * toast table and toast table's index if any.
5563 : *
5564 : * One complexity is that the current table definition might not require
5565 : * the creation of a TOAST table, but the old database might have a TOAST
5566 : * table that was created earlier, before some wide columns were dropped.
5567 : * By setting the TOAST oid we force creation of the TOAST heap and index
5568 : * by the new backend, so we can copy the files during binary upgrade
5569 : * without worrying about this case.
5570 : */
5571 2172 : key.oid = pg_class_oid;
5572 2172 : entry = bsearch(&key, binaryUpgradeClassOids, nbinaryUpgradeClassOids,
5573 : sizeof(BinaryUpgradeClassOidItem),
5574 : BinaryUpgradeClassOidItemCmp);
5575 :
5576 2172 : appendPQExpBufferStr(upgrade_buffer,
5577 : "\n-- For binary upgrade, must preserve pg_class oids and relfilenodes\n");
5578 :
5579 2172 : if (entry->relkind != RELKIND_INDEX &&
5580 1686 : entry->relkind != RELKIND_PARTITIONED_INDEX)
5581 : {
5582 1636 : appendPQExpBuffer(upgrade_buffer,
5583 : "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
5584 : pg_class_oid);
5585 :
5586 : /*
5587 : * Not every relation has storage. Also, in a pre-v12 database,
5588 : * partitioned tables have a relfilenumber, which should not be
5589 : * preserved when upgrading.
5590 : */
5591 1636 : if (RelFileNumberIsValid(entry->relfilenumber) &&
5592 1330 : entry->relkind != RELKIND_PARTITIONED_TABLE)
5593 1330 : appendPQExpBuffer(upgrade_buffer,
5594 : "SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
5595 : entry->relfilenumber);
5596 :
5597 : /*
5598 : * In a pre-v12 database, partitioned tables might be marked as having
5599 : * toast tables, but we should ignore them if so.
5600 : */
5601 1636 : if (OidIsValid(entry->toast_oid) &&
5602 544 : entry->relkind != RELKIND_PARTITIONED_TABLE)
5603 : {
5604 544 : appendPQExpBuffer(upgrade_buffer,
5605 : "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
5606 : entry->toast_oid);
5607 544 : appendPQExpBuffer(upgrade_buffer,
5608 : "SELECT pg_catalog.binary_upgrade_set_next_toast_relfilenode('%u'::pg_catalog.oid);\n",
5609 : entry->toast_relfilenumber);
5610 :
5611 : /* every toast table has an index */
5612 544 : appendPQExpBuffer(upgrade_buffer,
5613 : "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
5614 : entry->toast_index_oid);
5615 544 : appendPQExpBuffer(upgrade_buffer,
5616 : "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
5617 : entry->toast_index_relfilenumber);
5618 : }
5619 : }
5620 : else
5621 : {
5622 : /* Preserve the OID and relfilenumber of the index */
5623 536 : appendPQExpBuffer(upgrade_buffer,
5624 : "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
5625 : pg_class_oid);
5626 536 : appendPQExpBuffer(upgrade_buffer,
5627 : "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
5628 : entry->relfilenumber);
5629 : }
5630 :
5631 2172 : appendPQExpBufferChar(upgrade_buffer, '\n');
5632 2172 : }
5633 :
5634 : /*
5635 : * If the DumpableObject is a member of an extension, add a suitable
5636 : * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
5637 : *
5638 : * For somewhat historical reasons, objname should already be quoted,
5639 : * but not objnamespace (if any).
5640 : */
5641 : static void
5642 2610 : binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
5643 : const DumpableObject *dobj,
5644 : const char *objtype,
5645 : const char *objname,
5646 : const char *objnamespace)
5647 : {
5648 2610 : DumpableObject *extobj = NULL;
5649 : int i;
5650 :
5651 2610 : if (!dobj->ext_member)
5652 2578 : return;
5653 :
5654 : /*
5655 : * Find the parent extension. We could avoid this search if we wanted to
5656 : * add a link field to DumpableObject, but the space costs of that would
5657 : * be considerable. We assume that member objects could only have a
5658 : * direct dependency on their own extension, not any others.
5659 : */
5660 32 : for (i = 0; i < dobj->nDeps; i++)
5661 : {
5662 32 : extobj = findObjectByDumpId(dobj->dependencies[i]);
5663 32 : if (extobj && extobj->objType == DO_EXTENSION)
5664 32 : break;
5665 0 : extobj = NULL;
5666 : }
5667 32 : if (extobj == NULL)
5668 0 : pg_fatal("could not find parent extension for %s %s",
5669 : objtype, objname);
5670 :
5671 32 : appendPQExpBufferStr(upgrade_buffer,
5672 : "\n-- For binary upgrade, handle extension membership the hard way\n");
5673 32 : appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
5674 32 : fmtId(extobj->name),
5675 : objtype);
5676 32 : if (objnamespace && *objnamespace)
5677 26 : appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
5678 32 : appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
5679 : }
5680 :
5681 : /*
5682 : * getNamespaces:
5683 : * get information about all namespaces in the system catalogs
5684 : */
/*
 * getNamespaces:
 *	  get information about all namespaces in the system catalogs
 *
 * Fetches every pg_namespace row (system schemas included, so that any
 * object read later can be linked to its containing namespace), registers
 * each as a DumpableObject, and decides per-schema whether it should be
 * dumped.
 */
void
getNamespaces(Archive *fout)
{
	PGresult   *res;
	int			ntups;
	int			i;
	PQExpBuffer query;
	NamespaceInfo *nsinfo;
	int			i_tableoid;
	int			i_oid;
	int			i_nspname;
	int			i_nspowner;
	int			i_nspacl;
	int			i_acldefault;

	query = createPQExpBuffer();

	/*
	 * we fetch all namespaces including system ones, so that every object we
	 * read in can be linked to a containing namespace.
	 */
	appendPQExpBufferStr(query, "SELECT n.tableoid, n.oid, n.nspname, "
						 "n.nspowner, "
						 "n.nspacl, "
						 "acldefault('n', n.nspowner) AS acldefault "
						 "FROM pg_namespace n");

	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

	ntups = PQntuples(res);

	nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));

	/* Resolve result-set column numbers once, before the per-row loop */
	i_tableoid = PQfnumber(res, "tableoid");
	i_oid = PQfnumber(res, "oid");
	i_nspname = PQfnumber(res, "nspname");
	i_nspowner = PQfnumber(res, "nspowner");
	i_nspacl = PQfnumber(res, "nspacl");
	i_acldefault = PQfnumber(res, "acldefault");

	for (i = 0; i < ntups; i++)
	{
		const char *nspowner;

		nsinfo[i].dobj.objType = DO_NAMESPACE;
		nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
		nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
		AssignDumpId(&nsinfo[i].dobj);
		nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
		nsinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_nspacl));
		nsinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
		nsinfo[i].dacl.privtype = 0;
		nsinfo[i].dacl.initprivs = NULL;
		nspowner = PQgetvalue(res, i, i_nspowner);
		nsinfo[i].nspowner = atooid(nspowner);
		nsinfo[i].rolname = getRoleName(nspowner);

		/* Decide whether to dump this namespace */
		selectDumpableNamespace(&nsinfo[i], fout);

		/* Mark whether namespace has an ACL */
		if (!PQgetisnull(res, i, i_nspacl))
			nsinfo[i].dobj.components |= DUMP_COMPONENT_ACL;

		/*
		 * We ignore any pg_init_privs.initprivs entry for the public schema
		 * and assume a predetermined default, for several reasons.  First,
		 * dropping and recreating the schema removes its pg_init_privs entry,
		 * but an empty destination database starts with this ACL nonetheless.
		 * Second, we support dump/reload of public schema ownership changes.
		 * ALTER SCHEMA OWNER filters nspacl through aclnewowner(), but
		 * initprivs continues to reflect the initial owner.  Hence,
		 * synthesize the value that nspacl will have after the restore's
		 * ALTER SCHEMA OWNER.  Third, this makes the destination database
		 * match the source's ACL, even if the latter was an initdb-default
		 * ACL, which changed in v15.  An upgrade pulls in changes to most
		 * system object ACLs that the DBA had not customized.  We've made the
		 * public schema depart from that, because changing its ACL so easily
		 * breaks applications.
		 */
		if (strcmp(nsinfo[i].dobj.name, "public") == 0)
		{
			PQExpBuffer aclarray = createPQExpBuffer();
			PQExpBuffer aclitem = createPQExpBuffer();

			/* Standard ACL as of v15 is {owner=UC/owner,=U/owner} */
			appendPQExpBufferChar(aclarray, '{');
			quoteAclUserName(aclitem, nsinfo[i].rolname);
			appendPQExpBufferStr(aclitem, "=UC/");
			quoteAclUserName(aclitem, nsinfo[i].rolname);
			appendPGArray(aclarray, aclitem->data);
			resetPQExpBuffer(aclitem);
			appendPQExpBufferStr(aclitem, "=U/");
			quoteAclUserName(aclitem, nsinfo[i].rolname);
			appendPGArray(aclarray, aclitem->data);
			appendPQExpBufferChar(aclarray, '}');

			nsinfo[i].dacl.privtype = 'i';
			nsinfo[i].dacl.initprivs = pstrdup(aclarray->data);
			nsinfo[i].dobj.components |= DUMP_COMPONENT_ACL;

			destroyPQExpBuffer(aclarray);
			destroyPQExpBuffer(aclitem);
		}
	}

	PQclear(res);
	destroyPQExpBuffer(query);
}
5794 :
5795 : /*
5796 : * findNamespace:
5797 : * given a namespace OID, look up the info read by getNamespaces
5798 : */
5799 : static NamespaceInfo *
5800 962770 : findNamespace(Oid nsoid)
5801 : {
5802 : NamespaceInfo *nsinfo;
5803 :
5804 962770 : nsinfo = findNamespaceByOid(nsoid);
5805 962770 : if (nsinfo == NULL)
5806 0 : pg_fatal("schema with OID %u does not exist", nsoid);
5807 962770 : return nsinfo;
5808 : }
5809 :
5810 : /*
5811 : * getExtensions:
5812 : * read all extensions in the system catalogs and return them in the
5813 : * ExtensionInfo* structure
5814 : *
5815 : * numExtensions is set to the number of extensions read in
5816 : */
5817 : ExtensionInfo *
5818 310 : getExtensions(Archive *fout, int *numExtensions)
5819 : {
5820 310 : DumpOptions *dopt = fout->dopt;
5821 : PGresult *res;
5822 : int ntups;
5823 : int i;
5824 : PQExpBuffer query;
5825 310 : ExtensionInfo *extinfo = NULL;
5826 : int i_tableoid;
5827 : int i_oid;
5828 : int i_extname;
5829 : int i_nspname;
5830 : int i_extrelocatable;
5831 : int i_extversion;
5832 : int i_extconfig;
5833 : int i_extcondition;
5834 :
5835 310 : query = createPQExpBuffer();
5836 :
5837 310 : appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
5838 : "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
5839 : "FROM pg_extension x "
5840 : "JOIN pg_namespace n ON n.oid = x.extnamespace");
5841 :
5842 310 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5843 :
5844 310 : ntups = PQntuples(res);
5845 310 : if (ntups == 0)
5846 0 : goto cleanup;
5847 :
5848 310 : extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
5849 :
5850 310 : i_tableoid = PQfnumber(res, "tableoid");
5851 310 : i_oid = PQfnumber(res, "oid");
5852 310 : i_extname = PQfnumber(res, "extname");
5853 310 : i_nspname = PQfnumber(res, "nspname");
5854 310 : i_extrelocatable = PQfnumber(res, "extrelocatable");
5855 310 : i_extversion = PQfnumber(res, "extversion");
5856 310 : i_extconfig = PQfnumber(res, "extconfig");
5857 310 : i_extcondition = PQfnumber(res, "extcondition");
5858 :
5859 670 : for (i = 0; i < ntups; i++)
5860 : {
5861 360 : extinfo[i].dobj.objType = DO_EXTENSION;
5862 360 : extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5863 360 : extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5864 360 : AssignDumpId(&extinfo[i].dobj);
5865 360 : extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
5866 360 : extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
5867 360 : extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
5868 360 : extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
5869 360 : extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
5870 360 : extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
5871 :
5872 : /* Decide whether we want to dump it */
5873 360 : selectDumpableExtension(&(extinfo[i]), dopt);
5874 : }
5875 :
5876 310 : cleanup:
5877 310 : PQclear(res);
5878 310 : destroyPQExpBuffer(query);
5879 :
5880 310 : *numExtensions = ntups;
5881 :
5882 310 : return extinfo;
5883 : }
5884 :
5885 : /*
5886 : * getTypes:
5887 : * get information about all types in the system catalogs
5888 : *
5889 : * NB: this must run after getFuncs() because we assume we can do
5890 : * findFuncByOid().
5891 : */
5892 : void
5893 308 : getTypes(Archive *fout)
5894 : {
5895 : PGresult *res;
5896 : int ntups;
5897 : int i;
5898 308 : PQExpBuffer query = createPQExpBuffer();
5899 : TypeInfo *tyinfo;
5900 : ShellTypeInfo *stinfo;
5901 : int i_tableoid;
5902 : int i_oid;
5903 : int i_typname;
5904 : int i_typnamespace;
5905 : int i_typacl;
5906 : int i_acldefault;
5907 : int i_typowner;
5908 : int i_typelem;
5909 : int i_typrelid;
5910 : int i_typrelkind;
5911 : int i_typtype;
5912 : int i_typisdefined;
5913 : int i_isarray;
5914 : int i_typarray;
5915 :
5916 : /*
5917 : * we include even the built-in types because those may be used as array
5918 : * elements by user-defined types
5919 : *
5920 : * we filter out the built-in types when we dump out the types
5921 : *
5922 : * same approach for undefined (shell) types and array types
5923 : *
5924 : * Note: as of 8.3 we can reliably detect whether a type is an
5925 : * auto-generated array type by checking the element type's typarray.
5926 : * (Before that the test is capable of generating false positives.) We
5927 : * still check for name beginning with '_', though, so as to avoid the
5928 : * cost of the subselect probe for all standard types. This would have to
5929 : * be revisited if the backend ever allows renaming of array types.
5930 : */
5931 308 : appendPQExpBufferStr(query, "SELECT tableoid, oid, typname, "
5932 : "typnamespace, typacl, "
5933 : "acldefault('T', typowner) AS acldefault, "
5934 : "typowner, "
5935 : "typelem, typrelid, typarray, "
5936 : "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
5937 : "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
5938 : "typtype, typisdefined, "
5939 : "typname[0] = '_' AND typelem != 0 AND "
5940 : "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
5941 : "FROM pg_type");
5942 :
5943 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5944 :
5945 308 : ntups = PQntuples(res);
5946 :
5947 308 : tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
5948 :
5949 308 : i_tableoid = PQfnumber(res, "tableoid");
5950 308 : i_oid = PQfnumber(res, "oid");
5951 308 : i_typname = PQfnumber(res, "typname");
5952 308 : i_typnamespace = PQfnumber(res, "typnamespace");
5953 308 : i_typacl = PQfnumber(res, "typacl");
5954 308 : i_acldefault = PQfnumber(res, "acldefault");
5955 308 : i_typowner = PQfnumber(res, "typowner");
5956 308 : i_typelem = PQfnumber(res, "typelem");
5957 308 : i_typrelid = PQfnumber(res, "typrelid");
5958 308 : i_typrelkind = PQfnumber(res, "typrelkind");
5959 308 : i_typtype = PQfnumber(res, "typtype");
5960 308 : i_typisdefined = PQfnumber(res, "typisdefined");
5961 308 : i_isarray = PQfnumber(res, "isarray");
5962 308 : i_typarray = PQfnumber(res, "typarray");
5963 :
5964 221744 : for (i = 0; i < ntups; i++)
5965 : {
5966 221436 : tyinfo[i].dobj.objType = DO_TYPE;
5967 221436 : tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
5968 221436 : tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5969 221436 : AssignDumpId(&tyinfo[i].dobj);
5970 221436 : tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
5971 442872 : tyinfo[i].dobj.namespace =
5972 221436 : findNamespace(atooid(PQgetvalue(res, i, i_typnamespace)));
5973 221436 : tyinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_typacl));
5974 221436 : tyinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
5975 221436 : tyinfo[i].dacl.privtype = 0;
5976 221436 : tyinfo[i].dacl.initprivs = NULL;
5977 221436 : tyinfo[i].ftypname = NULL; /* may get filled later */
5978 221436 : tyinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_typowner));
5979 221436 : tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
5980 221436 : tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
5981 221436 : tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
5982 221436 : tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
5983 221436 : tyinfo[i].shellType = NULL;
5984 :
5985 221436 : if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
5986 221336 : tyinfo[i].isDefined = true;
5987 : else
5988 100 : tyinfo[i].isDefined = false;
5989 :
5990 221436 : if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
5991 106202 : tyinfo[i].isArray = true;
5992 : else
5993 115234 : tyinfo[i].isArray = false;
5994 :
5995 221436 : tyinfo[i].typarray = atooid(PQgetvalue(res, i, i_typarray));
5996 :
5997 221436 : if (tyinfo[i].typtype == TYPTYPE_MULTIRANGE)
5998 2076 : tyinfo[i].isMultirange = true;
5999 : else
6000 219360 : tyinfo[i].isMultirange = false;
6001 :
6002 : /* Decide whether we want to dump it */
6003 221436 : selectDumpableType(&tyinfo[i], fout);
6004 :
6005 : /* Mark whether type has an ACL */
6006 221436 : if (!PQgetisnull(res, i, i_typacl))
6007 394 : tyinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6008 :
6009 : /*
6010 : * If it's a domain, fetch info about its constraints, if any
6011 : */
6012 221436 : tyinfo[i].nDomChecks = 0;
6013 221436 : tyinfo[i].domChecks = NULL;
6014 221436 : if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
6015 25990 : tyinfo[i].typtype == TYPTYPE_DOMAIN)
6016 272 : getDomainConstraints(fout, &(tyinfo[i]));
6017 :
6018 : /*
6019 : * If it's a base type, make a DumpableObject representing a shell
6020 : * definition of the type. We will need to dump that ahead of the I/O
6021 : * functions for the type. Similarly, range types need a shell
6022 : * definition in case they have a canonicalize function.
6023 : *
6024 : * Note: the shell type doesn't have a catId. You might think it
6025 : * should copy the base type's catId, but then it might capture the
6026 : * pg_depend entries for the type, which we don't want.
6027 : */
6028 221436 : if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
6029 25990 : (tyinfo[i].typtype == TYPTYPE_BASE ||
6030 12576 : tyinfo[i].typtype == TYPTYPE_RANGE))
6031 : {
6032 13634 : stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
6033 13634 : stinfo->dobj.objType = DO_SHELL_TYPE;
6034 13634 : stinfo->dobj.catId = nilCatalogId;
6035 13634 : AssignDumpId(&stinfo->dobj);
6036 13634 : stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
6037 13634 : stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
6038 13634 : stinfo->baseType = &(tyinfo[i]);
6039 13634 : tyinfo[i].shellType = stinfo;
6040 :
6041 : /*
6042 : * Initially mark the shell type as not to be dumped. We'll only
6043 : * dump it if the I/O or canonicalize functions need to be dumped;
6044 : * this is taken care of while sorting dependencies.
6045 : */
6046 13634 : stinfo->dobj.dump = DUMP_COMPONENT_NONE;
6047 : }
6048 : }
6049 :
6050 308 : PQclear(res);
6051 :
6052 308 : destroyPQExpBuffer(query);
6053 308 : }
6054 :
6055 : /*
6056 : * getOperators:
6057 : * get information about all operators in the system catalogs
6058 : */
6059 : void
6060 308 : getOperators(Archive *fout)
6061 : {
6062 : PGresult *res;
6063 : int ntups;
6064 : int i;
6065 308 : PQExpBuffer query = createPQExpBuffer();
6066 : OprInfo *oprinfo;
6067 : int i_tableoid;
6068 : int i_oid;
6069 : int i_oprname;
6070 : int i_oprnamespace;
6071 : int i_oprowner;
6072 : int i_oprkind;
6073 : int i_oprcode;
6074 :
6075 : /*
6076 : * find all operators, including builtin operators; we filter out
6077 : * system-defined operators at dump-out time.
6078 : */
6079 :
6080 308 : appendPQExpBufferStr(query, "SELECT tableoid, oid, oprname, "
6081 : "oprnamespace, "
6082 : "oprowner, "
6083 : "oprkind, "
6084 : "oprcode::oid AS oprcode "
6085 : "FROM pg_operator");
6086 :
6087 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6088 :
6089 308 : ntups = PQntuples(res);
6090 :
6091 308 : oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
6092 :
6093 308 : i_tableoid = PQfnumber(res, "tableoid");
6094 308 : i_oid = PQfnumber(res, "oid");
6095 308 : i_oprname = PQfnumber(res, "oprname");
6096 308 : i_oprnamespace = PQfnumber(res, "oprnamespace");
6097 308 : i_oprowner = PQfnumber(res, "oprowner");
6098 308 : i_oprkind = PQfnumber(res, "oprkind");
6099 308 : i_oprcode = PQfnumber(res, "oprcode");
6100 :
6101 246680 : for (i = 0; i < ntups; i++)
6102 : {
6103 246372 : oprinfo[i].dobj.objType = DO_OPERATOR;
6104 246372 : oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6105 246372 : oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6106 246372 : AssignDumpId(&oprinfo[i].dobj);
6107 246372 : oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
6108 492744 : oprinfo[i].dobj.namespace =
6109 246372 : findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace)));
6110 246372 : oprinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_oprowner));
6111 246372 : oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
6112 246372 : oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
6113 :
6114 : /* Decide whether we want to dump it */
6115 246372 : selectDumpableObject(&(oprinfo[i].dobj), fout);
6116 : }
6117 :
6118 308 : PQclear(res);
6119 :
6120 308 : destroyPQExpBuffer(query);
6121 308 : }
6122 :
6123 : /*
6124 : * getCollations:
6125 : * get information about all collations in the system catalogs
6126 : */
6127 : void
6128 308 : getCollations(Archive *fout)
6129 : {
6130 : PGresult *res;
6131 : int ntups;
6132 : int i;
6133 : PQExpBuffer query;
6134 : CollInfo *collinfo;
6135 : int i_tableoid;
6136 : int i_oid;
6137 : int i_collname;
6138 : int i_collnamespace;
6139 : int i_collowner;
6140 :
6141 308 : query = createPQExpBuffer();
6142 :
6143 : /*
6144 : * find all collations, including builtin collations; we filter out
6145 : * system-defined collations at dump-out time.
6146 : */
6147 :
6148 308 : appendPQExpBufferStr(query, "SELECT tableoid, oid, collname, "
6149 : "collnamespace, "
6150 : "collowner "
6151 : "FROM pg_collation");
6152 :
6153 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6154 :
6155 308 : ntups = PQntuples(res);
6156 :
6157 308 : collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
6158 :
6159 308 : i_tableoid = PQfnumber(res, "tableoid");
6160 308 : i_oid = PQfnumber(res, "oid");
6161 308 : i_collname = PQfnumber(res, "collname");
6162 308 : i_collnamespace = PQfnumber(res, "collnamespace");
6163 308 : i_collowner = PQfnumber(res, "collowner");
6164 :
6165 244444 : for (i = 0; i < ntups; i++)
6166 : {
6167 244136 : collinfo[i].dobj.objType = DO_COLLATION;
6168 244136 : collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6169 244136 : collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6170 244136 : AssignDumpId(&collinfo[i].dobj);
6171 244136 : collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
6172 488272 : collinfo[i].dobj.namespace =
6173 244136 : findNamespace(atooid(PQgetvalue(res, i, i_collnamespace)));
6174 244136 : collinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_collowner));
6175 :
6176 : /* Decide whether we want to dump it */
6177 244136 : selectDumpableObject(&(collinfo[i].dobj), fout);
6178 : }
6179 :
6180 308 : PQclear(res);
6181 :
6182 308 : destroyPQExpBuffer(query);
6183 308 : }
6184 :
6185 : /*
6186 : * getConversions:
6187 : * get information about all conversions in the system catalogs
6188 : */
6189 : void
6190 308 : getConversions(Archive *fout)
6191 : {
6192 : PGresult *res;
6193 : int ntups;
6194 : int i;
6195 : PQExpBuffer query;
6196 : ConvInfo *convinfo;
6197 : int i_tableoid;
6198 : int i_oid;
6199 : int i_conname;
6200 : int i_connamespace;
6201 : int i_conowner;
6202 :
6203 308 : query = createPQExpBuffer();
6204 :
6205 : /*
6206 : * find all conversions, including builtin conversions; we filter out
6207 : * system-defined conversions at dump-out time.
6208 : */
6209 :
6210 308 : appendPQExpBufferStr(query, "SELECT tableoid, oid, conname, "
6211 : "connamespace, "
6212 : "conowner "
6213 : "FROM pg_conversion");
6214 :
6215 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6216 :
6217 308 : ntups = PQntuples(res);
6218 :
6219 308 : convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
6220 :
6221 308 : i_tableoid = PQfnumber(res, "tableoid");
6222 308 : i_oid = PQfnumber(res, "oid");
6223 308 : i_conname = PQfnumber(res, "conname");
6224 308 : i_connamespace = PQfnumber(res, "connamespace");
6225 308 : i_conowner = PQfnumber(res, "conowner");
6226 :
6227 39818 : for (i = 0; i < ntups; i++)
6228 : {
6229 39510 : convinfo[i].dobj.objType = DO_CONVERSION;
6230 39510 : convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6231 39510 : convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6232 39510 : AssignDumpId(&convinfo[i].dobj);
6233 39510 : convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
6234 79020 : convinfo[i].dobj.namespace =
6235 39510 : findNamespace(atooid(PQgetvalue(res, i, i_connamespace)));
6236 39510 : convinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_conowner));
6237 :
6238 : /* Decide whether we want to dump it */
6239 39510 : selectDumpableObject(&(convinfo[i].dobj), fout);
6240 : }
6241 :
6242 308 : PQclear(res);
6243 :
6244 308 : destroyPQExpBuffer(query);
6245 308 : }
6246 :
6247 : /*
6248 : * getAccessMethods:
6249 : * get information about all user-defined access methods
6250 : */
6251 : void
6252 308 : getAccessMethods(Archive *fout)
6253 : {
6254 : PGresult *res;
6255 : int ntups;
6256 : int i;
6257 : PQExpBuffer query;
6258 : AccessMethodInfo *aminfo;
6259 : int i_tableoid;
6260 : int i_oid;
6261 : int i_amname;
6262 : int i_amhandler;
6263 : int i_amtype;
6264 :
6265 : /* Before 9.6, there are no user-defined access methods */
6266 308 : if (fout->remoteVersion < 90600)
6267 0 : return;
6268 :
6269 308 : query = createPQExpBuffer();
6270 :
6271 : /* Select all access methods from pg_am table */
6272 308 : appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, amtype, "
6273 : "amhandler::pg_catalog.regproc AS amhandler "
6274 : "FROM pg_am");
6275 :
6276 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6277 :
6278 308 : ntups = PQntuples(res);
6279 :
6280 308 : aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
6281 :
6282 308 : i_tableoid = PQfnumber(res, "tableoid");
6283 308 : i_oid = PQfnumber(res, "oid");
6284 308 : i_amname = PQfnumber(res, "amname");
6285 308 : i_amhandler = PQfnumber(res, "amhandler");
6286 308 : i_amtype = PQfnumber(res, "amtype");
6287 :
6288 2700 : for (i = 0; i < ntups; i++)
6289 : {
6290 2392 : aminfo[i].dobj.objType = DO_ACCESS_METHOD;
6291 2392 : aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6292 2392 : aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6293 2392 : AssignDumpId(&aminfo[i].dobj);
6294 2392 : aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
6295 2392 : aminfo[i].dobj.namespace = NULL;
6296 2392 : aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
6297 2392 : aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
6298 :
6299 : /* Decide whether we want to dump it */
6300 2392 : selectDumpableAccessMethod(&(aminfo[i]), fout);
6301 : }
6302 :
6303 308 : PQclear(res);
6304 :
6305 308 : destroyPQExpBuffer(query);
6306 : }
6307 :
6308 :
6309 : /*
6310 : * getOpclasses:
6311 : * get information about all opclasses in the system catalogs
6312 : */
6313 : void
6314 308 : getOpclasses(Archive *fout)
6315 : {
6316 : PGresult *res;
6317 : int ntups;
6318 : int i;
6319 308 : PQExpBuffer query = createPQExpBuffer();
6320 : OpclassInfo *opcinfo;
6321 : int i_tableoid;
6322 : int i_oid;
6323 : int i_opcname;
6324 : int i_opcnamespace;
6325 : int i_opcowner;
6326 :
6327 : /*
6328 : * find all opclasses, including builtin opclasses; we filter out
6329 : * system-defined opclasses at dump-out time.
6330 : */
6331 :
6332 308 : appendPQExpBufferStr(query, "SELECT tableoid, oid, opcname, "
6333 : "opcnamespace, "
6334 : "opcowner "
6335 : "FROM pg_opclass");
6336 :
6337 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6338 :
6339 308 : ntups = PQntuples(res);
6340 :
6341 308 : opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
6342 :
6343 308 : i_tableoid = PQfnumber(res, "tableoid");
6344 308 : i_oid = PQfnumber(res, "oid");
6345 308 : i_opcname = PQfnumber(res, "opcname");
6346 308 : i_opcnamespace = PQfnumber(res, "opcnamespace");
6347 308 : i_opcowner = PQfnumber(res, "opcowner");
6348 :
6349 55124 : for (i = 0; i < ntups; i++)
6350 : {
6351 54816 : opcinfo[i].dobj.objType = DO_OPCLASS;
6352 54816 : opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6353 54816 : opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6354 54816 : AssignDumpId(&opcinfo[i].dobj);
6355 54816 : opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
6356 109632 : opcinfo[i].dobj.namespace =
6357 54816 : findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace)));
6358 54816 : opcinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opcowner));
6359 :
6360 : /* Decide whether we want to dump it */
6361 54816 : selectDumpableObject(&(opcinfo[i].dobj), fout);
6362 : }
6363 :
6364 308 : PQclear(res);
6365 :
6366 308 : destroyPQExpBuffer(query);
6367 308 : }
6368 :
6369 : /*
6370 : * getOpfamilies:
6371 : * get information about all opfamilies in the system catalogs
6372 : */
6373 : void
6374 308 : getOpfamilies(Archive *fout)
6375 : {
6376 : PGresult *res;
6377 : int ntups;
6378 : int i;
6379 : PQExpBuffer query;
6380 : OpfamilyInfo *opfinfo;
6381 : int i_tableoid;
6382 : int i_oid;
6383 : int i_opfname;
6384 : int i_opfnamespace;
6385 : int i_opfowner;
6386 :
6387 308 : query = createPQExpBuffer();
6388 :
6389 : /*
6390 : * find all opfamilies, including builtin opfamilies; we filter out
6391 : * system-defined opfamilies at dump-out time.
6392 : */
6393 :
6394 308 : appendPQExpBufferStr(query, "SELECT tableoid, oid, opfname, "
6395 : "opfnamespace, "
6396 : "opfowner "
6397 : "FROM pg_opfamily");
6398 :
6399 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6400 :
6401 308 : ntups = PQntuples(res);
6402 :
6403 308 : opfinfo = (OpfamilyInfo *) pg_malloc(ntups * sizeof(OpfamilyInfo));
6404 :
6405 308 : i_tableoid = PQfnumber(res, "tableoid");
6406 308 : i_oid = PQfnumber(res, "oid");
6407 308 : i_opfname = PQfnumber(res, "opfname");
6408 308 : i_opfnamespace = PQfnumber(res, "opfnamespace");
6409 308 : i_opfowner = PQfnumber(res, "opfowner");
6410 :
6411 45546 : for (i = 0; i < ntups; i++)
6412 : {
6413 45238 : opfinfo[i].dobj.objType = DO_OPFAMILY;
6414 45238 : opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6415 45238 : opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6416 45238 : AssignDumpId(&opfinfo[i].dobj);
6417 45238 : opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
6418 90476 : opfinfo[i].dobj.namespace =
6419 45238 : findNamespace(atooid(PQgetvalue(res, i, i_opfnamespace)));
6420 45238 : opfinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opfowner));
6421 :
6422 : /* Decide whether we want to dump it */
6423 45238 : selectDumpableObject(&(opfinfo[i].dobj), fout);
6424 : }
6425 :
6426 308 : PQclear(res);
6427 :
6428 308 : destroyPQExpBuffer(query);
6429 308 : }
6430 :
6431 : /*
6432 : * getAggregates:
6433 : * get information about all user-defined aggregates in the system catalogs
6434 : */
6435 : void
6436 308 : getAggregates(Archive *fout)
6437 : {
6438 308 : DumpOptions *dopt = fout->dopt;
6439 : PGresult *res;
6440 : int ntups;
6441 : int i;
6442 308 : PQExpBuffer query = createPQExpBuffer();
6443 : AggInfo *agginfo;
6444 : int i_tableoid;
6445 : int i_oid;
6446 : int i_aggname;
6447 : int i_aggnamespace;
6448 : int i_pronargs;
6449 : int i_proargtypes;
6450 : int i_proowner;
6451 : int i_aggacl;
6452 : int i_acldefault;
6453 :
6454 : /*
6455 : * Find all interesting aggregates. See comment in getFuncs() for the
6456 : * rationale behind the filtering logic.
6457 : */
6458 308 : if (fout->remoteVersion >= 90600)
6459 : {
6460 : const char *agg_check;
6461 :
6462 616 : agg_check = (fout->remoteVersion >= 110000 ? "p.prokind = 'a'"
6463 308 : : "p.proisagg");
6464 :
6465 308 : appendPQExpBuffer(query, "SELECT p.tableoid, p.oid, "
6466 : "p.proname AS aggname, "
6467 : "p.pronamespace AS aggnamespace, "
6468 : "p.pronargs, p.proargtypes, "
6469 : "p.proowner, "
6470 : "p.proacl AS aggacl, "
6471 : "acldefault('f', p.proowner) AS acldefault "
6472 : "FROM pg_proc p "
6473 : "LEFT JOIN pg_init_privs pip ON "
6474 : "(p.oid = pip.objoid "
6475 : "AND pip.classoid = 'pg_proc'::regclass "
6476 : "AND pip.objsubid = 0) "
6477 : "WHERE %s AND ("
6478 : "p.pronamespace != "
6479 : "(SELECT oid FROM pg_namespace "
6480 : "WHERE nspname = 'pg_catalog') OR "
6481 : "p.proacl IS DISTINCT FROM pip.initprivs",
6482 : agg_check);
6483 308 : if (dopt->binary_upgrade)
6484 28 : appendPQExpBufferStr(query,
6485 : " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6486 : "classid = 'pg_proc'::regclass AND "
6487 : "objid = p.oid AND "
6488 : "refclassid = 'pg_extension'::regclass AND "
6489 : "deptype = 'e')");
6490 308 : appendPQExpBufferChar(query, ')');
6491 : }
6492 : else
6493 : {
6494 0 : appendPQExpBufferStr(query, "SELECT tableoid, oid, proname AS aggname, "
6495 : "pronamespace AS aggnamespace, "
6496 : "pronargs, proargtypes, "
6497 : "proowner, "
6498 : "proacl AS aggacl, "
6499 : "acldefault('f', proowner) AS acldefault "
6500 : "FROM pg_proc p "
6501 : "WHERE proisagg AND ("
6502 : "pronamespace != "
6503 : "(SELECT oid FROM pg_namespace "
6504 : "WHERE nspname = 'pg_catalog')");
6505 0 : if (dopt->binary_upgrade)
6506 0 : appendPQExpBufferStr(query,
6507 : " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6508 : "classid = 'pg_proc'::regclass AND "
6509 : "objid = p.oid AND "
6510 : "refclassid = 'pg_extension'::regclass AND "
6511 : "deptype = 'e')");
6512 0 : appendPQExpBufferChar(query, ')');
6513 : }
6514 :
6515 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6516 :
6517 308 : ntups = PQntuples(res);
6518 :
6519 308 : agginfo = (AggInfo *) pg_malloc(ntups * sizeof(AggInfo));
6520 :
6521 308 : i_tableoid = PQfnumber(res, "tableoid");
6522 308 : i_oid = PQfnumber(res, "oid");
6523 308 : i_aggname = PQfnumber(res, "aggname");
6524 308 : i_aggnamespace = PQfnumber(res, "aggnamespace");
6525 308 : i_pronargs = PQfnumber(res, "pronargs");
6526 308 : i_proargtypes = PQfnumber(res, "proargtypes");
6527 308 : i_proowner = PQfnumber(res, "proowner");
6528 308 : i_aggacl = PQfnumber(res, "aggacl");
6529 308 : i_acldefault = PQfnumber(res, "acldefault");
6530 :
6531 1102 : for (i = 0; i < ntups; i++)
6532 : {
6533 794 : agginfo[i].aggfn.dobj.objType = DO_AGG;
6534 794 : agginfo[i].aggfn.dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6535 794 : agginfo[i].aggfn.dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6536 794 : AssignDumpId(&agginfo[i].aggfn.dobj);
6537 794 : agginfo[i].aggfn.dobj.name = pg_strdup(PQgetvalue(res, i, i_aggname));
6538 1588 : agginfo[i].aggfn.dobj.namespace =
6539 794 : findNamespace(atooid(PQgetvalue(res, i, i_aggnamespace)));
6540 794 : agginfo[i].aggfn.dacl.acl = pg_strdup(PQgetvalue(res, i, i_aggacl));
6541 794 : agginfo[i].aggfn.dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6542 794 : agginfo[i].aggfn.dacl.privtype = 0;
6543 794 : agginfo[i].aggfn.dacl.initprivs = NULL;
6544 794 : agginfo[i].aggfn.rolname = getRoleName(PQgetvalue(res, i, i_proowner));
6545 794 : agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
6546 794 : agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */
6547 794 : agginfo[i].aggfn.nargs = atoi(PQgetvalue(res, i, i_pronargs));
6548 794 : if (agginfo[i].aggfn.nargs == 0)
6549 112 : agginfo[i].aggfn.argtypes = NULL;
6550 : else
6551 : {
6552 682 : agginfo[i].aggfn.argtypes = (Oid *) pg_malloc(agginfo[i].aggfn.nargs * sizeof(Oid));
6553 682 : parseOidArray(PQgetvalue(res, i, i_proargtypes),
6554 682 : agginfo[i].aggfn.argtypes,
6555 682 : agginfo[i].aggfn.nargs);
6556 : }
6557 794 : agginfo[i].aggfn.postponed_def = false; /* might get set during sort */
6558 :
6559 : /* Decide whether we want to dump it */
6560 794 : selectDumpableObject(&(agginfo[i].aggfn.dobj), fout);
6561 :
6562 : /* Mark whether aggregate has an ACL */
6563 794 : if (!PQgetisnull(res, i, i_aggacl))
6564 50 : agginfo[i].aggfn.dobj.components |= DUMP_COMPONENT_ACL;
6565 : }
6566 :
6567 308 : PQclear(res);
6568 :
6569 308 : destroyPQExpBuffer(query);
6570 308 : }
6571 :
6572 : /*
6573 : * getFuncs:
6574 : * get information about all user-defined functions in the system catalogs
6575 : */
6576 : void
6577 308 : getFuncs(Archive *fout)
6578 : {
6579 308 : DumpOptions *dopt = fout->dopt;
6580 : PGresult *res;
6581 : int ntups;
6582 : int i;
6583 308 : PQExpBuffer query = createPQExpBuffer();
6584 : FuncInfo *finfo;
6585 : int i_tableoid;
6586 : int i_oid;
6587 : int i_proname;
6588 : int i_pronamespace;
6589 : int i_proowner;
6590 : int i_prolang;
6591 : int i_pronargs;
6592 : int i_proargtypes;
6593 : int i_prorettype;
6594 : int i_proacl;
6595 : int i_acldefault;
6596 :
6597 : /*
6598 : * Find all interesting functions. This is a bit complicated:
6599 : *
6600 : * 1. Always exclude aggregates; those are handled elsewhere.
6601 : *
6602 : * 2. Always exclude functions that are internally dependent on something
6603 : * else, since presumably those will be created as a result of creating
6604 : * the something else. This currently acts only to suppress constructor
6605 : * functions for range types. Note this is OK only because the
6606 : * constructors don't have any dependencies the range type doesn't have;
6607 : * otherwise we might not get creation ordering correct.
6608 : *
6609 : * 3. Otherwise, we normally exclude functions in pg_catalog. However, if
6610 : * they're members of extensions and we are in binary-upgrade mode then
6611 : * include them, since we want to dump extension members individually in
6612 : * that mode. Also, if they are used by casts or transforms then we need
6613 : * to gather the information about them, though they won't be dumped if
6614 : * they are built-in. Also, in 9.6 and up, include functions in
6615 : * pg_catalog if they have an ACL different from what's shown in
6616 : * pg_init_privs (so we have to join to pg_init_privs; annoying).
6617 : */
6618 308 : if (fout->remoteVersion >= 90600)
6619 : {
6620 : const char *not_agg_check;
6621 :
6622 616 : not_agg_check = (fout->remoteVersion >= 110000 ? "p.prokind <> 'a'"
6623 308 : : "NOT p.proisagg");
6624 :
6625 308 : appendPQExpBuffer(query,
6626 : "SELECT p.tableoid, p.oid, p.proname, p.prolang, "
6627 : "p.pronargs, p.proargtypes, p.prorettype, "
6628 : "p.proacl, "
6629 : "acldefault('f', p.proowner) AS acldefault, "
6630 : "p.pronamespace, "
6631 : "p.proowner "
6632 : "FROM pg_proc p "
6633 : "LEFT JOIN pg_init_privs pip ON "
6634 : "(p.oid = pip.objoid "
6635 : "AND pip.classoid = 'pg_proc'::regclass "
6636 : "AND pip.objsubid = 0) "
6637 : "WHERE %s"
6638 : "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
6639 : "WHERE classid = 'pg_proc'::regclass AND "
6640 : "objid = p.oid AND deptype = 'i')"
6641 : "\n AND ("
6642 : "\n pronamespace != "
6643 : "(SELECT oid FROM pg_namespace "
6644 : "WHERE nspname = 'pg_catalog')"
6645 : "\n OR EXISTS (SELECT 1 FROM pg_cast"
6646 : "\n WHERE pg_cast.oid > %u "
6647 : "\n AND p.oid = pg_cast.castfunc)"
6648 : "\n OR EXISTS (SELECT 1 FROM pg_transform"
6649 : "\n WHERE pg_transform.oid > %u AND "
6650 : "\n (p.oid = pg_transform.trffromsql"
6651 : "\n OR p.oid = pg_transform.trftosql))",
6652 : not_agg_check,
6653 : g_last_builtin_oid,
6654 : g_last_builtin_oid);
6655 308 : if (dopt->binary_upgrade)
6656 28 : appendPQExpBufferStr(query,
6657 : "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6658 : "classid = 'pg_proc'::regclass AND "
6659 : "objid = p.oid AND "
6660 : "refclassid = 'pg_extension'::regclass AND "
6661 : "deptype = 'e')");
6662 308 : appendPQExpBufferStr(query,
6663 : "\n OR p.proacl IS DISTINCT FROM pip.initprivs");
6664 308 : appendPQExpBufferChar(query, ')');
6665 : }
6666 : else
6667 : {
6668 0 : appendPQExpBuffer(query,
6669 : "SELECT tableoid, oid, proname, prolang, "
6670 : "pronargs, proargtypes, prorettype, proacl, "
6671 : "acldefault('f', proowner) AS acldefault, "
6672 : "pronamespace, "
6673 : "proowner "
6674 : "FROM pg_proc p "
6675 : "WHERE NOT proisagg"
6676 : "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
6677 : "WHERE classid = 'pg_proc'::regclass AND "
6678 : "objid = p.oid AND deptype = 'i')"
6679 : "\n AND ("
6680 : "\n pronamespace != "
6681 : "(SELECT oid FROM pg_namespace "
6682 : "WHERE nspname = 'pg_catalog')"
6683 : "\n OR EXISTS (SELECT 1 FROM pg_cast"
6684 : "\n WHERE pg_cast.oid > '%u'::oid"
6685 : "\n AND p.oid = pg_cast.castfunc)",
6686 : g_last_builtin_oid);
6687 :
6688 0 : if (fout->remoteVersion >= 90500)
6689 0 : appendPQExpBuffer(query,
6690 : "\n OR EXISTS (SELECT 1 FROM pg_transform"
6691 : "\n WHERE pg_transform.oid > '%u'::oid"
6692 : "\n AND (p.oid = pg_transform.trffromsql"
6693 : "\n OR p.oid = pg_transform.trftosql))",
6694 : g_last_builtin_oid);
6695 :
6696 0 : if (dopt->binary_upgrade)
6697 0 : appendPQExpBufferStr(query,
6698 : "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6699 : "classid = 'pg_proc'::regclass AND "
6700 : "objid = p.oid AND "
6701 : "refclassid = 'pg_extension'::regclass AND "
6702 : "deptype = 'e')");
6703 0 : appendPQExpBufferChar(query, ')');
6704 : }
6705 :
6706 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6707 :
6708 308 : ntups = PQntuples(res);
6709 :
6710 308 : finfo = (FuncInfo *) pg_malloc0(ntups * sizeof(FuncInfo));
6711 :
6712 308 : i_tableoid = PQfnumber(res, "tableoid");
6713 308 : i_oid = PQfnumber(res, "oid");
6714 308 : i_proname = PQfnumber(res, "proname");
6715 308 : i_pronamespace = PQfnumber(res, "pronamespace");
6716 308 : i_proowner = PQfnumber(res, "proowner");
6717 308 : i_prolang = PQfnumber(res, "prolang");
6718 308 : i_pronargs = PQfnumber(res, "pronargs");
6719 308 : i_proargtypes = PQfnumber(res, "proargtypes");
6720 308 : i_prorettype = PQfnumber(res, "prorettype");
6721 308 : i_proacl = PQfnumber(res, "proacl");
6722 308 : i_acldefault = PQfnumber(res, "acldefault");
6723 :
6724 8864 : for (i = 0; i < ntups; i++)
6725 : {
6726 8556 : finfo[i].dobj.objType = DO_FUNC;
6727 8556 : finfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6728 8556 : finfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6729 8556 : AssignDumpId(&finfo[i].dobj);
6730 8556 : finfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_proname));
6731 17112 : finfo[i].dobj.namespace =
6732 8556 : findNamespace(atooid(PQgetvalue(res, i, i_pronamespace)));
6733 8556 : finfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_proacl));
6734 8556 : finfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6735 8556 : finfo[i].dacl.privtype = 0;
6736 8556 : finfo[i].dacl.initprivs = NULL;
6737 8556 : finfo[i].rolname = getRoleName(PQgetvalue(res, i, i_proowner));
6738 8556 : finfo[i].lang = atooid(PQgetvalue(res, i, i_prolang));
6739 8556 : finfo[i].prorettype = atooid(PQgetvalue(res, i, i_prorettype));
6740 8556 : finfo[i].nargs = atoi(PQgetvalue(res, i, i_pronargs));
6741 8556 : if (finfo[i].nargs == 0)
6742 2006 : finfo[i].argtypes = NULL;
6743 : else
6744 : {
6745 6550 : finfo[i].argtypes = (Oid *) pg_malloc(finfo[i].nargs * sizeof(Oid));
6746 6550 : parseOidArray(PQgetvalue(res, i, i_proargtypes),
6747 6550 : finfo[i].argtypes, finfo[i].nargs);
6748 : }
6749 8556 : finfo[i].postponed_def = false; /* might get set during sort */
6750 :
6751 : /* Decide whether we want to dump it */
6752 8556 : selectDumpableObject(&(finfo[i].dobj), fout);
6753 :
6754 : /* Mark whether function has an ACL */
6755 8556 : if (!PQgetisnull(res, i, i_proacl))
6756 272 : finfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6757 : }
6758 :
6759 308 : PQclear(res);
6760 :
6761 308 : destroyPQExpBuffer(query);
6762 308 : }
6763 :
6764 : /*
6765 : * getTables
6766 : * read all the tables (no indexes) in the system catalogs,
6767 : * and return them as an array of TableInfo structures
6768 : *
6769 : * *numTables is set to the number of tables read in
6770 : */
6771 : TableInfo *
6772 310 : getTables(Archive *fout, int *numTables)
6773 : {
6774 310 : DumpOptions *dopt = fout->dopt;
6775 : PGresult *res;
6776 : int ntups;
6777 : int i;
6778 310 : PQExpBuffer query = createPQExpBuffer();
6779 : TableInfo *tblinfo;
6780 : int i_reltableoid;
6781 : int i_reloid;
6782 : int i_relname;
6783 : int i_relnamespace;
6784 : int i_relkind;
6785 : int i_reltype;
6786 : int i_relowner;
6787 : int i_relchecks;
6788 : int i_relhasindex;
6789 : int i_relhasrules;
6790 : int i_relpages;
6791 : int i_toastpages;
6792 : int i_owning_tab;
6793 : int i_owning_col;
6794 : int i_reltablespace;
6795 : int i_relhasoids;
6796 : int i_relhastriggers;
6797 : int i_relpersistence;
6798 : int i_relispopulated;
6799 : int i_relreplident;
6800 : int i_relrowsec;
6801 : int i_relforcerowsec;
6802 : int i_relfrozenxid;
6803 : int i_toastfrozenxid;
6804 : int i_toastoid;
6805 : int i_relminmxid;
6806 : int i_toastminmxid;
6807 : int i_reloptions;
6808 : int i_checkoption;
6809 : int i_toastreloptions;
6810 : int i_reloftype;
6811 : int i_foreignserver;
6812 : int i_amname;
6813 : int i_is_identity_sequence;
6814 : int i_relacl;
6815 : int i_acldefault;
6816 : int i_ispartition;
6817 :
6818 : /*
6819 : * Find all the tables and table-like objects.
6820 : *
6821 : * We must fetch all tables in this phase because otherwise we cannot
6822 : * correctly identify inherited columns, owned sequences, etc.
6823 : *
6824 : * We include system catalogs, so that we can work if a user table is
6825 : * defined to inherit from a system catalog (pretty weird, but...)
6826 : *
6827 : * Note: in this phase we should collect only a minimal amount of
6828 : * information about each table, basically just enough to decide if it is
6829 : * interesting. In particular, since we do not yet have lock on any user
6830 : * table, we MUST NOT invoke any server-side data collection functions
6831 : * (for instance, pg_get_partkeydef()). Those are likely to fail or give
6832 : * wrong answers if any concurrent DDL is happening.
6833 : */
6834 :
6835 310 : appendPQExpBufferStr(query,
6836 : "SELECT c.tableoid, c.oid, c.relname, "
6837 : "c.relnamespace, c.relkind, c.reltype, "
6838 : "c.relowner, "
6839 : "c.relchecks, "
6840 : "c.relhasindex, c.relhasrules, c.relpages, "
6841 : "c.relhastriggers, "
6842 : "c.relpersistence, "
6843 : "c.reloftype, "
6844 : "c.relacl, "
6845 : "acldefault(CASE WHEN c.relkind = " CppAsString2(RELKIND_SEQUENCE)
6846 : " THEN 's'::\"char\" ELSE 'r'::\"char\" END, c.relowner) AS acldefault, "
6847 : "CASE WHEN c.relkind = " CppAsString2(RELKIND_FOREIGN_TABLE) " THEN "
6848 : "(SELECT ftserver FROM pg_catalog.pg_foreign_table WHERE ftrelid = c.oid) "
6849 : "ELSE 0 END AS foreignserver, "
6850 : "c.relfrozenxid, tc.relfrozenxid AS tfrozenxid, "
6851 : "tc.oid AS toid, "
6852 : "tc.relpages AS toastpages, "
6853 : "tc.reloptions AS toast_reloptions, "
6854 : "d.refobjid AS owning_tab, "
6855 : "d.refobjsubid AS owning_col, "
6856 : "tsp.spcname AS reltablespace, ");
6857 :
6858 310 : if (fout->remoteVersion >= 120000)
6859 310 : appendPQExpBufferStr(query,
6860 : "false AS relhasoids, ");
6861 : else
6862 0 : appendPQExpBufferStr(query,
6863 : "c.relhasoids, ");
6864 :
6865 310 : if (fout->remoteVersion >= 90300)
6866 310 : appendPQExpBufferStr(query,
6867 : "c.relispopulated, ");
6868 : else
6869 0 : appendPQExpBufferStr(query,
6870 : "'t' as relispopulated, ");
6871 :
6872 310 : if (fout->remoteVersion >= 90400)
6873 310 : appendPQExpBufferStr(query,
6874 : "c.relreplident, ");
6875 : else
6876 0 : appendPQExpBufferStr(query,
6877 : "'d' AS relreplident, ");
6878 :
6879 310 : if (fout->remoteVersion >= 90500)
6880 310 : appendPQExpBufferStr(query,
6881 : "c.relrowsecurity, c.relforcerowsecurity, ");
6882 : else
6883 0 : appendPQExpBufferStr(query,
6884 : "false AS relrowsecurity, "
6885 : "false AS relforcerowsecurity, ");
6886 :
6887 310 : if (fout->remoteVersion >= 90300)
6888 310 : appendPQExpBufferStr(query,
6889 : "c.relminmxid, tc.relminmxid AS tminmxid, ");
6890 : else
6891 0 : appendPQExpBufferStr(query,
6892 : "0 AS relminmxid, 0 AS tminmxid, ");
6893 :
6894 310 : if (fout->remoteVersion >= 90300)
6895 310 : appendPQExpBufferStr(query,
6896 : "array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded') AS reloptions, "
6897 : "CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
6898 : "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, ");
6899 : else
6900 0 : appendPQExpBufferStr(query,
6901 : "c.reloptions, NULL AS checkoption, ");
6902 :
6903 310 : if (fout->remoteVersion >= 90600)
6904 310 : appendPQExpBufferStr(query,
6905 : "am.amname, ");
6906 : else
6907 0 : appendPQExpBufferStr(query,
6908 : "NULL AS amname, ");
6909 :
6910 310 : if (fout->remoteVersion >= 90600)
6911 310 : appendPQExpBufferStr(query,
6912 : "(d.deptype = 'i') IS TRUE AS is_identity_sequence, ");
6913 : else
6914 0 : appendPQExpBufferStr(query,
6915 : "false AS is_identity_sequence, ");
6916 :
6917 310 : if (fout->remoteVersion >= 100000)
6918 310 : appendPQExpBufferStr(query,
6919 : "c.relispartition AS ispartition ");
6920 : else
6921 0 : appendPQExpBufferStr(query,
6922 : "false AS ispartition ");
6923 :
6924 : /*
6925 : * Left join to pg_depend to pick up dependency info linking sequences to
6926 : * their owning column, if any (note this dependency is AUTO except for
6927 : * identity sequences, where it's INTERNAL). Also join to pg_tablespace to
6928 : * collect the spcname.
6929 : */
6930 310 : appendPQExpBufferStr(query,
6931 : "\nFROM pg_class c\n"
6932 : "LEFT JOIN pg_depend d ON "
6933 : "(c.relkind = " CppAsString2(RELKIND_SEQUENCE) " AND "
6934 : "d.classid = 'pg_class'::regclass AND d.objid = c.oid AND "
6935 : "d.objsubid = 0 AND "
6936 : "d.refclassid = 'pg_class'::regclass AND d.deptype IN ('a', 'i'))\n"
6937 : "LEFT JOIN pg_tablespace tsp ON (tsp.oid = c.reltablespace)\n");
6938 :
6939 : /*
6940 : * In 9.6 and up, left join to pg_am to pick up the amname.
6941 : */
6942 310 : if (fout->remoteVersion >= 90600)
6943 310 : appendPQExpBufferStr(query,
6944 : "LEFT JOIN pg_am am ON (c.relam = am.oid)\n");
6945 :
6946 : /*
6947 : * We purposefully ignore toast OIDs for partitioned tables; the reason is
6948 : * that versions 10 and 11 have them, but later versions do not, so
6949 : * emitting them causes the upgrade to fail.
6950 : */
6951 310 : appendPQExpBufferStr(query,
6952 : "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid"
6953 : " AND tc.relkind = " CppAsString2(RELKIND_TOASTVALUE)
6954 : " AND c.relkind <> " CppAsString2(RELKIND_PARTITIONED_TABLE) ")\n");
6955 :
6956 : /*
6957 : * Restrict to interesting relkinds (in particular, not indexes). Not all
6958 : * relkinds are possible in older servers, but it's not worth the trouble
6959 : * to emit a version-dependent list.
6960 : *
6961 : * Composite-type table entries won't be dumped as such, but we have to
6962 : * make a DumpableObject for them so that we can track dependencies of the
6963 : * composite type (pg_depend entries for columns of the composite type
6964 : * link to the pg_class entry not the pg_type entry).
6965 : */
6966 310 : appendPQExpBufferStr(query,
6967 : "WHERE c.relkind IN ("
6968 : CppAsString2(RELKIND_RELATION) ", "
6969 : CppAsString2(RELKIND_SEQUENCE) ", "
6970 : CppAsString2(RELKIND_VIEW) ", "
6971 : CppAsString2(RELKIND_COMPOSITE_TYPE) ", "
6972 : CppAsString2(RELKIND_MATVIEW) ", "
6973 : CppAsString2(RELKIND_FOREIGN_TABLE) ", "
6974 : CppAsString2(RELKIND_PARTITIONED_TABLE) ")\n"
6975 : "ORDER BY c.oid");
6976 :
6977 310 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6978 :
6979 310 : ntups = PQntuples(res);
6980 :
6981 310 : *numTables = ntups;
6982 :
6983 : /*
6984 : * Extract data from result and lock dumpable tables. We do the locking
6985 : * before anything else, to minimize the window wherein a table could
6986 : * disappear under us.
6987 : *
6988 : * Note that we have to save info about all tables here, even when dumping
6989 : * only one, because we don't yet know which tables might be inheritance
6990 : * ancestors of the target table.
6991 : */
6992 310 : tblinfo = (TableInfo *) pg_malloc0(ntups * sizeof(TableInfo));
6993 :
6994 310 : i_reltableoid = PQfnumber(res, "tableoid");
6995 310 : i_reloid = PQfnumber(res, "oid");
6996 310 : i_relname = PQfnumber(res, "relname");
6997 310 : i_relnamespace = PQfnumber(res, "relnamespace");
6998 310 : i_relkind = PQfnumber(res, "relkind");
6999 310 : i_reltype = PQfnumber(res, "reltype");
7000 310 : i_relowner = PQfnumber(res, "relowner");
7001 310 : i_relchecks = PQfnumber(res, "relchecks");
7002 310 : i_relhasindex = PQfnumber(res, "relhasindex");
7003 310 : i_relhasrules = PQfnumber(res, "relhasrules");
7004 310 : i_relpages = PQfnumber(res, "relpages");
7005 310 : i_toastpages = PQfnumber(res, "toastpages");
7006 310 : i_owning_tab = PQfnumber(res, "owning_tab");
7007 310 : i_owning_col = PQfnumber(res, "owning_col");
7008 310 : i_reltablespace = PQfnumber(res, "reltablespace");
7009 310 : i_relhasoids = PQfnumber(res, "relhasoids");
7010 310 : i_relhastriggers = PQfnumber(res, "relhastriggers");
7011 310 : i_relpersistence = PQfnumber(res, "relpersistence");
7012 310 : i_relispopulated = PQfnumber(res, "relispopulated");
7013 310 : i_relreplident = PQfnumber(res, "relreplident");
7014 310 : i_relrowsec = PQfnumber(res, "relrowsecurity");
7015 310 : i_relforcerowsec = PQfnumber(res, "relforcerowsecurity");
7016 310 : i_relfrozenxid = PQfnumber(res, "relfrozenxid");
7017 310 : i_toastfrozenxid = PQfnumber(res, "tfrozenxid");
7018 310 : i_toastoid = PQfnumber(res, "toid");
7019 310 : i_relminmxid = PQfnumber(res, "relminmxid");
7020 310 : i_toastminmxid = PQfnumber(res, "tminmxid");
7021 310 : i_reloptions = PQfnumber(res, "reloptions");
7022 310 : i_checkoption = PQfnumber(res, "checkoption");
7023 310 : i_toastreloptions = PQfnumber(res, "toast_reloptions");
7024 310 : i_reloftype = PQfnumber(res, "reloftype");
7025 310 : i_foreignserver = PQfnumber(res, "foreignserver");
7026 310 : i_amname = PQfnumber(res, "amname");
7027 310 : i_is_identity_sequence = PQfnumber(res, "is_identity_sequence");
7028 310 : i_relacl = PQfnumber(res, "relacl");
7029 310 : i_acldefault = PQfnumber(res, "acldefault");
7030 310 : i_ispartition = PQfnumber(res, "ispartition");
7031 :
7032 310 : if (dopt->lockWaitTimeout)
7033 : {
7034 : /*
7035 : * Arrange to fail instead of waiting forever for a table lock.
7036 : *
7037 : * NB: this coding assumes that the only queries issued within the
7038 : * following loop are LOCK TABLEs; else the timeout may be undesirably
7039 : * applied to other things too.
7040 : */
7041 4 : resetPQExpBuffer(query);
7042 4 : appendPQExpBufferStr(query, "SET statement_timeout = ");
7043 4 : appendStringLiteralConn(query, dopt->lockWaitTimeout, GetConnection(fout));
7044 4 : ExecuteSqlStatement(fout, query->data);
7045 : }
7046 :
7047 310 : resetPQExpBuffer(query);
7048 :
7049 81498 : for (i = 0; i < ntups; i++)
7050 : {
7051 81188 : tblinfo[i].dobj.objType = DO_TABLE;
7052 81188 : tblinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_reltableoid));
7053 81188 : tblinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_reloid));
7054 81188 : AssignDumpId(&tblinfo[i].dobj);
7055 81188 : tblinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_relname));
7056 162376 : tblinfo[i].dobj.namespace =
7057 81188 : findNamespace(atooid(PQgetvalue(res, i, i_relnamespace)));
7058 81188 : tblinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_relacl));
7059 81188 : tblinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
7060 81188 : tblinfo[i].dacl.privtype = 0;
7061 81188 : tblinfo[i].dacl.initprivs = NULL;
7062 81188 : tblinfo[i].relkind = *(PQgetvalue(res, i, i_relkind));
7063 81188 : tblinfo[i].reltype = atooid(PQgetvalue(res, i, i_reltype));
7064 81188 : tblinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_relowner));
7065 81188 : tblinfo[i].ncheck = atoi(PQgetvalue(res, i, i_relchecks));
7066 81188 : tblinfo[i].hasindex = (strcmp(PQgetvalue(res, i, i_relhasindex), "t") == 0);
7067 81188 : tblinfo[i].hasrules = (strcmp(PQgetvalue(res, i, i_relhasrules), "t") == 0);
7068 81188 : tblinfo[i].relpages = atoi(PQgetvalue(res, i, i_relpages));
7069 81188 : if (PQgetisnull(res, i, i_toastpages))
7070 63966 : tblinfo[i].toastpages = 0;
7071 : else
7072 17222 : tblinfo[i].toastpages = atoi(PQgetvalue(res, i, i_toastpages));
7073 81188 : if (PQgetisnull(res, i, i_owning_tab))
7074 : {
7075 80440 : tblinfo[i].owning_tab = InvalidOid;
7076 80440 : tblinfo[i].owning_col = 0;
7077 : }
7078 : else
7079 : {
7080 748 : tblinfo[i].owning_tab = atooid(PQgetvalue(res, i, i_owning_tab));
7081 748 : tblinfo[i].owning_col = atoi(PQgetvalue(res, i, i_owning_col));
7082 : }
7083 81188 : tblinfo[i].reltablespace = pg_strdup(PQgetvalue(res, i, i_reltablespace));
7084 81188 : tblinfo[i].hasoids = (strcmp(PQgetvalue(res, i, i_relhasoids), "t") == 0);
7085 81188 : tblinfo[i].hastriggers = (strcmp(PQgetvalue(res, i, i_relhastriggers), "t") == 0);
7086 81188 : tblinfo[i].relpersistence = *(PQgetvalue(res, i, i_relpersistence));
7087 81188 : tblinfo[i].relispopulated = (strcmp(PQgetvalue(res, i, i_relispopulated), "t") == 0);
7088 81188 : tblinfo[i].relreplident = *(PQgetvalue(res, i, i_relreplident));
7089 81188 : tblinfo[i].rowsec = (strcmp(PQgetvalue(res, i, i_relrowsec), "t") == 0);
7090 81188 : tblinfo[i].forcerowsec = (strcmp(PQgetvalue(res, i, i_relforcerowsec), "t") == 0);
7091 81188 : tblinfo[i].frozenxid = atooid(PQgetvalue(res, i, i_relfrozenxid));
7092 81188 : tblinfo[i].toast_frozenxid = atooid(PQgetvalue(res, i, i_toastfrozenxid));
7093 81188 : tblinfo[i].toast_oid = atooid(PQgetvalue(res, i, i_toastoid));
7094 81188 : tblinfo[i].minmxid = atooid(PQgetvalue(res, i, i_relminmxid));
7095 81188 : tblinfo[i].toast_minmxid = atooid(PQgetvalue(res, i, i_toastminmxid));
7096 81188 : tblinfo[i].reloptions = pg_strdup(PQgetvalue(res, i, i_reloptions));
7097 81188 : if (PQgetisnull(res, i, i_checkoption))
7098 81100 : tblinfo[i].checkoption = NULL;
7099 : else
7100 88 : tblinfo[i].checkoption = pg_strdup(PQgetvalue(res, i, i_checkoption));
7101 81188 : tblinfo[i].toast_reloptions = pg_strdup(PQgetvalue(res, i, i_toastreloptions));
7102 81188 : tblinfo[i].reloftype = atooid(PQgetvalue(res, i, i_reloftype));
7103 81188 : tblinfo[i].foreign_server = atooid(PQgetvalue(res, i, i_foreignserver));
7104 81188 : if (PQgetisnull(res, i, i_amname))
7105 48070 : tblinfo[i].amname = NULL;
7106 : else
7107 33118 : tblinfo[i].amname = pg_strdup(PQgetvalue(res, i, i_amname));
7108 81188 : tblinfo[i].is_identity_sequence = (strcmp(PQgetvalue(res, i, i_is_identity_sequence), "t") == 0);
7109 81188 : tblinfo[i].ispartition = (strcmp(PQgetvalue(res, i, i_ispartition), "t") == 0);
7110 :
7111 : /* other fields were zeroed above */
7112 :
7113 : /*
7114 : * Decide whether we want to dump this table.
7115 : */
7116 81188 : if (tblinfo[i].relkind == RELKIND_COMPOSITE_TYPE)
7117 362 : tblinfo[i].dobj.dump = DUMP_COMPONENT_NONE;
7118 : else
7119 80826 : selectDumpableTable(&tblinfo[i], fout);
7120 :
7121 : /*
7122 : * Now, consider the table "interesting" if we need to dump its
7123 : * definition or its data. Later on, we'll skip a lot of data
7124 : * collection for uninteresting tables.
7125 : *
7126 : * Note: the "interesting" flag will also be set by flagInhTables for
7127 : * parents of interesting tables, so that we collect necessary
7128 : * inheritance info even when the parents are not themselves being
7129 : * dumped. This is the main reason why we need an "interesting" flag
7130 : * that's separate from the components-to-dump bitmask.
7131 : */
7132 81188 : tblinfo[i].interesting = (tblinfo[i].dobj.dump &
7133 : (DUMP_COMPONENT_DEFINITION |
7134 81188 : DUMP_COMPONENT_DATA)) != 0;
7135 :
7136 81188 : tblinfo[i].dummy_view = false; /* might get set during sort */
7137 81188 : tblinfo[i].postponed_def = false; /* might get set during sort */
7138 :
7139 : /* Tables have data */
7140 81188 : tblinfo[i].dobj.components |= DUMP_COMPONENT_DATA;
7141 :
7142 : /* Mark whether table has an ACL */
7143 81188 : if (!PQgetisnull(res, i, i_relacl))
7144 64352 : tblinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
7145 81188 : tblinfo[i].hascolumnACLs = false; /* may get set later */
7146 :
7147 : /*
7148 : * Read-lock target tables to make sure they aren't DROPPED or altered
7149 : * in schema before we get around to dumping them.
7150 : *
7151 : * Note that we don't explicitly lock parents of the target tables; we
7152 : * assume our lock on the child is enough to prevent schema
7153 : * alterations to parent tables.
7154 : *
7155 : * NOTE: it'd be kinda nice to lock other relations too, not only
7156 : * plain or partitioned tables, but the backend doesn't presently
7157 : * allow that.
7158 : *
7159 : * We only need to lock the table for certain components; see
7160 : * pg_dump.h
7161 : */
7162 81188 : if ((tblinfo[i].dobj.dump & DUMP_COMPONENTS_REQUIRING_LOCK) &&
7163 12080 : (tblinfo[i].relkind == RELKIND_RELATION ||
7164 3616 : tblinfo[i].relkind == RELKIND_PARTITIONED_TABLE))
7165 : {
7166 : /*
7167 : * Tables are locked in batches. When dumping from a remote
7168 : * server this can save a significant amount of time by reducing
7169 : * the number of round trips.
7170 : */
7171 9514 : if (query->len == 0)
7172 198 : appendPQExpBuffer(query, "LOCK TABLE %s",
7173 198 : fmtQualifiedDumpable(&tblinfo[i]));
7174 : else
7175 : {
7176 9316 : appendPQExpBuffer(query, ", %s",
7177 9316 : fmtQualifiedDumpable(&tblinfo[i]));
7178 :
7179 : /* Arbitrarily end a batch when query length reaches 100K. */
7180 9316 : if (query->len >= 100000)
7181 : {
7182 : /* Lock another batch of tables. */
7183 0 : appendPQExpBufferStr(query, " IN ACCESS SHARE MODE");
7184 0 : ExecuteSqlStatement(fout, query->data);
7185 0 : resetPQExpBuffer(query);
7186 : }
7187 : }
7188 : }
7189 : }
7190 :
7191 310 : if (query->len != 0)
7192 : {
7193 : /* Lock the tables in the last batch. */
7194 198 : appendPQExpBufferStr(query, " IN ACCESS SHARE MODE");
7195 198 : ExecuteSqlStatement(fout, query->data);
7196 : }
7197 :
7198 308 : if (dopt->lockWaitTimeout)
7199 : {
7200 4 : ExecuteSqlStatement(fout, "SET statement_timeout = 0");
7201 : }
7202 :
7203 308 : PQclear(res);
7204 :
7205 308 : destroyPQExpBuffer(query);
7206 :
7207 308 : return tblinfo;
7208 : }
7209 :
7210 : /*
7211 : * getOwnedSeqs
7212 : * identify owned sequences and mark them as dumpable if owning table is
7213 : *
7214 : * We used to do this in getTables(), but it's better to do it after the
7215 : * index used by findTableByOid() has been set up.
7216 : */
7217 : void
7218 308 : getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables)
7219 : {
7220 : int i;
7221 :
7222 : /*
7223 : * Force sequences that are "owned" by table columns to be dumped whenever
7224 : * their owning table is being dumped.
7225 : */
7226 80978 : for (i = 0; i < numTables; i++)
7227 : {
7228 80670 : TableInfo *seqinfo = &tblinfo[i];
7229 : TableInfo *owning_tab;
7230 :
7231 80670 : if (!OidIsValid(seqinfo->owning_tab))
7232 79928 : continue; /* not an owned sequence */
7233 :
7234 742 : owning_tab = findTableByOid(seqinfo->owning_tab);
7235 742 : if (owning_tab == NULL)
7236 0 : pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
7237 : seqinfo->owning_tab, seqinfo->dobj.catId.oid);
7238 :
7239 : /*
7240 : * Only dump identity sequences if we're going to dump the table that
7241 : * it belongs to.
7242 : */
7243 742 : if (owning_tab->dobj.dump == DUMP_COMPONENT_NONE &&
7244 176 : seqinfo->is_identity_sequence)
7245 : {
7246 102 : seqinfo->dobj.dump = DUMP_COMPONENT_NONE;
7247 102 : continue;
7248 : }
7249 :
7250 : /*
7251 : * Otherwise we need to dump the components that are being dumped for
7252 : * the table and any components which the sequence is explicitly
7253 : * marked with.
7254 : *
7255 : * We can't simply use the set of components which are being dumped
7256 : * for the table as the table might be in an extension (and only the
7257 : * non-extension components, eg: ACLs if changed, security labels, and
7258 : * policies, are being dumped) while the sequence is not (and
7259 : * therefore the definition and other components should also be
7260 : * dumped).
7261 : *
7262 : * If the sequence is part of the extension then it should be properly
7263 : * marked by checkExtensionMembership() and this will be a no-op as
7264 : * the table will be equivalently marked.
7265 : */
7266 640 : seqinfo->dobj.dump = seqinfo->dobj.dump | owning_tab->dobj.dump;
7267 :
7268 640 : if (seqinfo->dobj.dump != DUMP_COMPONENT_NONE)
7269 570 : seqinfo->interesting = true;
7270 : }
7271 308 : }
7272 :
7273 : /*
7274 : * getInherits
7275 : * read all the inheritance information
7276 : * from the system catalogs return them in the InhInfo* structure
7277 : *
7278 : * numInherits is set to the number of pairs read in
7279 : */
7280 : InhInfo *
7281 308 : getInherits(Archive *fout, int *numInherits)
7282 : {
7283 : PGresult *res;
7284 : int ntups;
7285 : int i;
7286 308 : PQExpBuffer query = createPQExpBuffer();
7287 : InhInfo *inhinfo;
7288 :
7289 : int i_inhrelid;
7290 : int i_inhparent;
7291 :
7292 : /* find all the inheritance information */
7293 308 : appendPQExpBufferStr(query, "SELECT inhrelid, inhparent FROM pg_inherits");
7294 :
7295 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7296 :
7297 308 : ntups = PQntuples(res);
7298 :
7299 308 : *numInherits = ntups;
7300 :
7301 308 : inhinfo = (InhInfo *) pg_malloc(ntups * sizeof(InhInfo));
7302 :
7303 308 : i_inhrelid = PQfnumber(res, "inhrelid");
7304 308 : i_inhparent = PQfnumber(res, "inhparent");
7305 :
7306 5984 : for (i = 0; i < ntups; i++)
7307 : {
7308 5676 : inhinfo[i].inhrelid = atooid(PQgetvalue(res, i, i_inhrelid));
7309 5676 : inhinfo[i].inhparent = atooid(PQgetvalue(res, i, i_inhparent));
7310 : }
7311 :
7312 308 : PQclear(res);
7313 :
7314 308 : destroyPQExpBuffer(query);
7315 :
7316 308 : return inhinfo;
7317 : }
7318 :
7319 : /*
7320 : * getPartitioningInfo
7321 : * get information about partitioning
7322 : *
7323 : * For the most part, we only collect partitioning info about tables we
7324 : * intend to dump. However, this function has to consider all partitioned
7325 : * tables in the database, because we need to know about parents of partitions
7326 : * we are going to dump even if the parents themselves won't be dumped.
7327 : *
7328 : * Specifically, what we need to know is whether each partitioned table
7329 : * has an "unsafe" partitioning scheme that requires us to force
7330 : * load-via-partition-root mode for its children. Currently the only case
7331 : * for which we force that is hash partitioning on enum columns, since the
7332 : * hash codes depend on enum value OIDs which won't be replicated across
7333 : * dump-and-reload. There are other cases in which load-via-partition-root
7334 : * might be necessary, but we expect users to cope with them.
7335 : */
7336 : void
7337 308 : getPartitioningInfo(Archive *fout)
7338 : {
7339 : PQExpBuffer query;
7340 : PGresult *res;
7341 : int ntups;
7342 :
7343 : /* hash partitioning didn't exist before v11 */
7344 308 : if (fout->remoteVersion < 110000)
7345 0 : return;
7346 : /* needn't bother if schema-only dump */
7347 308 : if (fout->dopt->schemaOnly)
7348 32 : return;
7349 :
7350 276 : query = createPQExpBuffer();
7351 :
7352 : /*
7353 : * Unsafe partitioning schemes are exactly those for which hash enum_ops
7354 : * appears among the partition opclasses. We needn't check partstrat.
7355 : *
7356 : * Note that this query may well retrieve info about tables we aren't
7357 : * going to dump and hence have no lock on. That's okay since we need not
7358 : * invoke any unsafe server-side functions.
7359 : */
7360 276 : appendPQExpBufferStr(query,
7361 : "SELECT partrelid FROM pg_partitioned_table WHERE\n"
7362 : "(SELECT c.oid FROM pg_opclass c JOIN pg_am a "
7363 : "ON c.opcmethod = a.oid\n"
7364 : "WHERE opcname = 'enum_ops' "
7365 : "AND opcnamespace = 'pg_catalog'::regnamespace "
7366 : "AND amname = 'hash') = ANY(partclass)");
7367 :
7368 276 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7369 :
7370 276 : ntups = PQntuples(res);
7371 :
7372 280 : for (int i = 0; i < ntups; i++)
7373 : {
7374 4 : Oid tabrelid = atooid(PQgetvalue(res, i, 0));
7375 : TableInfo *tbinfo;
7376 :
7377 4 : tbinfo = findTableByOid(tabrelid);
7378 4 : if (tbinfo == NULL)
7379 0 : pg_fatal("failed sanity check, table OID %u appearing in pg_partitioned_table not found",
7380 : tabrelid);
7381 4 : tbinfo->unsafe_partitions = true;
7382 : }
7383 :
7384 276 : PQclear(res);
7385 :
7386 276 : destroyPQExpBuffer(query);
7387 : }
7388 :
7389 : /*
7390 : * getIndexes
7391 : * get information about every index on a dumpable table
7392 : *
7393 : * Note: index data is not returned directly to the caller, but it
7394 : * does get entered into the DumpableObject tables.
7395 : */
7396 : void
7397 308 : getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
7398 : {
7399 308 : PQExpBuffer query = createPQExpBuffer();
7400 308 : PQExpBuffer tbloids = createPQExpBuffer();
7401 : PGresult *res;
7402 : int ntups;
7403 : int curtblindx;
7404 : IndxInfo *indxinfo;
7405 : int i_tableoid,
7406 : i_oid,
7407 : i_indrelid,
7408 : i_indexname,
7409 : i_parentidx,
7410 : i_indexdef,
7411 : i_indnkeyatts,
7412 : i_indnatts,
7413 : i_indkey,
7414 : i_indisclustered,
7415 : i_indisreplident,
7416 : i_indnullsnotdistinct,
7417 : i_contype,
7418 : i_conname,
7419 : i_condeferrable,
7420 : i_condeferred,
7421 : i_conperiod,
7422 : i_contableoid,
7423 : i_conoid,
7424 : i_condef,
7425 : i_tablespace,
7426 : i_indreloptions,
7427 : i_indstatcols,
7428 : i_indstatvals;
7429 :
7430 : /*
7431 : * We want to perform just one query against pg_index. However, we
7432 : * mustn't try to select every row of the catalog and then sort it out on
7433 : * the client side, because some of the server-side functions we need
7434 : * would be unsafe to apply to tables we don't have lock on. Hence, we
7435 : * build an array of the OIDs of tables we care about (and now have lock
7436 : * on!), and use a WHERE clause to constrain which rows are selected.
7437 : */
7438 308 : appendPQExpBufferChar(tbloids, '{');
7439 80978 : for (int i = 0; i < numTables; i++)
7440 : {
7441 80670 : TableInfo *tbinfo = &tblinfo[i];
7442 :
7443 80670 : if (!tbinfo->hasindex)
7444 56826 : continue;
7445 :
7446 : /*
7447 : * We can ignore indexes of uninteresting tables.
7448 : */
7449 23844 : if (!tbinfo->interesting)
7450 20336 : continue;
7451 :
7452 : /* OK, we need info for this table */
7453 3508 : if (tbloids->len > 1) /* do we have more than the '{'? */
7454 3356 : appendPQExpBufferChar(tbloids, ',');
7455 3508 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
7456 : }
7457 308 : appendPQExpBufferChar(tbloids, '}');
7458 :
7459 308 : appendPQExpBufferStr(query,
7460 : "SELECT t.tableoid, t.oid, i.indrelid, "
7461 : "t.relname AS indexname, "
7462 : "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
7463 : "i.indkey, i.indisclustered, "
7464 : "c.contype, c.conname, "
7465 : "c.condeferrable, c.condeferred, "
7466 : "c.tableoid AS contableoid, "
7467 : "c.oid AS conoid, "
7468 : "pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
7469 : "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
7470 : "t.reloptions AS indreloptions, ");
7471 :
7472 :
7473 308 : if (fout->remoteVersion >= 90400)
7474 308 : appendPQExpBufferStr(query,
7475 : "i.indisreplident, ");
7476 : else
7477 0 : appendPQExpBufferStr(query,
7478 : "false AS indisreplident, ");
7479 :
7480 308 : if (fout->remoteVersion >= 110000)
7481 308 : appendPQExpBufferStr(query,
7482 : "inh.inhparent AS parentidx, "
7483 : "i.indnkeyatts AS indnkeyatts, "
7484 : "i.indnatts AS indnatts, "
7485 : "(SELECT pg_catalog.array_agg(attnum ORDER BY attnum) "
7486 : " FROM pg_catalog.pg_attribute "
7487 : " WHERE attrelid = i.indexrelid AND "
7488 : " attstattarget >= 0) AS indstatcols, "
7489 : "(SELECT pg_catalog.array_agg(attstattarget ORDER BY attnum) "
7490 : " FROM pg_catalog.pg_attribute "
7491 : " WHERE attrelid = i.indexrelid AND "
7492 : " attstattarget >= 0) AS indstatvals, ");
7493 : else
7494 0 : appendPQExpBufferStr(query,
7495 : "0 AS parentidx, "
7496 : "i.indnatts AS indnkeyatts, "
7497 : "i.indnatts AS indnatts, "
7498 : "'' AS indstatcols, "
7499 : "'' AS indstatvals, ");
7500 :
7501 308 : if (fout->remoteVersion >= 150000)
7502 308 : appendPQExpBufferStr(query,
7503 : "i.indnullsnotdistinct, ");
7504 : else
7505 0 : appendPQExpBufferStr(query,
7506 : "false AS indnullsnotdistinct, ");
7507 :
7508 308 : if (fout->remoteVersion >= 180000)
7509 308 : appendPQExpBufferStr(query,
7510 : "c.conperiod ");
7511 : else
7512 0 : appendPQExpBufferStr(query,
7513 : "NULL AS conperiod ");
7514 :
7515 : /*
7516 : * The point of the messy-looking outer join is to find a constraint that
7517 : * is related by an internal dependency link to the index. If we find one,
7518 : * create a CONSTRAINT entry linked to the INDEX entry. We assume an
7519 : * index won't have more than one internal dependency.
7520 : *
7521 : * Note: the check on conrelid is redundant, but useful because that
7522 : * column is indexed while conindid is not.
7523 : */
7524 308 : if (fout->remoteVersion >= 110000)
7525 : {
7526 308 : appendPQExpBuffer(query,
7527 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
7528 : "JOIN pg_catalog.pg_index i ON (src.tbloid = i.indrelid) "
7529 : "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
7530 : "JOIN pg_catalog.pg_class t2 ON (t2.oid = i.indrelid) "
7531 : "LEFT JOIN pg_catalog.pg_constraint c "
7532 : "ON (i.indrelid = c.conrelid AND "
7533 : "i.indexrelid = c.conindid AND "
7534 : "c.contype IN ('p','u','x')) "
7535 : "LEFT JOIN pg_catalog.pg_inherits inh "
7536 : "ON (inh.inhrelid = indexrelid) "
7537 : "WHERE (i.indisvalid OR t2.relkind = 'p') "
7538 : "AND i.indisready "
7539 : "ORDER BY i.indrelid, indexname",
7540 : tbloids->data);
7541 : }
7542 : else
7543 : {
7544 : /*
7545 : * the test on indisready is necessary in 9.2, and harmless in
7546 : * earlier/later versions
7547 : */
7548 0 : appendPQExpBuffer(query,
7549 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
7550 : "JOIN pg_catalog.pg_index i ON (src.tbloid = i.indrelid) "
7551 : "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
7552 : "LEFT JOIN pg_catalog.pg_constraint c "
7553 : "ON (i.indrelid = c.conrelid AND "
7554 : "i.indexrelid = c.conindid AND "
7555 : "c.contype IN ('p','u','x')) "
7556 : "WHERE i.indisvalid AND i.indisready "
7557 : "ORDER BY i.indrelid, indexname",
7558 : tbloids->data);
7559 : }
7560 :
7561 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7562 :
7563 308 : ntups = PQntuples(res);
7564 :
7565 308 : i_tableoid = PQfnumber(res, "tableoid");
7566 308 : i_oid = PQfnumber(res, "oid");
7567 308 : i_indrelid = PQfnumber(res, "indrelid");
7568 308 : i_indexname = PQfnumber(res, "indexname");
7569 308 : i_parentidx = PQfnumber(res, "parentidx");
7570 308 : i_indexdef = PQfnumber(res, "indexdef");
7571 308 : i_indnkeyatts = PQfnumber(res, "indnkeyatts");
7572 308 : i_indnatts = PQfnumber(res, "indnatts");
7573 308 : i_indkey = PQfnumber(res, "indkey");
7574 308 : i_indisclustered = PQfnumber(res, "indisclustered");
7575 308 : i_indisreplident = PQfnumber(res, "indisreplident");
7576 308 : i_indnullsnotdistinct = PQfnumber(res, "indnullsnotdistinct");
7577 308 : i_contype = PQfnumber(res, "contype");
7578 308 : i_conname = PQfnumber(res, "conname");
7579 308 : i_condeferrable = PQfnumber(res, "condeferrable");
7580 308 : i_condeferred = PQfnumber(res, "condeferred");
7581 308 : i_conperiod = PQfnumber(res, "conperiod");
7582 308 : i_contableoid = PQfnumber(res, "contableoid");
7583 308 : i_conoid = PQfnumber(res, "conoid");
7584 308 : i_condef = PQfnumber(res, "condef");
7585 308 : i_tablespace = PQfnumber(res, "tablespace");
7586 308 : i_indreloptions = PQfnumber(res, "indreloptions");
7587 308 : i_indstatcols = PQfnumber(res, "indstatcols");
7588 308 : i_indstatvals = PQfnumber(res, "indstatvals");
7589 :
7590 308 : indxinfo = (IndxInfo *) pg_malloc(ntups * sizeof(IndxInfo));
7591 :
7592 : /*
7593 : * Outer loop iterates once per table, not once per row. Incrementing of
7594 : * j is handled by the inner loop.
7595 : */
7596 308 : curtblindx = -1;
7597 3808 : for (int j = 0; j < ntups;)
7598 : {
7599 3500 : Oid indrelid = atooid(PQgetvalue(res, j, i_indrelid));
7600 3500 : TableInfo *tbinfo = NULL;
7601 : int numinds;
7602 :
7603 : /* Count rows for this table */
7604 4682 : for (numinds = 1; numinds < ntups - j; numinds++)
7605 4530 : if (atooid(PQgetvalue(res, j + numinds, i_indrelid)) != indrelid)
7606 3348 : break;
7607 :
7608 : /*
7609 : * Locate the associated TableInfo; we rely on tblinfo[] being in OID
7610 : * order.
7611 : */
7612 42932 : while (++curtblindx < numTables)
7613 : {
7614 42932 : tbinfo = &tblinfo[curtblindx];
7615 42932 : if (tbinfo->dobj.catId.oid == indrelid)
7616 3500 : break;
7617 : }
7618 3500 : if (curtblindx >= numTables)
7619 0 : pg_fatal("unrecognized table OID %u", indrelid);
7620 : /* cross-check that we only got requested tables */
7621 3500 : if (!tbinfo->hasindex ||
7622 3500 : !tbinfo->interesting)
7623 0 : pg_fatal("unexpected index data for table \"%s\"",
7624 : tbinfo->dobj.name);
7625 :
7626 : /* Save data for this table */
7627 3500 : tbinfo->indexes = indxinfo + j;
7628 3500 : tbinfo->numIndexes = numinds;
7629 :
7630 8182 : for (int c = 0; c < numinds; c++, j++)
7631 : {
7632 : char contype;
7633 :
7634 4682 : indxinfo[j].dobj.objType = DO_INDEX;
7635 4682 : indxinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
7636 4682 : indxinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
7637 4682 : AssignDumpId(&indxinfo[j].dobj);
7638 4682 : indxinfo[j].dobj.dump = tbinfo->dobj.dump;
7639 4682 : indxinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_indexname));
7640 4682 : indxinfo[j].dobj.namespace = tbinfo->dobj.namespace;
7641 4682 : indxinfo[j].indextable = tbinfo;
7642 4682 : indxinfo[j].indexdef = pg_strdup(PQgetvalue(res, j, i_indexdef));
7643 4682 : indxinfo[j].indnkeyattrs = atoi(PQgetvalue(res, j, i_indnkeyatts));
7644 4682 : indxinfo[j].indnattrs = atoi(PQgetvalue(res, j, i_indnatts));
7645 4682 : indxinfo[j].tablespace = pg_strdup(PQgetvalue(res, j, i_tablespace));
7646 4682 : indxinfo[j].indreloptions = pg_strdup(PQgetvalue(res, j, i_indreloptions));
7647 4682 : indxinfo[j].indstatcols = pg_strdup(PQgetvalue(res, j, i_indstatcols));
7648 4682 : indxinfo[j].indstatvals = pg_strdup(PQgetvalue(res, j, i_indstatvals));
7649 4682 : indxinfo[j].indkeys = (Oid *) pg_malloc(indxinfo[j].indnattrs * sizeof(Oid));
7650 4682 : parseOidArray(PQgetvalue(res, j, i_indkey),
7651 4682 : indxinfo[j].indkeys, indxinfo[j].indnattrs);
7652 4682 : indxinfo[j].indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't');
7653 4682 : indxinfo[j].indisreplident = (PQgetvalue(res, j, i_indisreplident)[0] == 't');
7654 4682 : indxinfo[j].indnullsnotdistinct = (PQgetvalue(res, j, i_indnullsnotdistinct)[0] == 't');
7655 4682 : indxinfo[j].parentidx = atooid(PQgetvalue(res, j, i_parentidx));
7656 4682 : indxinfo[j].partattaches = (SimplePtrList)
7657 : {
7658 : NULL, NULL
7659 : };
7660 4682 : contype = *(PQgetvalue(res, j, i_contype));
7661 :
7662 4682 : if (contype == 'p' || contype == 'u' || contype == 'x')
7663 2680 : {
7664 : /*
7665 : * If we found a constraint matching the index, create an
7666 : * entry for it.
7667 : */
7668 : ConstraintInfo *constrinfo;
7669 :
7670 2680 : constrinfo = (ConstraintInfo *) pg_malloc(sizeof(ConstraintInfo));
7671 2680 : constrinfo->dobj.objType = DO_CONSTRAINT;
7672 2680 : constrinfo->dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
7673 2680 : constrinfo->dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
7674 2680 : AssignDumpId(&constrinfo->dobj);
7675 2680 : constrinfo->dobj.dump = tbinfo->dobj.dump;
7676 2680 : constrinfo->dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
7677 2680 : constrinfo->dobj.namespace = tbinfo->dobj.namespace;
7678 2680 : constrinfo->contable = tbinfo;
7679 2680 : constrinfo->condomain = NULL;
7680 2680 : constrinfo->contype = contype;
7681 2680 : if (contype == 'x')
7682 20 : constrinfo->condef = pg_strdup(PQgetvalue(res, j, i_condef));
7683 : else
7684 2660 : constrinfo->condef = NULL;
7685 2680 : constrinfo->confrelid = InvalidOid;
7686 2680 : constrinfo->conindex = indxinfo[j].dobj.dumpId;
7687 2680 : constrinfo->condeferrable = *(PQgetvalue(res, j, i_condeferrable)) == 't';
7688 2680 : constrinfo->condeferred = *(PQgetvalue(res, j, i_condeferred)) == 't';
7689 2680 : constrinfo->conperiod = *(PQgetvalue(res, j, i_conperiod)) == 't';
7690 2680 : constrinfo->conislocal = true;
7691 2680 : constrinfo->separate = true;
7692 :
7693 2680 : indxinfo[j].indexconstraint = constrinfo->dobj.dumpId;
7694 : }
7695 : else
7696 : {
7697 : /* Plain secondary index */
7698 2002 : indxinfo[j].indexconstraint = 0;
7699 : }
7700 : }
7701 : }
7702 :
7703 308 : PQclear(res);
7704 :
7705 308 : destroyPQExpBuffer(query);
7706 308 : destroyPQExpBuffer(tbloids);
7707 308 : }
7708 :
7709 : /*
7710 : * getExtendedStatistics
7711 : * get information about extended-statistics objects.
7712 : *
7713 : * Note: extended statistics data is not returned directly to the caller, but
7714 : * it does get entered into the DumpableObject tables.
7715 : */
7716 : void
7717 308 : getExtendedStatistics(Archive *fout)
7718 : {
7719 : PQExpBuffer query;
7720 : PGresult *res;
7721 : StatsExtInfo *statsextinfo;
7722 : int ntups;
7723 : int i_tableoid;
7724 : int i_oid;
7725 : int i_stxname;
7726 : int i_stxnamespace;
7727 : int i_stxowner;
7728 : int i_stxrelid;
7729 : int i_stattarget;
7730 : int i;
7731 :
7732 : /* Extended statistics were new in v10 */
7733 308 : if (fout->remoteVersion < 100000)
7734 0 : return;
7735 :
7736 308 : query = createPQExpBuffer();
7737 :
7738 308 : if (fout->remoteVersion < 130000)
7739 0 : appendPQExpBufferStr(query, "SELECT tableoid, oid, stxname, "
7740 : "stxnamespace, stxowner, stxrelid, NULL AS stxstattarget "
7741 : "FROM pg_catalog.pg_statistic_ext");
7742 : else
7743 308 : appendPQExpBufferStr(query, "SELECT tableoid, oid, stxname, "
7744 : "stxnamespace, stxowner, stxrelid, stxstattarget "
7745 : "FROM pg_catalog.pg_statistic_ext");
7746 :
7747 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7748 :
7749 308 : ntups = PQntuples(res);
7750 :
7751 308 : i_tableoid = PQfnumber(res, "tableoid");
7752 308 : i_oid = PQfnumber(res, "oid");
7753 308 : i_stxname = PQfnumber(res, "stxname");
7754 308 : i_stxnamespace = PQfnumber(res, "stxnamespace");
7755 308 : i_stxowner = PQfnumber(res, "stxowner");
7756 308 : i_stxrelid = PQfnumber(res, "stxrelid");
7757 308 : i_stattarget = PQfnumber(res, "stxstattarget");
7758 :
7759 308 : statsextinfo = (StatsExtInfo *) pg_malloc(ntups * sizeof(StatsExtInfo));
7760 :
7761 622 : for (i = 0; i < ntups; i++)
7762 : {
7763 314 : statsextinfo[i].dobj.objType = DO_STATSEXT;
7764 314 : statsextinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
7765 314 : statsextinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
7766 314 : AssignDumpId(&statsextinfo[i].dobj);
7767 314 : statsextinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_stxname));
7768 628 : statsextinfo[i].dobj.namespace =
7769 314 : findNamespace(atooid(PQgetvalue(res, i, i_stxnamespace)));
7770 314 : statsextinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_stxowner));
7771 628 : statsextinfo[i].stattable =
7772 314 : findTableByOid(atooid(PQgetvalue(res, i, i_stxrelid)));
7773 314 : if (PQgetisnull(res, i, i_stattarget))
7774 228 : statsextinfo[i].stattarget = -1;
7775 : else
7776 86 : statsextinfo[i].stattarget = atoi(PQgetvalue(res, i, i_stattarget));
7777 :
7778 : /* Decide whether we want to dump it */
7779 314 : selectDumpableStatisticsObject(&(statsextinfo[i]), fout);
7780 : }
7781 :
7782 308 : PQclear(res);
7783 308 : destroyPQExpBuffer(query);
7784 : }
7785 :
7786 : /*
7787 : * getConstraints
7788 : *
7789 : * Get info about constraints on dumpable tables.
7790 : *
7791 : * Currently handles foreign keys only.
7792 : * Unique and primary key constraints are handled with indexes,
7793 : * while check constraints are processed in getTableAttrs().
7794 : */
void
getConstraints(Archive *fout, TableInfo tblinfo[], int numTables)
{
	PQExpBuffer query = createPQExpBuffer();
	PQExpBuffer tbloids = createPQExpBuffer();
	PGresult   *res;
	int			ntups;
	int			curtblindx;
	TableInfo  *tbinfo = NULL;
	ConstraintInfo *constrinfo;
	int			i_contableoid,
				i_conoid,
				i_conrelid,
				i_conname,
				i_confrelid,
				i_conindid,
				i_condef;

	/*
	 * We want to perform just one query against pg_constraint. However, we
	 * mustn't try to select every row of the catalog and then sort it out on
	 * the client side, because some of the server-side functions we need
	 * would be unsafe to apply to tables we don't have lock on. Hence, we
	 * build an array of the OIDs of tables we care about (and now have lock
	 * on!), and use a WHERE clause to constrain which rows are selected.
	 */
	appendPQExpBufferChar(tbloids, '{');
	for (int i = 0; i < numTables; i++)
	{
		TableInfo  *tinfo = &tblinfo[i];

		/*
		 * For partitioned tables, foreign keys have no triggers so they must
		 * be included anyway in case some foreign keys are defined.
		 */
		if ((!tinfo->hastriggers &&
			 tinfo->relkind != RELKIND_PARTITIONED_TABLE) ||
			!(tinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
			continue;

		/* OK, we need info for this table */
		if (tbloids->len > 1)	/* do we have more than the '{'? */
			appendPQExpBufferChar(tbloids, ',');
		appendPQExpBuffer(tbloids, "%u", tinfo->dobj.catId.oid);
	}
	appendPQExpBufferChar(tbloids, '}');

	appendPQExpBufferStr(query,
						 "SELECT c.tableoid, c.oid, "
						 "conrelid, conname, confrelid, ");
	/* conindid and conparentid are only available from v11 on */
	if (fout->remoteVersion >= 110000)
		appendPQExpBufferStr(query, "conindid, ");
	else
		appendPQExpBufferStr(query, "0 AS conindid, ");
	appendPQExpBuffer(query,
					  "pg_catalog.pg_get_constraintdef(c.oid) AS condef\n"
					  "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
					  "JOIN pg_catalog.pg_constraint c ON (src.tbloid = c.conrelid)\n"
					  "WHERE contype = 'f' ",
					  tbloids->data);
	/* Skip FK clones on partitions; the parent constraint covers them */
	if (fout->remoteVersion >= 110000)
		appendPQExpBufferStr(query,
							 "AND conparentid = 0 ");
	/* Ordering by conrelid lets the loop below walk tblinfo[] once */
	appendPQExpBufferStr(query,
						 "ORDER BY conrelid, conname");

	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

	ntups = PQntuples(res);

	i_contableoid = PQfnumber(res, "tableoid");
	i_conoid = PQfnumber(res, "oid");
	i_conrelid = PQfnumber(res, "conrelid");
	i_conname = PQfnumber(res, "conname");
	i_confrelid = PQfnumber(res, "confrelid");
	i_conindid = PQfnumber(res, "conindid");
	i_condef = PQfnumber(res, "condef");

	constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));

	curtblindx = -1;
	for (int j = 0; j < ntups; j++)
	{
		Oid			conrelid = atooid(PQgetvalue(res, j, i_conrelid));
		TableInfo  *reftable;

		/*
		 * Locate the associated TableInfo; we rely on tblinfo[] being in OID
		 * order.
		 */
		if (tbinfo == NULL || tbinfo->dobj.catId.oid != conrelid)
		{
			while (++curtblindx < numTables)
			{
				tbinfo = &tblinfo[curtblindx];
				if (tbinfo->dobj.catId.oid == conrelid)
					break;
			}
			if (curtblindx >= numTables)
				pg_fatal("unrecognized table OID %u", conrelid);
		}

		/* Build a DumpableObject describing this foreign-key constraint */
		constrinfo[j].dobj.objType = DO_FK_CONSTRAINT;
		constrinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
		constrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
		AssignDumpId(&constrinfo[j].dobj);
		constrinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
		constrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
		constrinfo[j].contable = tbinfo;
		constrinfo[j].condomain = NULL;
		constrinfo[j].contype = 'f';
		constrinfo[j].condef = pg_strdup(PQgetvalue(res, j, i_condef));
		constrinfo[j].confrelid = atooid(PQgetvalue(res, j, i_confrelid));
		constrinfo[j].conindex = 0;
		constrinfo[j].condeferrable = false;
		constrinfo[j].condeferred = false;
		constrinfo[j].conislocal = true;
		/* FKs are always dumped as separate ALTER TABLE commands */
		constrinfo[j].separate = true;

		/*
		 * Restoring an FK that points to a partitioned table requires that
		 * all partition indexes have been attached beforehand. Ensure that
		 * happens by making the constraint depend on each index partition
		 * attach object.
		 */
		reftable = findTableByOid(constrinfo[j].confrelid);
		if (reftable && reftable->relkind == RELKIND_PARTITIONED_TABLE)
		{
			Oid			indexOid = atooid(PQgetvalue(res, j, i_conindid));

			if (indexOid != InvalidOid)
			{
				/* Find the referenced index among reftable's indexes */
				for (int k = 0; k < reftable->numIndexes; k++)
				{
					IndxInfo   *refidx;

					/* not our index? */
					if (reftable->indexes[k].dobj.catId.oid != indexOid)
						continue;

					refidx = &reftable->indexes[k];
					addConstrChildIdxDeps(&constrinfo[j].dobj, refidx);
					break;
				}
			}
		}
	}

	PQclear(res);

	destroyPQExpBuffer(query);
	destroyPQExpBuffer(tbloids);
}
7948 :
7949 : /*
7950 : * addConstrChildIdxDeps
7951 : *
7952 : * Recursive subroutine for getConstraints
7953 : *
7954 : * Given an object representing a foreign key constraint and an index on the
7955 : * partitioned table it references, mark the constraint object as dependent
7956 : * on the DO_INDEX_ATTACH object of each index partition, recursively
7957 : * drilling down to their partitions if any. This ensures that the FK is not
7958 : * restored until the index is fully marked valid.
7959 : */
7960 : static void
7961 90 : addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx)
7962 : {
7963 : SimplePtrListCell *cell;
7964 :
7965 : Assert(dobj->objType == DO_FK_CONSTRAINT);
7966 :
7967 310 : for (cell = refidx->partattaches.head; cell; cell = cell->next)
7968 : {
7969 220 : IndexAttachInfo *attach = (IndexAttachInfo *) cell->ptr;
7970 :
7971 220 : addObjectDependency(dobj, attach->dobj.dumpId);
7972 :
7973 220 : if (attach->partitionIdx->partattaches.head != NULL)
7974 50 : addConstrChildIdxDeps(dobj, attach->partitionIdx);
7975 : }
7976 90 : }
7977 :
7978 : /*
7979 : * getDomainConstraints
7980 : *
7981 : * Get info about constraints on a domain.
7982 : */
static void
getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
{
	int			i;
	ConstraintInfo *constrinfo;
	PQExpBuffer query = createPQExpBuffer();
	PGresult   *res;
	int			i_tableoid,
				i_oid,
				i_conname,
				i_consrc;
	int			ntups;

	/*
	 * This function runs once per domain, so use a server-side prepared
	 * statement; prepare it only on the first call for this connection.
	 */
	if (!fout->is_prepared[PREPQUERY_GETDOMAINCONSTRAINTS])
	{
		/* Set up query for constraint-specific details */
		appendPQExpBufferStr(query,
							 "PREPARE getDomainConstraints(pg_catalog.oid) AS\n"
							 "SELECT tableoid, oid, conname, "
							 "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
							 "convalidated "
							 "FROM pg_catalog.pg_constraint "
							 "WHERE contypid = $1 AND contype = 'c' "
							 "ORDER BY conname");

		ExecuteSqlStatement(fout, query->data);

		fout->is_prepared[PREPQUERY_GETDOMAINCONSTRAINTS] = true;
	}

	printfPQExpBuffer(query,
					  "EXECUTE getDomainConstraints('%u')",
					  tyinfo->dobj.catId.oid);

	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

	ntups = PQntuples(res);

	i_tableoid = PQfnumber(res, "tableoid");
	i_oid = PQfnumber(res, "oid");
	i_conname = PQfnumber(res, "conname");
	i_consrc = PQfnumber(res, "consrc");

	constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));

	/* Attach the constraint array to the domain's TypeInfo */
	tyinfo->nDomChecks = ntups;
	tyinfo->domChecks = constrinfo;

	for (i = 0; i < ntups; i++)
	{
		/* column 4 is convalidated, per the SELECT list prepared above */
		bool		validated = PQgetvalue(res, i, 4)[0] == 't';

		constrinfo[i].dobj.objType = DO_CONSTRAINT;
		constrinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
		constrinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
		AssignDumpId(&constrinfo[i].dobj);
		constrinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
		constrinfo[i].dobj.namespace = tyinfo->dobj.namespace;
		constrinfo[i].contable = NULL;	/* domain constraint, not a table's */
		constrinfo[i].condomain = tyinfo;
		constrinfo[i].contype = 'c';
		constrinfo[i].condef = pg_strdup(PQgetvalue(res, i, i_consrc));
		constrinfo[i].confrelid = InvalidOid;
		constrinfo[i].conindex = 0;
		constrinfo[i].condeferrable = false;
		constrinfo[i].condeferred = false;
		constrinfo[i].conislocal = true;

		/* NOT VALID constraints are emitted separately, after the domain */
		constrinfo[i].separate = !validated;

		/*
		 * Make the domain depend on the constraint, ensuring it won't be
		 * output till any constraint dependencies are OK. If the constraint
		 * has not been validated, it's going to be dumped after the domain
		 * anyway, so this doesn't matter.
		 */
		if (validated)
			addObjectDependency(&tyinfo->dobj,
								constrinfo[i].dobj.dumpId);
	}

	PQclear(res);

	destroyPQExpBuffer(query);
}
8068 :
8069 : /*
8070 : * getRules
8071 : * get basic information about every rule in the system
8072 : */
void
getRules(Archive *fout)
{
	PGresult   *res;
	int			ntups;
	int			i;
	PQExpBuffer query = createPQExpBuffer();
	RuleInfo   *ruleinfo;
	int			i_tableoid;
	int			i_oid;
	int			i_rulename;
	int			i_ruletable;
	int			i_ev_type;
	int			i_is_instead;
	int			i_ev_enabled;

	/* Fetch every rewrite rule; OID order gives a stable dump ordering */
	appendPQExpBufferStr(query, "SELECT "
						 "tableoid, oid, rulename, "
						 "ev_class AS ruletable, ev_type, is_instead, "
						 "ev_enabled "
						 "FROM pg_rewrite "
						 "ORDER BY oid");

	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

	ntups = PQntuples(res);

	ruleinfo = (RuleInfo *) pg_malloc(ntups * sizeof(RuleInfo));

	i_tableoid = PQfnumber(res, "tableoid");
	i_oid = PQfnumber(res, "oid");
	i_rulename = PQfnumber(res, "rulename");
	i_ruletable = PQfnumber(res, "ruletable");
	i_ev_type = PQfnumber(res, "ev_type");
	i_is_instead = PQfnumber(res, "is_instead");
	i_ev_enabled = PQfnumber(res, "ev_enabled");

	for (i = 0; i < ntups; i++)
	{
		Oid			ruletableoid;

		ruleinfo[i].dobj.objType = DO_RULE;
		ruleinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
		ruleinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
		AssignDumpId(&ruleinfo[i].dobj);
		ruleinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_rulename));
		ruletableoid = atooid(PQgetvalue(res, i, i_ruletable));
		ruleinfo[i].ruletable = findTableByOid(ruletableoid);
		/* every pg_rewrite row must point to a known table */
		if (ruleinfo[i].ruletable == NULL)
			pg_fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
					 ruletableoid, ruleinfo[i].dobj.catId.oid);
		/* The rule inherits namespace and dump flags from its table */
		ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace;
		ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump;
		ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type));
		ruleinfo[i].is_instead = *(PQgetvalue(res, i, i_is_instead)) == 't';
		ruleinfo[i].ev_enabled = *(PQgetvalue(res, i, i_ev_enabled));
		if (ruleinfo[i].ruletable)
		{
			/*
			 * If the table is a view or materialized view, force its ON
			 * SELECT rule to be sorted before the view itself --- this
			 * ensures that any dependencies for the rule affect the table's
			 * positioning. Other rules are forced to appear after their
			 * table.
			 */
			if ((ruleinfo[i].ruletable->relkind == RELKIND_VIEW ||
				 ruleinfo[i].ruletable->relkind == RELKIND_MATVIEW) &&
				ruleinfo[i].ev_type == '1' && ruleinfo[i].is_instead)
			{
				addObjectDependency(&ruleinfo[i].ruletable->dobj,
									ruleinfo[i].dobj.dumpId);
				/* We'll merge the rule into CREATE VIEW, if possible */
				ruleinfo[i].separate = false;
			}
			else
			{
				addObjectDependency(&ruleinfo[i].dobj,
									ruleinfo[i].ruletable->dobj.dumpId);
				ruleinfo[i].separate = true;
			}
		}
		else
			ruleinfo[i].separate = true;
	}

	PQclear(res);

	destroyPQExpBuffer(query);
}
8162 :
8163 : /*
8164 : * getTriggers
8165 : * get information about every trigger on a dumpable table
8166 : *
8167 : * Note: trigger data is not returned directly to the caller, but it
8168 : * does get entered into the DumpableObject tables.
8169 : */
void
getTriggers(Archive *fout, TableInfo tblinfo[], int numTables)
{
	PQExpBuffer query = createPQExpBuffer();
	PQExpBuffer tbloids = createPQExpBuffer();
	PGresult   *res;
	int			ntups;
	int			curtblindx;
	TriggerInfo *tginfo;
	int			i_tableoid,
				i_oid,
				i_tgrelid,
				i_tgname,
				i_tgenabled,
				i_tgispartition,
				i_tgdef;

	/*
	 * We want to perform just one query against pg_trigger. However, we
	 * mustn't try to select every row of the catalog and then sort it out on
	 * the client side, because some of the server-side functions we need
	 * would be unsafe to apply to tables we don't have lock on. Hence, we
	 * build an array of the OIDs of tables we care about (and now have lock
	 * on!), and use a WHERE clause to constrain which rows are selected.
	 */
	appendPQExpBufferChar(tbloids, '{');
	for (int i = 0; i < numTables; i++)
	{
		TableInfo  *tbinfo = &tblinfo[i];

		/* only tables with triggers whose definition we will dump */
		if (!tbinfo->hastriggers ||
			!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
			continue;

		/* OK, we need info for this table */
		if (tbloids->len > 1)	/* do we have more than the '{'? */
			appendPQExpBufferChar(tbloids, ',');
		appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
	}
	appendPQExpBufferChar(tbloids, '}');

	if (fout->remoteVersion >= 150000)
	{
		/*
		 * NB: think not to use pretty=true in pg_get_triggerdef. It could
		 * result in non-forward-compatible dumps of WHEN clauses due to
		 * under-parenthesization.
		 *
		 * NB: We need to see partition triggers in case the tgenabled flag
		 * has been changed from the parent.
		 */
		appendPQExpBuffer(query,
						  "SELECT t.tgrelid, t.tgname, "
						  "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
						  "t.tgenabled, t.tableoid, t.oid, "
						  "t.tgparentid <> 0 AS tgispartition\n"
						  "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
						  "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
						  "LEFT JOIN pg_catalog.pg_trigger u ON (u.oid = t.tgparentid) "
						  "WHERE ((NOT t.tgisinternal AND t.tgparentid = 0) "
						  "OR t.tgenabled != u.tgenabled) "
						  "ORDER BY t.tgrelid, t.tgname",
						  tbloids->data);
	}
	else if (fout->remoteVersion >= 130000)
	{
		/*
		 * NB: think not to use pretty=true in pg_get_triggerdef. It could
		 * result in non-forward-compatible dumps of WHEN clauses due to
		 * under-parenthesization.
		 *
		 * NB: We need to see tgisinternal triggers in partitions, in case the
		 * tgenabled flag has been changed from the parent.
		 */
		appendPQExpBuffer(query,
						  "SELECT t.tgrelid, t.tgname, "
						  "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
						  "t.tgenabled, t.tableoid, t.oid, t.tgisinternal as tgispartition\n"
						  "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
						  "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
						  "LEFT JOIN pg_catalog.pg_trigger u ON (u.oid = t.tgparentid) "
						  "WHERE (NOT t.tgisinternal OR t.tgenabled != u.tgenabled) "
						  "ORDER BY t.tgrelid, t.tgname",
						  tbloids->data);
	}
	else if (fout->remoteVersion >= 110000)
	{
		/*
		 * NB: We need to see tgisinternal triggers in partitions, in case the
		 * tgenabled flag has been changed from the parent. No tgparentid in
		 * version 11-12, so we have to match them via pg_depend.
		 *
		 * See above about pretty=true in pg_get_triggerdef.
		 */
		appendPQExpBuffer(query,
						  "SELECT t.tgrelid, t.tgname, "
						  "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
						  "t.tgenabled, t.tableoid, t.oid, t.tgisinternal as tgispartition "
						  "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
						  "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
						  "LEFT JOIN pg_catalog.pg_depend AS d ON "
						  " d.classid = 'pg_catalog.pg_trigger'::pg_catalog.regclass AND "
						  " d.refclassid = 'pg_catalog.pg_trigger'::pg_catalog.regclass AND "
						  " d.objid = t.oid "
						  "LEFT JOIN pg_catalog.pg_trigger AS pt ON pt.oid = refobjid "
						  "WHERE (NOT t.tgisinternal OR t.tgenabled != pt.tgenabled) "
						  "ORDER BY t.tgrelid, t.tgname",
						  tbloids->data);
	}
	else
	{
		/* See above about pretty=true in pg_get_triggerdef */
		appendPQExpBuffer(query,
						  "SELECT t.tgrelid, t.tgname, "
						  "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
						  "t.tgenabled, false as tgispartition, "
						  "t.tableoid, t.oid "
						  "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
						  "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
						  "WHERE NOT tgisinternal "
						  "ORDER BY t.tgrelid, t.tgname",
						  tbloids->data);
	}

	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

	ntups = PQntuples(res);

	i_tableoid = PQfnumber(res, "tableoid");
	i_oid = PQfnumber(res, "oid");
	i_tgrelid = PQfnumber(res, "tgrelid");
	i_tgname = PQfnumber(res, "tgname");
	i_tgenabled = PQfnumber(res, "tgenabled");
	i_tgispartition = PQfnumber(res, "tgispartition");
	i_tgdef = PQfnumber(res, "tgdef");

	tginfo = (TriggerInfo *) pg_malloc(ntups * sizeof(TriggerInfo));

	/*
	 * Outer loop iterates once per table, not once per row. Incrementing of
	 * j is handled by the inner loop.
	 */
	curtblindx = -1;
	for (int j = 0; j < ntups;)
	{
		Oid			tgrelid = atooid(PQgetvalue(res, j, i_tgrelid));
		TableInfo  *tbinfo = NULL;
		int			numtrigs;

		/* Count rows for this table */
		for (numtrigs = 1; numtrigs < ntups - j; numtrigs++)
			if (atooid(PQgetvalue(res, j + numtrigs, i_tgrelid)) != tgrelid)
				break;

		/*
		 * Locate the associated TableInfo; we rely on tblinfo[] being in OID
		 * order.
		 */
		while (++curtblindx < numTables)
		{
			tbinfo = &tblinfo[curtblindx];
			if (tbinfo->dobj.catId.oid == tgrelid)
				break;
		}
		if (curtblindx >= numTables)
			pg_fatal("unrecognized table OID %u", tgrelid);

		/* Save data for this table */
		tbinfo->triggers = tginfo + j;
		tbinfo->numTriggers = numtrigs;

		/* Fill in TriggerInfo entries for each trigger of this table */
		for (int c = 0; c < numtrigs; c++, j++)
		{
			tginfo[j].dobj.objType = DO_TRIGGER;
			tginfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
			tginfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
			AssignDumpId(&tginfo[j].dobj);
			tginfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_tgname));
			tginfo[j].dobj.namespace = tbinfo->dobj.namespace;
			tginfo[j].tgtable = tbinfo;
			tginfo[j].tgenabled = *(PQgetvalue(res, j, i_tgenabled));
			tginfo[j].tgispartition = *(PQgetvalue(res, j, i_tgispartition)) == 't';
			tginfo[j].tgdef = pg_strdup(PQgetvalue(res, j, i_tgdef));
		}
	}

	PQclear(res);

	destroyPQExpBuffer(query);
	destroyPQExpBuffer(tbloids);
}
8361 :
8362 : /*
8363 : * getEventTriggers
8364 : * get information about event triggers
8365 : */
8366 : void
8367 308 : getEventTriggers(Archive *fout)
8368 : {
8369 : int i;
8370 : PQExpBuffer query;
8371 : PGresult *res;
8372 : EventTriggerInfo *evtinfo;
8373 : int i_tableoid,
8374 : i_oid,
8375 : i_evtname,
8376 : i_evtevent,
8377 : i_evtowner,
8378 : i_evttags,
8379 : i_evtfname,
8380 : i_evtenabled;
8381 : int ntups;
8382 :
8383 : /* Before 9.3, there are no event triggers */
8384 308 : if (fout->remoteVersion < 90300)
8385 0 : return;
8386 :
8387 308 : query = createPQExpBuffer();
8388 :
8389 308 : appendPQExpBufferStr(query,
8390 : "SELECT e.tableoid, e.oid, evtname, evtenabled, "
8391 : "evtevent, evtowner, "
8392 : "array_to_string(array("
8393 : "select quote_literal(x) "
8394 : " from unnest(evttags) as t(x)), ', ') as evttags, "
8395 : "e.evtfoid::regproc as evtfname "
8396 : "FROM pg_event_trigger e "
8397 : "ORDER BY e.oid");
8398 :
8399 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8400 :
8401 308 : ntups = PQntuples(res);
8402 :
8403 308 : evtinfo = (EventTriggerInfo *) pg_malloc(ntups * sizeof(EventTriggerInfo));
8404 :
8405 308 : i_tableoid = PQfnumber(res, "tableoid");
8406 308 : i_oid = PQfnumber(res, "oid");
8407 308 : i_evtname = PQfnumber(res, "evtname");
8408 308 : i_evtevent = PQfnumber(res, "evtevent");
8409 308 : i_evtowner = PQfnumber(res, "evtowner");
8410 308 : i_evttags = PQfnumber(res, "evttags");
8411 308 : i_evtfname = PQfnumber(res, "evtfname");
8412 308 : i_evtenabled = PQfnumber(res, "evtenabled");
8413 :
8414 408 : for (i = 0; i < ntups; i++)
8415 : {
8416 100 : evtinfo[i].dobj.objType = DO_EVENT_TRIGGER;
8417 100 : evtinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8418 100 : evtinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8419 100 : AssignDumpId(&evtinfo[i].dobj);
8420 100 : evtinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_evtname));
8421 100 : evtinfo[i].evtname = pg_strdup(PQgetvalue(res, i, i_evtname));
8422 100 : evtinfo[i].evtevent = pg_strdup(PQgetvalue(res, i, i_evtevent));
8423 100 : evtinfo[i].evtowner = getRoleName(PQgetvalue(res, i, i_evtowner));
8424 100 : evtinfo[i].evttags = pg_strdup(PQgetvalue(res, i, i_evttags));
8425 100 : evtinfo[i].evtfname = pg_strdup(PQgetvalue(res, i, i_evtfname));
8426 100 : evtinfo[i].evtenabled = *(PQgetvalue(res, i, i_evtenabled));
8427 :
8428 : /* Decide whether we want to dump it */
8429 100 : selectDumpableObject(&(evtinfo[i].dobj), fout);
8430 : }
8431 :
8432 308 : PQclear(res);
8433 :
8434 308 : destroyPQExpBuffer(query);
8435 : }
8436 :
8437 : /*
8438 : * getProcLangs
8439 : * get basic information about every procedural language in the system
8440 : *
8441 : * NB: this must run after getFuncs() because we assume we can do
8442 : * findFuncByOid().
8443 : */
void
getProcLangs(Archive *fout)
{
	PGresult   *res;
	int			ntups;
	int			i;
	PQExpBuffer query = createPQExpBuffer();
	ProcLangInfo *planginfo;
	int			i_tableoid;
	int			i_oid;
	int			i_lanname;
	int			i_lanpltrusted;
	int			i_lanplcallfoid;
	int			i_laninline;
	int			i_lanvalidator;
	int			i_lanacl;
	int			i_acldefault;
	int			i_lanowner;

	/* Only procedural languages (lanispl); built-in ones are excluded */
	appendPQExpBufferStr(query, "SELECT tableoid, oid, "
						 "lanname, lanpltrusted, lanplcallfoid, "
						 "laninline, lanvalidator, "
						 "lanacl, "
						 "acldefault('l', lanowner) AS acldefault, "
						 "lanowner "
						 "FROM pg_language "
						 "WHERE lanispl "
						 "ORDER BY oid");

	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);

	ntups = PQntuples(res);

	planginfo = (ProcLangInfo *) pg_malloc(ntups * sizeof(ProcLangInfo));

	i_tableoid = PQfnumber(res, "tableoid");
	i_oid = PQfnumber(res, "oid");
	i_lanname = PQfnumber(res, "lanname");
	i_lanpltrusted = PQfnumber(res, "lanpltrusted");
	i_lanplcallfoid = PQfnumber(res, "lanplcallfoid");
	i_laninline = PQfnumber(res, "laninline");
	i_lanvalidator = PQfnumber(res, "lanvalidator");
	i_lanacl = PQfnumber(res, "lanacl");
	i_acldefault = PQfnumber(res, "acldefault");
	i_lanowner = PQfnumber(res, "lanowner");

	for (i = 0; i < ntups; i++)
	{
		planginfo[i].dobj.objType = DO_PROCLANG;
		planginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
		planginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
		AssignDumpId(&planginfo[i].dobj);

		planginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_lanname));
		/* dacl holds the current and default ACLs for later diffing */
		planginfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lanacl));
		planginfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
		planginfo[i].dacl.privtype = 0;
		planginfo[i].dacl.initprivs = NULL;
		planginfo[i].lanpltrusted = *(PQgetvalue(res, i, i_lanpltrusted)) == 't';
		planginfo[i].lanplcallfoid = atooid(PQgetvalue(res, i, i_lanplcallfoid));
		planginfo[i].laninline = atooid(PQgetvalue(res, i, i_laninline));
		planginfo[i].lanvalidator = atooid(PQgetvalue(res, i, i_lanvalidator));
		planginfo[i].lanowner = getRoleName(PQgetvalue(res, i, i_lanowner));

		/* Decide whether we want to dump it */
		selectDumpableProcLang(&(planginfo[i]), fout);

		/* Mark whether language has an ACL */
		if (!PQgetisnull(res, i, i_lanacl))
			planginfo[i].dobj.components |= DUMP_COMPONENT_ACL;
	}

	PQclear(res);

	destroyPQExpBuffer(query);
}
8520 :
8521 : /*
8522 : * getCasts
8523 : * get basic information about most casts in the system
8524 : *
8525 : * Skip casts from a range to its multirange, since we'll create those
8526 : * automatically.
8527 : */
8528 : void
8529 308 : getCasts(Archive *fout)
8530 : {
8531 : PGresult *res;
8532 : int ntups;
8533 : int i;
8534 308 : PQExpBuffer query = createPQExpBuffer();
8535 : CastInfo *castinfo;
8536 : int i_tableoid;
8537 : int i_oid;
8538 : int i_castsource;
8539 : int i_casttarget;
8540 : int i_castfunc;
8541 : int i_castcontext;
8542 : int i_castmethod;
8543 :
8544 308 : if (fout->remoteVersion >= 140000)
8545 : {
8546 308 : appendPQExpBufferStr(query, "SELECT tableoid, oid, "
8547 : "castsource, casttarget, castfunc, castcontext, "
8548 : "castmethod "
8549 : "FROM pg_cast c "
8550 : "WHERE NOT EXISTS ( "
8551 : "SELECT 1 FROM pg_range r "
8552 : "WHERE c.castsource = r.rngtypid "
8553 : "AND c.casttarget = r.rngmultitypid "
8554 : ") "
8555 : "ORDER BY 3,4");
8556 : }
8557 : else
8558 : {
8559 0 : appendPQExpBufferStr(query, "SELECT tableoid, oid, "
8560 : "castsource, casttarget, castfunc, castcontext, "
8561 : "castmethod "
8562 : "FROM pg_cast ORDER BY 3,4");
8563 : }
8564 :
8565 308 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8566 :
8567 308 : ntups = PQntuples(res);
8568 :
8569 308 : castinfo = (CastInfo *) pg_malloc(ntups * sizeof(CastInfo));
8570 :
8571 308 : i_tableoid = PQfnumber(res, "tableoid");
8572 308 : i_oid = PQfnumber(res, "oid");
8573 308 : i_castsource = PQfnumber(res, "castsource");
8574 308 : i_casttarget = PQfnumber(res, "casttarget");
8575 308 : i_castfunc = PQfnumber(res, "castfunc");
8576 308 : i_castcontext = PQfnumber(res, "castcontext");
8577 308 : i_castmethod = PQfnumber(res, "castmethod");
8578 :
8579 69162 : for (i = 0; i < ntups; i++)
8580 : {
8581 : PQExpBufferData namebuf;
8582 : TypeInfo *sTypeInfo;
8583 : TypeInfo *tTypeInfo;
8584 :
8585 68854 : castinfo[i].dobj.objType = DO_CAST;
8586 68854 : castinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8587 68854 : castinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8588 68854 : AssignDumpId(&castinfo[i].dobj);
8589 68854 : castinfo[i]. |