Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * pg_dump.c
4 : * pg_dump is a utility for dumping out a postgres database
5 : * into a script file.
6 : *
7 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * pg_dump will read the system catalogs in a database and dump out a
11 : * script that reproduces the schema in terms of SQL that is understood
12 : * by PostgreSQL
13 : *
14 : * Note that pg_dump runs in a transaction-snapshot mode transaction,
15 : * so it sees a consistent snapshot of the database including system
16 : * catalogs. However, it relies in part on various specialized backend
17 : * functions like pg_get_indexdef(), and those things tend to look at
18 : * the currently committed state. So it is possible to get 'cache
19 : * lookup failed' error if someone performs DDL changes while a dump is
20 : * happening. The window for this sort of thing is from the acquisition
21 : * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22 : * AccessShareLock on every table it intends to dump). It isn't very large,
23 : * but it can happen.
24 : *
25 : * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26 : *
27 : * IDENTIFICATION
28 : * src/bin/pg_dump/pg_dump.c
29 : *
30 : *-------------------------------------------------------------------------
31 : */
32 : #include "postgres_fe.h"
33 :
34 : #include <unistd.h>
35 : #include <ctype.h>
36 : #include <limits.h>
37 : #ifdef HAVE_TERMIOS_H
38 : #include <termios.h>
39 : #endif
40 :
41 : #include "access/attnum.h"
42 : #include "access/sysattr.h"
43 : #include "access/transam.h"
44 : #include "catalog/pg_aggregate_d.h"
45 : #include "catalog/pg_am_d.h"
46 : #include "catalog/pg_attribute_d.h"
47 : #include "catalog/pg_authid_d.h"
48 : #include "catalog/pg_cast_d.h"
49 : #include "catalog/pg_class_d.h"
50 : #include "catalog/pg_constraint_d.h"
51 : #include "catalog/pg_default_acl_d.h"
52 : #include "catalog/pg_largeobject_d.h"
53 : #include "catalog/pg_largeobject_metadata_d.h"
54 : #include "catalog/pg_proc_d.h"
55 : #include "catalog/pg_publication_d.h"
56 : #include "catalog/pg_shdepend_d.h"
57 : #include "catalog/pg_subscription_d.h"
58 : #include "catalog/pg_type_d.h"
59 : #include "common/connect.h"
60 : #include "common/int.h"
61 : #include "common/relpath.h"
62 : #include "common/shortest_dec.h"
63 : #include "compress_io.h"
64 : #include "dumputils.h"
65 : #include "fe_utils/option_utils.h"
66 : #include "fe_utils/string_utils.h"
67 : #include "filter.h"
68 : #include "getopt_long.h"
69 : #include "libpq/libpq-fs.h"
70 : #include "parallel.h"
71 : #include "pg_backup_db.h"
72 : #include "pg_backup_utils.h"
73 : #include "pg_dump.h"
74 : #include "storage/block.h"
75 :
76 : typedef struct
77 : {
78 : Oid roleoid; /* role's OID */
79 : const char *rolename; /* role's name */
80 : } RoleNameItem;
81 :
82 : typedef struct
83 : {
84 : const char *descr; /* comment for an object */
85 : Oid classoid; /* object class (catalog OID) */
86 : Oid objoid; /* object OID */
87 : int objsubid; /* subobject (table column #) */
88 : } CommentItem;
89 :
90 : typedef struct
91 : {
92 : const char *provider; /* label provider of this security label */
93 : const char *label; /* security label for an object */
94 : Oid classoid; /* object class (catalog OID) */
95 : Oid objoid; /* object OID */
96 : int objsubid; /* subobject (table column #) */
97 : } SecLabelItem;
98 :
99 : typedef struct
100 : {
101 : Oid oid; /* object OID */
102 : char relkind; /* object kind */
103 : RelFileNumber relfilenumber; /* object filenode */
104 : Oid toast_oid; /* toast table OID */
105 : RelFileNumber toast_relfilenumber; /* toast table filenode */
106 : Oid toast_index_oid; /* toast table index OID */
107 : RelFileNumber toast_index_relfilenumber; /* toast table index filenode */
108 : } BinaryUpgradeClassOidItem;
109 :
110 : /* sequence types */
111 : typedef enum SeqType
112 : {
113 : SEQTYPE_SMALLINT,
114 : SEQTYPE_INTEGER,
115 : SEQTYPE_BIGINT,
116 : } SeqType;
117 :
118 : static const char *const SeqTypeNames[] =
119 : {
120 : [SEQTYPE_SMALLINT] = "smallint",
121 : [SEQTYPE_INTEGER] = "integer",
122 : [SEQTYPE_BIGINT] = "bigint",
123 : };
124 :
125 : StaticAssertDecl(lengthof(SeqTypeNames) == (SEQTYPE_BIGINT + 1),
126 : "array length mismatch");
127 :
128 : typedef struct
129 : {
130 : Oid oid; /* sequence OID */
131 : SeqType seqtype; /* data type of sequence */
132 : bool cycled; /* whether sequence cycles */
133 : int64 minv; /* minimum value */
134 : int64 maxv; /* maximum value */
135 : int64 startv; /* start value */
136 : int64 incby; /* increment value */
137 : int64 cache; /* cache size */
138 : int64 last_value; /* last value of sequence */
139 : bool is_called; /* whether nextval advances before returning */
140 : } SequenceItem;
141 :
142 : typedef enum OidOptions
143 : {
144 : zeroIsError = 1,
145 : zeroAsStar = 2,
146 : zeroAsNone = 4,
147 : } OidOptions;
148 :
149 : /* global decls */
150 : static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
151 :
152 : static Oid g_last_builtin_oid; /* value of the last builtin oid */
153 :
154 : /* The specified names/patterns should to match at least one entity */
155 : static int strict_names = 0;
156 :
157 : static pg_compress_algorithm compression_algorithm = PG_COMPRESSION_NONE;
158 :
159 : /*
160 : * Object inclusion/exclusion lists
161 : *
162 : * The string lists record the patterns given by command-line switches,
163 : * which we then convert to lists of OIDs of matching objects.
164 : */
165 : static SimpleStringList schema_include_patterns = {NULL, NULL};
166 : static SimpleOidList schema_include_oids = {NULL, NULL};
167 : static SimpleStringList schema_exclude_patterns = {NULL, NULL};
168 : static SimpleOidList schema_exclude_oids = {NULL, NULL};
169 :
170 : static SimpleStringList table_include_patterns = {NULL, NULL};
171 : static SimpleStringList table_include_patterns_and_children = {NULL, NULL};
172 : static SimpleOidList table_include_oids = {NULL, NULL};
173 : static SimpleStringList table_exclude_patterns = {NULL, NULL};
174 : static SimpleStringList table_exclude_patterns_and_children = {NULL, NULL};
175 : static SimpleOidList table_exclude_oids = {NULL, NULL};
176 : static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
177 : static SimpleStringList tabledata_exclude_patterns_and_children = {NULL, NULL};
178 : static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
179 :
180 : static SimpleStringList foreign_servers_include_patterns = {NULL, NULL};
181 : static SimpleOidList foreign_servers_include_oids = {NULL, NULL};
182 :
183 : static SimpleStringList extension_include_patterns = {NULL, NULL};
184 : static SimpleOidList extension_include_oids = {NULL, NULL};
185 :
186 : static SimpleStringList extension_exclude_patterns = {NULL, NULL};
187 : static SimpleOidList extension_exclude_oids = {NULL, NULL};
188 :
189 : static const CatalogId nilCatalogId = {0, 0};
190 :
191 : /* override for standard extra_float_digits setting */
192 : static bool have_extra_float_digits = false;
193 : static int extra_float_digits;
194 :
195 : /* sorted table of role names */
196 : static RoleNameItem *rolenames = NULL;
197 : static int nrolenames = 0;
198 :
199 : /* sorted table of comments */
200 : static CommentItem *comments = NULL;
201 : static int ncomments = 0;
202 :
203 : /* sorted table of security labels */
204 : static SecLabelItem *seclabels = NULL;
205 : static int nseclabels = 0;
206 :
207 : /* sorted table of pg_class information for binary upgrade */
208 : static BinaryUpgradeClassOidItem *binaryUpgradeClassOids = NULL;
209 : static int nbinaryUpgradeClassOids = 0;
210 :
211 : /* sorted table of sequences */
212 : static SequenceItem *sequences = NULL;
213 : static int nsequences = 0;
214 :
215 : /*
216 : * For binary upgrade, the dump ID of pg_largeobject_metadata is saved for use
217 : * as a dependency for pg_shdepend and any large object comments/seclabels.
218 : */
219 : static DumpId lo_metadata_dumpId;
220 :
221 : /* Maximum number of relations to fetch in a fetchAttributeStats() call. */
222 : #define MAX_ATTR_STATS_RELS 64
223 :
224 : /*
225 : * The default number of rows per INSERT when
226 : * --inserts is specified without --rows-per-insert
227 : */
228 : #define DUMP_DEFAULT_ROWS_PER_INSERT 1
229 :
230 : /*
231 : * Maximum number of large objects to group into a single ArchiveEntry.
232 : * At some point we might want to make this user-controllable, but for now
233 : * a hard-wired setting will suffice.
234 : */
235 : #define MAX_BLOBS_PER_ARCHIVE_ENTRY 1000
236 :
237 : /*
238 : * Macro for producing quoted, schema-qualified name of a dumpable object.
239 : */
240 : #define fmtQualifiedDumpable(obj) \
241 : fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
242 : (obj)->dobj.name)
243 :
244 : static void help(const char *progname);
245 : static void setup_connection(Archive *AH,
246 : const char *dumpencoding, const char *dumpsnapshot,
247 : char *use_role);
248 : static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
249 : static void expand_schema_name_patterns(Archive *fout,
250 : SimpleStringList *patterns,
251 : SimpleOidList *oids,
252 : bool strict_names);
253 : static void expand_extension_name_patterns(Archive *fout,
254 : SimpleStringList *patterns,
255 : SimpleOidList *oids,
256 : bool strict_names);
257 : static void expand_foreign_server_name_patterns(Archive *fout,
258 : SimpleStringList *patterns,
259 : SimpleOidList *oids);
260 : static void expand_table_name_patterns(Archive *fout,
261 : SimpleStringList *patterns,
262 : SimpleOidList *oids,
263 : bool strict_names,
264 : bool with_child_tables);
265 : static void prohibit_crossdb_refs(PGconn *conn, const char *dbname,
266 : const char *pattern);
267 :
268 : static NamespaceInfo *findNamespace(Oid nsoid);
269 : static void dumpTableData(Archive *fout, const TableDataInfo *tdinfo);
270 : static void refreshMatViewData(Archive *fout, const TableDataInfo *tdinfo);
271 : static const char *getRoleName(const char *roleoid_str);
272 : static void collectRoleNames(Archive *fout);
273 : static void getAdditionalACLs(Archive *fout);
274 : static void dumpCommentExtended(Archive *fout, const char *type,
275 : const char *name, const char *namespace,
276 : const char *owner, CatalogId catalogId,
277 : int subid, DumpId dumpId,
278 : const char *initdb_comment);
279 : static inline void dumpComment(Archive *fout, const char *type,
280 : const char *name, const char *namespace,
281 : const char *owner, CatalogId catalogId,
282 : int subid, DumpId dumpId);
283 : static int findComments(Oid classoid, Oid objoid, CommentItem **items);
284 : static void collectComments(Archive *fout);
285 : static void dumpSecLabel(Archive *fout, const char *type, const char *name,
286 : const char *namespace, const char *owner,
287 : CatalogId catalogId, int subid, DumpId dumpId);
288 : static int findSecLabels(Oid classoid, Oid objoid, SecLabelItem **items);
289 : static void collectSecLabels(Archive *fout);
290 : static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
291 : static void dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo);
292 : static void dumpExtension(Archive *fout, const ExtensionInfo *extinfo);
293 : static void dumpType(Archive *fout, const TypeInfo *tyinfo);
294 : static void dumpBaseType(Archive *fout, const TypeInfo *tyinfo);
295 : static void dumpEnumType(Archive *fout, const TypeInfo *tyinfo);
296 : static void dumpRangeType(Archive *fout, const TypeInfo *tyinfo);
297 : static void dumpUndefinedType(Archive *fout, const TypeInfo *tyinfo);
298 : static void dumpDomain(Archive *fout, const TypeInfo *tyinfo);
299 : static void dumpCompositeType(Archive *fout, const TypeInfo *tyinfo);
300 : static void dumpCompositeTypeColComments(Archive *fout, const TypeInfo *tyinfo,
301 : PGresult *res);
302 : static void dumpShellType(Archive *fout, const ShellTypeInfo *stinfo);
303 : static void dumpProcLang(Archive *fout, const ProcLangInfo *plang);
304 : static void dumpFunc(Archive *fout, const FuncInfo *finfo);
305 : static void dumpCast(Archive *fout, const CastInfo *cast);
306 : static void dumpTransform(Archive *fout, const TransformInfo *transform);
307 : static void dumpOpr(Archive *fout, const OprInfo *oprinfo);
308 : static void dumpAccessMethod(Archive *fout, const AccessMethodInfo *aminfo);
309 : static void dumpOpclass(Archive *fout, const OpclassInfo *opcinfo);
310 : static void dumpOpfamily(Archive *fout, const OpfamilyInfo *opfinfo);
311 : static void dumpCollation(Archive *fout, const CollInfo *collinfo);
312 : static void dumpConversion(Archive *fout, const ConvInfo *convinfo);
313 : static void dumpRule(Archive *fout, const RuleInfo *rinfo);
314 : static void dumpAgg(Archive *fout, const AggInfo *agginfo);
315 : static void dumpTrigger(Archive *fout, const TriggerInfo *tginfo);
316 : static void dumpEventTrigger(Archive *fout, const EventTriggerInfo *evtinfo);
317 : static void dumpTable(Archive *fout, const TableInfo *tbinfo);
318 : static void dumpTableSchema(Archive *fout, const TableInfo *tbinfo);
319 : static void dumpTableAttach(Archive *fout, const TableAttachInfo *attachinfo);
320 : static void dumpAttrDef(Archive *fout, const AttrDefInfo *adinfo);
321 : static void collectSequences(Archive *fout);
322 : static void dumpSequence(Archive *fout, const TableInfo *tbinfo);
323 : static void dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo);
324 : static void dumpIndex(Archive *fout, const IndxInfo *indxinfo);
325 : static void dumpIndexAttach(Archive *fout, const IndexAttachInfo *attachinfo);
326 : static void dumpStatisticsExt(Archive *fout, const StatsExtInfo *statsextinfo);
327 : static void dumpConstraint(Archive *fout, const ConstraintInfo *coninfo);
328 : static void dumpTableConstraintComment(Archive *fout, const ConstraintInfo *coninfo);
329 : static void dumpTSParser(Archive *fout, const TSParserInfo *prsinfo);
330 : static void dumpTSDictionary(Archive *fout, const TSDictInfo *dictinfo);
331 : static void dumpTSTemplate(Archive *fout, const TSTemplateInfo *tmplinfo);
332 : static void dumpTSConfig(Archive *fout, const TSConfigInfo *cfginfo);
333 : static void dumpForeignDataWrapper(Archive *fout, const FdwInfo *fdwinfo);
334 : static void dumpForeignServer(Archive *fout, const ForeignServerInfo *srvinfo);
335 : static void dumpUserMappings(Archive *fout,
336 : const char *servername, const char *namespace,
337 : const char *owner, CatalogId catalogId, DumpId dumpId);
338 : static void dumpDefaultACL(Archive *fout, const DefaultACLInfo *daclinfo);
339 :
340 : static DumpId dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
341 : const char *type, const char *name, const char *subname,
342 : const char *nspname, const char *tag, const char *owner,
343 : const DumpableAcl *dacl);
344 :
345 : static void getDependencies(Archive *fout);
346 : static void BuildArchiveDependencies(Archive *fout);
347 : static void findDumpableDependencies(ArchiveHandle *AH, const DumpableObject *dobj,
348 : DumpId **dependencies, int *nDeps, int *allocDeps);
349 :
350 : static DumpableObject *createBoundaryObjects(void);
351 : static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
352 : DumpableObject *boundaryObjs);
353 :
354 : static void addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx);
355 : static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
356 : static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
357 : static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
358 : static void buildMatViewRefreshDependencies(Archive *fout);
359 : static void getTableDataFKConstraints(void);
360 : static void determineNotNullFlags(Archive *fout, PGresult *res, int r,
361 : TableInfo *tbinfo, int j,
362 : int i_notnull_name,
363 : int i_notnull_comment,
364 : int i_notnull_invalidoid,
365 : int i_notnull_noinherit,
366 : int i_notnull_islocal,
367 : PQExpBuffer *invalidnotnulloids);
368 : static char *format_function_arguments(const FuncInfo *finfo, const char *funcargs,
369 : bool is_agg);
370 : static char *format_function_signature(Archive *fout,
371 : const FuncInfo *finfo, bool honor_quotes);
372 : static char *convertRegProcReference(const char *proc);
373 : static char *getFormattedOperatorName(const char *oproid);
374 : static char *convertTSFunction(Archive *fout, Oid funcOid);
375 : static const char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
376 : static void getLOs(Archive *fout);
377 : static void dumpLO(Archive *fout, const LoInfo *loinfo);
378 : static int dumpLOs(Archive *fout, const void *arg);
379 : static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo);
380 : static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo);
381 : static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo);
382 : static void dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo);
383 : static void dumpSubscriptionTable(Archive *fout, const SubRelInfo *subrinfo);
384 : static void dumpDatabase(Archive *fout);
385 : static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
386 : const char *dbname, Oid dboid);
387 : static void dumpEncoding(Archive *AH);
388 : static void dumpStdStrings(Archive *AH);
389 : static void dumpSearchPath(Archive *AH);
390 : static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
391 : PQExpBuffer upgrade_buffer,
392 : Oid pg_type_oid,
393 : bool force_array_type,
394 : bool include_multirange_type);
395 : static void binary_upgrade_set_type_oids_by_rel(Archive *fout,
396 : PQExpBuffer upgrade_buffer,
397 : const TableInfo *tbinfo);
398 : static void collectBinaryUpgradeClassOids(Archive *fout);
399 : static void binary_upgrade_set_pg_class_oids(Archive *fout,
400 : PQExpBuffer upgrade_buffer,
401 : Oid pg_class_oid);
402 : static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
403 : const DumpableObject *dobj,
404 : const char *objtype,
405 : const char *objname,
406 : const char *objnamespace);
407 : static const char *getAttrName(int attrnum, const TableInfo *tblInfo);
408 : static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
409 : static bool nonemptyReloptions(const char *reloptions);
410 : static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
411 : const char *prefix, Archive *fout);
412 : static char *get_synchronized_snapshot(Archive *fout);
413 : static void set_restrict_relation_kind(Archive *AH, const char *value);
414 : static void setupDumpWorker(Archive *AH);
415 : static TableInfo *getRootTableInfo(const TableInfo *tbinfo);
416 : static bool forcePartitionRootLoad(const TableInfo *tbinfo);
417 : static void read_dump_filters(const char *filename, DumpOptions *dopt);
418 :
419 :
420 : int
421 586 : main(int argc, char **argv)
422 : {
423 : int c;
424 586 : const char *filename = NULL;
425 586 : const char *format = "p";
426 : TableInfo *tblinfo;
427 : int numTables;
428 : DumpableObject **dobjs;
429 : int numObjs;
430 : DumpableObject *boundaryObjs;
431 : int i;
432 : int optindex;
433 : RestoreOptions *ropt;
434 : Archive *fout; /* the script file */
435 586 : bool g_verbose = false;
436 586 : const char *dumpencoding = NULL;
437 586 : const char *dumpsnapshot = NULL;
438 586 : char *use_role = NULL;
439 586 : int numWorkers = 1;
440 586 : int plainText = 0;
441 586 : ArchiveFormat archiveFormat = archUnknown;
442 : ArchiveMode archiveMode;
443 586 : pg_compress_specification compression_spec = {0};
444 586 : char *compression_detail = NULL;
445 586 : char *compression_algorithm_str = "none";
446 586 : char *error_detail = NULL;
447 586 : bool user_compression_defined = false;
448 586 : DataDirSyncMethod sync_method = DATA_DIR_SYNC_METHOD_FSYNC;
449 586 : bool data_only = false;
450 586 : bool schema_only = false;
451 586 : bool statistics_only = false;
452 586 : bool with_statistics = false;
453 586 : bool no_data = false;
454 586 : bool no_schema = false;
455 586 : bool no_statistics = false;
456 :
457 : static DumpOptions dopt;
458 :
459 : static struct option long_options[] = {
460 : {"data-only", no_argument, NULL, 'a'},
461 : {"blobs", no_argument, NULL, 'b'},
462 : {"large-objects", no_argument, NULL, 'b'},
463 : {"no-blobs", no_argument, NULL, 'B'},
464 : {"no-large-objects", no_argument, NULL, 'B'},
465 : {"clean", no_argument, NULL, 'c'},
466 : {"create", no_argument, NULL, 'C'},
467 : {"dbname", required_argument, NULL, 'd'},
468 : {"extension", required_argument, NULL, 'e'},
469 : {"file", required_argument, NULL, 'f'},
470 : {"format", required_argument, NULL, 'F'},
471 : {"host", required_argument, NULL, 'h'},
472 : {"jobs", 1, NULL, 'j'},
473 : {"no-reconnect", no_argument, NULL, 'R'},
474 : {"no-owner", no_argument, NULL, 'O'},
475 : {"port", required_argument, NULL, 'p'},
476 : {"schema", required_argument, NULL, 'n'},
477 : {"exclude-schema", required_argument, NULL, 'N'},
478 : {"schema-only", no_argument, NULL, 's'},
479 : {"superuser", required_argument, NULL, 'S'},
480 : {"table", required_argument, NULL, 't'},
481 : {"exclude-table", required_argument, NULL, 'T'},
482 : {"no-password", no_argument, NULL, 'w'},
483 : {"password", no_argument, NULL, 'W'},
484 : {"username", required_argument, NULL, 'U'},
485 : {"verbose", no_argument, NULL, 'v'},
486 : {"no-privileges", no_argument, NULL, 'x'},
487 : {"no-acl", no_argument, NULL, 'x'},
488 : {"compress", required_argument, NULL, 'Z'},
489 : {"encoding", required_argument, NULL, 'E'},
490 : {"help", no_argument, NULL, '?'},
491 : {"version", no_argument, NULL, 'V'},
492 :
493 : /*
494 : * the following options don't have an equivalent short option letter
495 : */
496 : {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
497 : {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
498 : {"column-inserts", no_argument, &dopt.column_inserts, 1},
499 : {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
500 : {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
501 : {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
502 : {"exclude-table-data", required_argument, NULL, 4},
503 : {"extra-float-digits", required_argument, NULL, 8},
504 : {"if-exists", no_argument, &dopt.if_exists, 1},
505 : {"inserts", no_argument, NULL, 9},
506 : {"lock-wait-timeout", required_argument, NULL, 2},
507 : {"no-table-access-method", no_argument, &dopt.outputNoTableAm, 1},
508 : {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
509 : {"quote-all-identifiers", no_argument, "e_all_identifiers, 1},
510 : {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
511 : {"role", required_argument, NULL, 3},
512 : {"section", required_argument, NULL, 5},
513 : {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
514 : {"snapshot", required_argument, NULL, 6},
515 : {"statistics", no_argument, NULL, 22},
516 : {"statistics-only", no_argument, NULL, 18},
517 : {"strict-names", no_argument, &strict_names, 1},
518 : {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
519 : {"no-comments", no_argument, &dopt.no_comments, 1},
520 : {"no-data", no_argument, NULL, 19},
521 : {"no-policies", no_argument, &dopt.no_policies, 1},
522 : {"no-publications", no_argument, &dopt.no_publications, 1},
523 : {"no-schema", no_argument, NULL, 20},
524 : {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
525 : {"no-statistics", no_argument, NULL, 21},
526 : {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
527 : {"no-toast-compression", no_argument, &dopt.no_toast_compression, 1},
528 : {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
529 : {"no-sync", no_argument, NULL, 7},
530 : {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
531 : {"rows-per-insert", required_argument, NULL, 10},
532 : {"include-foreign-data", required_argument, NULL, 11},
533 : {"table-and-children", required_argument, NULL, 12},
534 : {"exclude-table-and-children", required_argument, NULL, 13},
535 : {"exclude-table-data-and-children", required_argument, NULL, 14},
536 : {"sync-method", required_argument, NULL, 15},
537 : {"filter", required_argument, NULL, 16},
538 : {"exclude-extension", required_argument, NULL, 17},
539 : {"sequence-data", no_argument, &dopt.sequence_data, 1},
540 : {"restrict-key", required_argument, NULL, 25},
541 :
542 : {NULL, 0, NULL, 0}
543 : };
544 :
545 586 : pg_logging_init(argv[0]);
546 586 : pg_logging_set_level(PG_LOG_WARNING);
547 586 : set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
548 :
549 : /*
550 : * Initialize what we need for parallel execution, especially for thread
551 : * support on Windows.
552 : */
553 586 : init_parallel_dump_utils();
554 :
555 586 : progname = get_progname(argv[0]);
556 :
557 586 : if (argc > 1)
558 : {
559 586 : if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
560 : {
561 2 : help(progname);
562 2 : exit_nicely(0);
563 : }
564 584 : if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
565 : {
566 124 : puts("pg_dump (PostgreSQL) " PG_VERSION);
567 124 : exit_nicely(0);
568 : }
569 : }
570 :
571 460 : InitDumpOptions(&dopt);
572 :
573 2550 : while ((c = getopt_long(argc, argv, "abBcCd:e:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxXZ:",
574 2550 : long_options, &optindex)) != -1)
575 : {
576 2106 : switch (c)
577 : {
578 18 : case 'a': /* Dump data only */
579 18 : data_only = true;
580 18 : break;
581 :
582 2 : case 'b': /* Dump LOs */
583 2 : dopt.outputLOs = true;
584 2 : break;
585 :
586 4 : case 'B': /* Don't dump LOs */
587 4 : dopt.dontOutputLOs = true;
588 4 : break;
589 :
590 12 : case 'c': /* clean (i.e., drop) schema prior to create */
591 12 : dopt.outputClean = 1;
592 12 : break;
593 :
594 58 : case 'C': /* Create DB */
595 58 : dopt.outputCreateDB = 1;
596 58 : break;
597 :
598 10 : case 'd': /* database name */
599 10 : dopt.cparams.dbname = pg_strdup(optarg);
600 10 : break;
601 :
602 8 : case 'e': /* include extension(s) */
603 8 : simple_string_list_append(&extension_include_patterns, optarg);
604 8 : dopt.include_everything = false;
605 8 : break;
606 :
607 4 : case 'E': /* Dump encoding */
608 4 : dumpencoding = pg_strdup(optarg);
609 4 : break;
610 :
611 366 : case 'f':
612 366 : filename = pg_strdup(optarg);
613 366 : break;
614 :
615 220 : case 'F':
616 220 : format = pg_strdup(optarg);
617 220 : break;
618 :
619 68 : case 'h': /* server host */
620 68 : dopt.cparams.pghost = pg_strdup(optarg);
621 68 : break;
622 :
623 22 : case 'j': /* number of dump jobs */
624 22 : if (!option_parse_int(optarg, "-j/--jobs", 1,
625 : PG_MAX_JOBS,
626 : &numWorkers))
627 2 : exit_nicely(1);
628 20 : break;
629 :
630 34 : case 'n': /* include schema(s) */
631 34 : simple_string_list_append(&schema_include_patterns, optarg);
632 34 : dopt.include_everything = false;
633 34 : break;
634 :
635 2 : case 'N': /* exclude schema(s) */
636 2 : simple_string_list_append(&schema_exclude_patterns, optarg);
637 2 : break;
638 :
639 4 : case 'O': /* Don't reconnect to match owner */
640 4 : dopt.outputNoOwner = 1;
641 4 : break;
642 :
643 146 : case 'p': /* server port */
644 146 : dopt.cparams.pgport = pg_strdup(optarg);
645 146 : break;
646 :
647 4 : case 'R':
648 : /* no-op, still accepted for backwards compatibility */
649 4 : break;
650 :
651 14 : case 's': /* dump schema only */
652 14 : schema_only = true;
653 14 : break;
654 :
655 2 : case 'S': /* Username for superuser in plain text output */
656 2 : dopt.outputSuperuser = pg_strdup(optarg);
657 2 : break;
658 :
659 16 : case 't': /* include table(s) */
660 16 : simple_string_list_append(&table_include_patterns, optarg);
661 16 : dopt.include_everything = false;
662 16 : break;
663 :
664 8 : case 'T': /* exclude table(s) */
665 8 : simple_string_list_append(&table_exclude_patterns, optarg);
666 8 : break;
667 :
668 72 : case 'U':
669 72 : dopt.cparams.username = pg_strdup(optarg);
670 72 : break;
671 :
672 12 : case 'v': /* verbose */
673 12 : g_verbose = true;
674 12 : pg_logging_increase_verbosity();
675 12 : break;
676 :
677 2 : case 'w':
678 2 : dopt.cparams.promptPassword = TRI_NO;
679 2 : break;
680 :
681 0 : case 'W':
682 0 : dopt.cparams.promptPassword = TRI_YES;
683 0 : break;
684 :
685 4 : case 'x': /* skip ACL dump */
686 4 : dopt.aclsSkip = true;
687 4 : break;
688 :
689 24 : case 'Z': /* Compression */
690 24 : parse_compress_options(optarg, &compression_algorithm_str,
691 : &compression_detail);
692 24 : user_compression_defined = true;
693 24 : break;
694 :
695 258 : case 0:
696 : /* This covers the long options. */
697 258 : break;
698 :
699 4 : case 2: /* lock-wait-timeout */
700 4 : dopt.lockWaitTimeout = pg_strdup(optarg);
701 4 : break;
702 :
703 6 : case 3: /* SET ROLE */
704 6 : use_role = pg_strdup(optarg);
705 6 : break;
706 :
707 2 : case 4: /* exclude table(s) data */
708 2 : simple_string_list_append(&tabledata_exclude_patterns, optarg);
709 2 : break;
710 :
711 12 : case 5: /* section */
712 12 : set_dump_section(optarg, &dopt.dumpSections);
713 12 : break;
714 :
715 0 : case 6: /* snapshot */
716 0 : dumpsnapshot = pg_strdup(optarg);
717 0 : break;
718 :
719 282 : case 7: /* no-sync */
720 282 : dosync = false;
721 282 : break;
722 :
723 2 : case 8:
724 2 : have_extra_float_digits = true;
725 2 : if (!option_parse_int(optarg, "--extra-float-digits", -15, 3,
726 : &extra_float_digits))
727 2 : exit_nicely(1);
728 0 : break;
729 :
730 4 : case 9: /* inserts */
731 :
732 : /*
733 : * dump_inserts also stores --rows-per-insert, careful not to
734 : * overwrite that.
735 : */
736 4 : if (dopt.dump_inserts == 0)
737 4 : dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
738 4 : break;
739 :
740 4 : case 10: /* rows per insert */
741 4 : if (!option_parse_int(optarg, "--rows-per-insert", 1, INT_MAX,
742 : &dopt.dump_inserts))
743 2 : exit_nicely(1);
744 2 : break;
745 :
746 8 : case 11: /* include foreign data */
747 8 : simple_string_list_append(&foreign_servers_include_patterns,
748 : optarg);
749 8 : break;
750 :
751 2 : case 12: /* include table(s) and their children */
752 2 : simple_string_list_append(&table_include_patterns_and_children,
753 : optarg);
754 2 : dopt.include_everything = false;
755 2 : break;
756 :
757 2 : case 13: /* exclude table(s) and their children */
758 2 : simple_string_list_append(&table_exclude_patterns_and_children,
759 : optarg);
760 2 : break;
761 :
762 2 : case 14: /* exclude data of table(s) and children */
763 2 : simple_string_list_append(&tabledata_exclude_patterns_and_children,
764 : optarg);
765 2 : break;
766 :
767 0 : case 15:
768 0 : if (!parse_sync_method(optarg, &sync_method))
769 0 : exit_nicely(1);
770 0 : break;
771 :
772 52 : case 16: /* read object filters from file */
773 52 : read_dump_filters(optarg, &dopt);
774 44 : break;
775 :
776 2 : case 17: /* exclude extension(s) */
777 2 : simple_string_list_append(&extension_exclude_patterns,
778 : optarg);
779 2 : break;
780 :
781 8 : case 18:
782 8 : statistics_only = true;
783 8 : break;
784 :
785 72 : case 19:
786 72 : no_data = true;
787 72 : break;
788 :
789 4 : case 20:
790 4 : no_schema = true;
791 4 : break;
792 :
793 16 : case 21:
794 16 : no_statistics = true;
795 16 : break;
796 :
797 174 : case 22:
798 174 : with_statistics = true;
799 174 : break;
800 :
801 52 : case 25:
802 52 : dopt.restrict_key = pg_strdup(optarg);
803 52 : break;
804 :
805 2 : default:
806 : /* getopt_long already emitted a complaint */
807 2 : pg_log_error_hint("Try \"%s --help\" for more information.", progname);
808 2 : exit_nicely(1);
809 : }
810 : }
811 :
812 : /*
813 : * Non-option argument specifies database name as long as it wasn't
814 : * already specified with -d / --dbname
815 : */
816 444 : if (optind < argc && dopt.cparams.dbname == NULL)
817 372 : dopt.cparams.dbname = argv[optind++];
818 :
819 : /* Complain if any arguments remain */
820 444 : if (optind < argc)
821 : {
822 2 : pg_log_error("too many command-line arguments (first is \"%s\")",
823 : argv[optind]);
824 2 : pg_log_error_hint("Try \"%s --help\" for more information.", progname);
825 2 : exit_nicely(1);
826 : }
827 :
828 : /* --column-inserts implies --inserts */
829 442 : if (dopt.column_inserts && dopt.dump_inserts == 0)
830 2 : dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
831 :
832 : /* reject conflicting "-only" options */
833 442 : if (data_only && schema_only)
834 2 : pg_fatal("options -s/--schema-only and -a/--data-only cannot be used together");
835 440 : if (schema_only && statistics_only)
836 2 : pg_fatal("options -s/--schema-only and --statistics-only cannot be used together");
837 438 : if (data_only && statistics_only)
838 2 : pg_fatal("options -a/--data-only and --statistics-only cannot be used together");
839 :
840 : /* reject conflicting "-only" and "no-" options */
841 436 : if (data_only && no_data)
842 0 : pg_fatal("options -a/--data-only and --no-data cannot be used together");
843 436 : if (schema_only && no_schema)
844 0 : pg_fatal("options -s/--schema-only and --no-schema cannot be used together");
845 436 : if (statistics_only && no_statistics)
846 2 : pg_fatal("options --statistics-only and --no-statistics cannot be used together");
847 :
848 : /* reject conflicting "no-" options */
849 434 : if (with_statistics && no_statistics)
850 0 : pg_fatal("options --statistics and --no-statistics cannot be used together");
851 :
852 : /* reject conflicting "-only" options */
853 434 : if (data_only && with_statistics)
854 0 : pg_fatal("options %s and %s cannot be used together",
855 : "-a/--data-only", "--statistics");
856 434 : if (schema_only && with_statistics)
857 2 : pg_fatal("options %s and %s cannot be used together",
858 : "-s/--schema-only", "--statistics");
859 :
860 432 : if (schema_only && foreign_servers_include_patterns.head != NULL)
861 2 : pg_fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
862 :
863 430 : if (numWorkers > 1 && foreign_servers_include_patterns.head != NULL)
864 2 : pg_fatal("option --include-foreign-data is not supported with parallel backup");
865 :
866 428 : if (data_only && dopt.outputClean)
867 2 : pg_fatal("options -c/--clean and -a/--data-only cannot be used together");
868 :
869 426 : if (dopt.if_exists && !dopt.outputClean)
870 2 : pg_fatal("option --if-exists requires option -c/--clean");
871 :
872 : /*
873 : * Set derivative flags. Ambiguous or nonsensical combinations, e.g.
874 : * "--schema-only --no-schema", will have already caused an error in one
875 : * of the checks above.
876 : */
877 424 : dopt.dumpData = ((dopt.dumpData && !schema_only && !statistics_only) ||
878 848 : data_only) && !no_data;
879 424 : dopt.dumpSchema = ((dopt.dumpSchema && !data_only && !statistics_only) ||
880 848 : schema_only) && !no_schema;
881 424 : dopt.dumpStatistics = ((dopt.dumpStatistics && !schema_only && !data_only) ||
882 848 : (statistics_only || with_statistics)) && !no_statistics;
883 :
884 :
885 : /*
886 : * --inserts are already implied above if --column-inserts or
887 : * --rows-per-insert were specified.
888 : */
889 424 : if (dopt.do_nothing && dopt.dump_inserts == 0)
890 2 : pg_fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
891 :
892 : /* Identify archive format to emit */
893 422 : archiveFormat = parseArchiveFormat(format, &archiveMode);
894 :
895 : /* archiveFormat specific setup */
896 420 : if (archiveFormat == archNull)
897 : {
898 308 : plainText = 1;
899 :
900 : /*
901 : * If you don't provide a restrict key, one will be appointed for you.
902 : */
903 308 : if (!dopt.restrict_key)
904 256 : dopt.restrict_key = generate_restrict_key();
905 308 : if (!dopt.restrict_key)
906 0 : pg_fatal("could not generate restrict key");
907 308 : if (!valid_restrict_key(dopt.restrict_key))
908 0 : pg_fatal("invalid restrict key");
909 : }
910 112 : else if (dopt.restrict_key)
911 0 : pg_fatal("option --restrict-key can only be used with --format=plain");
912 :
913 : /*
914 : * Custom and directory formats are compressed by default with gzip when
915 : * available, not the others. If gzip is not available, no compression is
916 : * done by default.
917 : */
918 420 : if ((archiveFormat == archCustom || archiveFormat == archDirectory) &&
919 106 : !user_compression_defined)
920 : {
921 : #ifdef HAVE_LIBZ
922 96 : compression_algorithm_str = "gzip";
923 : #else
924 : compression_algorithm_str = "none";
925 : #endif
926 : }
927 :
928 : /*
929 : * Compression options
930 : */
931 420 : if (!parse_compress_algorithm(compression_algorithm_str,
932 : &compression_algorithm))
933 2 : pg_fatal("unrecognized compression algorithm: \"%s\"",
934 : compression_algorithm_str);
935 :
936 418 : parse_compress_specification(compression_algorithm, compression_detail,
937 : &compression_spec);
938 418 : error_detail = validate_compress_specification(&compression_spec);
939 418 : if (error_detail != NULL)
940 6 : pg_fatal("invalid compression specification: %s",
941 : error_detail);
942 :
943 412 : error_detail = supports_compression(compression_spec);
944 412 : if (error_detail != NULL)
945 0 : pg_fatal("%s", error_detail);
946 :
947 : /*
948 : * Disable support for zstd workers for now - these are based on
949 : * threading, and it's unclear how it interacts with parallel dumps on
950 : * platforms where that relies on threads too (e.g. Windows).
951 : */
952 412 : if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS)
953 0 : pg_log_warning("compression option \"%s\" is not currently supported by pg_dump",
954 : "workers");
955 :
956 : /*
957 : * If emitting an archive format, we always want to emit a DATABASE item,
958 : * in case --create is specified at pg_restore time.
959 : */
960 412 : if (!plainText)
961 112 : dopt.outputCreateDB = 1;
962 :
963 : /* Parallel backup only in the directory archive format so far */
964 412 : if (archiveFormat != archDirectory && numWorkers > 1)
965 2 : pg_fatal("parallel backup only supported by the directory format");
966 :
967 : /* Open the output file */
968 410 : fout = CreateArchive(filename, archiveFormat, compression_spec,
969 : dosync, archiveMode, setupDumpWorker, sync_method);
970 :
971 : /* Make dump options accessible right away */
972 408 : SetArchiveOptions(fout, &dopt, NULL);
973 :
974 : /* Register the cleanup hook */
975 408 : on_exit_close_archive(fout);
976 :
977 : /* Let the archiver know how noisy to be */
978 408 : fout->verbose = g_verbose;
979 :
980 :
981 : /*
982 : * We allow the server to be back to 9.2, and up to any minor release of
983 : * our own major version. (See also version check in pg_dumpall.c.)
984 : */
985 408 : fout->minRemoteVersion = 90200;
986 408 : fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
987 :
988 408 : fout->numWorkers = numWorkers;
989 :
990 : /*
991 : * Open the database using the Archiver, so it knows about it. Errors mean
992 : * death.
993 : */
994 408 : ConnectDatabaseAhx(fout, &dopt.cparams, false);
995 404 : setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
996 :
997 : /*
998 : * On hot standbys, never try to dump unlogged table data, since it will
999 : * just throw an error.
1000 : */
1001 404 : if (fout->isStandby)
1002 8 : dopt.no_unlogged_table_data = true;
1003 :
1004 : /*
1005 : * Find the last built-in OID, if needed (prior to 8.1)
1006 : *
1007 : * With 8.1 and above, we can just use FirstNormalObjectId - 1.
1008 : */
1009 404 : g_last_builtin_oid = FirstNormalObjectId - 1;
1010 :
1011 404 : pg_log_info("last built-in OID is %u", g_last_builtin_oid);
1012 :
1013 : /* Expand schema selection patterns into OID lists */
1014 404 : if (schema_include_patterns.head != NULL)
1015 : {
1016 36 : expand_schema_name_patterns(fout, &schema_include_patterns,
1017 : &schema_include_oids,
1018 : strict_names);
1019 24 : if (schema_include_oids.head == NULL)
1020 2 : pg_fatal("no matching schemas were found");
1021 : }
1022 390 : expand_schema_name_patterns(fout, &schema_exclude_patterns,
1023 : &schema_exclude_oids,
1024 : false);
1025 : /* non-matching exclusion patterns aren't an error */
1026 :
1027 : /* Expand table selection patterns into OID lists */
1028 390 : expand_table_name_patterns(fout, &table_include_patterns,
1029 : &table_include_oids,
1030 : strict_names, false);
1031 380 : expand_table_name_patterns(fout, &table_include_patterns_and_children,
1032 : &table_include_oids,
1033 : strict_names, true);
1034 380 : if ((table_include_patterns.head != NULL ||
1035 358 : table_include_patterns_and_children.head != NULL) &&
1036 26 : table_include_oids.head == NULL)
1037 4 : pg_fatal("no matching tables were found");
1038 :
1039 376 : expand_table_name_patterns(fout, &table_exclude_patterns,
1040 : &table_exclude_oids,
1041 : false, false);
1042 376 : expand_table_name_patterns(fout, &table_exclude_patterns_and_children,
1043 : &table_exclude_oids,
1044 : false, true);
1045 :
1046 376 : expand_table_name_patterns(fout, &tabledata_exclude_patterns,
1047 : &tabledata_exclude_oids,
1048 : false, false);
1049 376 : expand_table_name_patterns(fout, &tabledata_exclude_patterns_and_children,
1050 : &tabledata_exclude_oids,
1051 : false, true);
1052 :
1053 376 : expand_foreign_server_name_patterns(fout, &foreign_servers_include_patterns,
1054 : &foreign_servers_include_oids);
1055 :
1056 : /* non-matching exclusion patterns aren't an error */
1057 :
1058 : /* Expand extension selection patterns into OID lists */
1059 374 : if (extension_include_patterns.head != NULL)
1060 : {
1061 10 : expand_extension_name_patterns(fout, &extension_include_patterns,
1062 : &extension_include_oids,
1063 : strict_names);
1064 10 : if (extension_include_oids.head == NULL)
1065 2 : pg_fatal("no matching extensions were found");
1066 : }
1067 372 : expand_extension_name_patterns(fout, &extension_exclude_patterns,
1068 : &extension_exclude_oids,
1069 : false);
1070 : /* non-matching exclusion patterns aren't an error */
1071 :
1072 : /*
1073 : * Dumping LOs is the default for dumps where an inclusion switch is not
1074 : * used (an "include everything" dump). -B can be used to exclude LOs
1075 : * from those dumps. -b can be used to include LOs even when an inclusion
1076 : * switch is used.
1077 : *
1078 : * -s means "schema only" and LOs are data, not schema, so we never
1079 : * include LOs when -s is used.
1080 : */
1081 372 : if (dopt.include_everything && dopt.dumpData && !dopt.dontOutputLOs)
1082 242 : dopt.outputLOs = true;
1083 :
1084 : /*
1085 : * Collect role names so we can map object owner OIDs to names.
1086 : */
1087 372 : collectRoleNames(fout);
1088 :
1089 : /*
1090 : * Now scan the database and create DumpableObject structs for all the
1091 : * objects we intend to dump.
1092 : */
1093 372 : tblinfo = getSchemaData(fout, &numTables);
1094 :
1095 370 : if (dopt.dumpData)
1096 : {
1097 290 : getTableData(&dopt, tblinfo, numTables, 0);
1098 290 : buildMatViewRefreshDependencies(fout);
1099 290 : if (!dopt.dumpSchema)
1100 14 : getTableDataFKConstraints();
1101 : }
1102 :
1103 370 : if (!dopt.dumpData && dopt.sequence_data)
1104 64 : getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
1105 :
1106 : /*
1107 : * For binary upgrade mode, dump pg_largeobject_metadata and the
1108 : * associated pg_shdepend rows. This is faster to restore than the
1109 : * equivalent set of large object commands. We can only do this for
1110 : * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
1111 : * was created WITH OIDS, so the OID column is hidden and won't be dumped.
1112 : */
1113 370 : if (dopt.binary_upgrade && fout->remoteVersion >= 120000)
1114 : {
1115 72 : TableInfo *lo_metadata = findTableByOid(LargeObjectMetadataRelationId);
1116 72 : TableInfo *shdepend = findTableByOid(SharedDependRelationId);
1117 :
1118 72 : makeTableDataInfo(&dopt, lo_metadata);
1119 72 : makeTableDataInfo(&dopt, shdepend);
1120 :
1121 : /*
1122 : * Save pg_largeobject_metadata's dump ID for use as a dependency for
1123 : * pg_shdepend and any large object comments/seclabels.
1124 : */
1125 72 : lo_metadata_dumpId = lo_metadata->dataObj->dobj.dumpId;
1126 72 : addObjectDependency(&shdepend->dataObj->dobj, lo_metadata_dumpId);
1127 :
1128 : /*
1129 : * Only dump large object shdepend rows for this database.
1130 : */
1131 72 : shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass "
1132 : "AND dbid = (SELECT oid FROM pg_database "
1133 : " WHERE datname = current_database())";
1134 :
1135 : /*
1136 : * If upgrading from v16 or newer, only dump large objects with
1137 : * comments/seclabels. For these upgrades, pg_upgrade can copy/link
1138 : * pg_largeobject_metadata's files (which is usually faster) but we
1139 : * still need to dump LOs with comments/seclabels here so that the
1140 : * subsequent COMMENT and SECURITY LABEL commands work. pg_upgrade
1141 : * can't copy/link the files from older versions because aclitem
1142 : * (needed by pg_largeobject_metadata.lomacl) changed its storage
1143 : * format in v16.
1144 : */
1145 72 : if (fout->remoteVersion >= 160000)
1146 72 : lo_metadata->dataObj->filtercond = "WHERE oid IN "
1147 : "(SELECT objoid FROM pg_description "
1148 : "WHERE classoid = " CppAsString2(LargeObjectRelationId) " "
1149 : "UNION SELECT objoid FROM pg_seclabel "
1150 : "WHERE classoid = " CppAsString2(LargeObjectRelationId) ")";
1151 : }
1152 :
1153 : /*
1154 : * In binary-upgrade mode, we do not have to worry about the actual LO
1155 : * data or the associated metadata that resides in the pg_largeobject and
1156 : * pg_largeobject_metadata tables, respectively.
1157 : *
1158 : * However, we do need to collect LO information as there may be comments
1159 : * or other information on LOs that we do need to dump out.
1160 : */
1161 370 : if (dopt.outputLOs || dopt.binary_upgrade)
1162 314 : getLOs(fout);
1163 :
1164 : /*
1165 : * Collect dependency data to assist in ordering the objects.
1166 : */
1167 370 : getDependencies(fout);
1168 :
1169 : /*
1170 : * Collect ACLs, comments, and security labels, if wanted.
1171 : */
1172 370 : if (!dopt.aclsSkip)
1173 366 : getAdditionalACLs(fout);
1174 370 : if (!dopt.no_comments)
1175 370 : collectComments(fout);
1176 370 : if (!dopt.no_security_labels)
1177 370 : collectSecLabels(fout);
1178 :
1179 : /* For binary upgrade mode, collect required pg_class information. */
1180 370 : if (dopt.binary_upgrade)
1181 72 : collectBinaryUpgradeClassOids(fout);
1182 :
1183 : /* Collect sequence information. */
1184 370 : collectSequences(fout);
1185 :
1186 : /* Lastly, create dummy objects to represent the section boundaries */
1187 370 : boundaryObjs = createBoundaryObjects();
1188 :
1189 : /* Get pointers to all the known DumpableObjects */
1190 370 : getDumpableObjects(&dobjs, &numObjs);
1191 :
1192 : /*
1193 : * Add dummy dependencies to enforce the dump section ordering.
1194 : */
1195 370 : addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
1196 :
1197 : /*
1198 : * Sort the objects into a safe dump order (no forward references).
1199 : *
1200 : * We rely on dependency information to help us determine a safe order, so
1201 : * the initial sort is mostly for cosmetic purposes: we sort by name to
1202 : * ensure that logically identical schemas will dump identically.
1203 : */
1204 370 : sortDumpableObjectsByTypeName(dobjs, numObjs);
1205 :
1206 370 : sortDumpableObjects(dobjs, numObjs,
1207 370 : boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
1208 :
1209 : /*
1210 : * Create archive TOC entries for all the objects to be dumped, in a safe
1211 : * order.
1212 : */
1213 :
1214 : /*
1215 : * First the special entries for ENCODING, STDSTRINGS, and SEARCHPATH.
1216 : */
1217 370 : dumpEncoding(fout);
1218 370 : dumpStdStrings(fout);
1219 370 : dumpSearchPath(fout);
1220 :
1221 : /* The database items are always next, unless we don't want them at all */
1222 370 : if (dopt.outputCreateDB)
1223 168 : dumpDatabase(fout);
1224 :
1225 : /* Now the rearrangeable objects. */
1226 1380772 : for (i = 0; i < numObjs; i++)
1227 1380402 : dumpDumpableObject(fout, dobjs[i]);
1228 :
1229 : /*
1230 : * Set up options info to ensure we dump what we want.
1231 : */
1232 370 : ropt = NewRestoreOptions();
1233 370 : ropt->filename = filename;
1234 :
1235 : /* if you change this list, see dumpOptionsFromRestoreOptions */
1236 370 : ropt->cparams.dbname = dopt.cparams.dbname ? pg_strdup(dopt.cparams.dbname) : NULL;
1237 370 : ropt->cparams.pgport = dopt.cparams.pgport ? pg_strdup(dopt.cparams.pgport) : NULL;
1238 370 : ropt->cparams.pghost = dopt.cparams.pghost ? pg_strdup(dopt.cparams.pghost) : NULL;
1239 370 : ropt->cparams.username = dopt.cparams.username ? pg_strdup(dopt.cparams.username) : NULL;
1240 370 : ropt->cparams.promptPassword = dopt.cparams.promptPassword;
1241 370 : ropt->dropSchema = dopt.outputClean;
1242 370 : ropt->dumpData = dopt.dumpData;
1243 370 : ropt->dumpSchema = dopt.dumpSchema;
1244 370 : ropt->dumpStatistics = dopt.dumpStatistics;
1245 370 : ropt->if_exists = dopt.if_exists;
1246 370 : ropt->column_inserts = dopt.column_inserts;
1247 370 : ropt->dumpSections = dopt.dumpSections;
1248 370 : ropt->aclsSkip = dopt.aclsSkip;
1249 370 : ropt->superuser = dopt.outputSuperuser;
1250 370 : ropt->createDB = dopt.outputCreateDB;
1251 370 : ropt->noOwner = dopt.outputNoOwner;
1252 370 : ropt->noTableAm = dopt.outputNoTableAm;
1253 370 : ropt->noTablespace = dopt.outputNoTablespaces;
1254 370 : ropt->disable_triggers = dopt.disable_triggers;
1255 370 : ropt->use_setsessauth = dopt.use_setsessauth;
1256 370 : ropt->disable_dollar_quoting = dopt.disable_dollar_quoting;
1257 370 : ropt->dump_inserts = dopt.dump_inserts;
1258 370 : ropt->no_comments = dopt.no_comments;
1259 370 : ropt->no_policies = dopt.no_policies;
1260 370 : ropt->no_publications = dopt.no_publications;
1261 370 : ropt->no_security_labels = dopt.no_security_labels;
1262 370 : ropt->no_subscriptions = dopt.no_subscriptions;
1263 370 : ropt->lockWaitTimeout = dopt.lockWaitTimeout;
1264 370 : ropt->include_everything = dopt.include_everything;
1265 370 : ropt->enable_row_security = dopt.enable_row_security;
1266 370 : ropt->sequence_data = dopt.sequence_data;
1267 370 : ropt->binary_upgrade = dopt.binary_upgrade;
1268 370 : ropt->restrict_key = dopt.restrict_key ? pg_strdup(dopt.restrict_key) : NULL;
1269 :
1270 370 : ropt->compression_spec = compression_spec;
1271 :
1272 370 : ropt->suppressDumpWarnings = true; /* We've already shown them */
1273 :
1274 370 : SetArchiveOptions(fout, &dopt, ropt);
1275 :
1276 : /* Mark which entries should be output */
1277 370 : ProcessArchiveRestoreOptions(fout);
1278 :
1279 : /*
1280 : * The archive's TOC entries are now marked as to which ones will actually
1281 : * be output, so we can set up their dependency lists properly. This isn't
1282 : * necessary for plain-text output, though.
1283 : */
1284 370 : if (!plainText)
1285 110 : BuildArchiveDependencies(fout);
1286 :
1287 : /*
1288 : * And finally we can do the actual output.
1289 : *
1290 : * Note: for non-plain-text output formats, the output file is written
1291 : * inside CloseArchive(). This is, um, bizarre; but not worth changing
1292 : * right now.
1293 : */
1294 370 : if (plainText)
1295 260 : RestoreArchive(fout);
1296 :
1297 368 : CloseArchive(fout);
1298 :
1299 368 : exit_nicely(0);
1300 : }
1301 :
1302 :
1303 : static void
1304 2 : help(const char *progname)
1305 : {
1306 2 : printf(_("%s exports a PostgreSQL database as an SQL script or to other formats.\n\n"), progname);
1307 2 : printf(_("Usage:\n"));
1308 2 : printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
1309 :
1310 2 : printf(_("\nGeneral options:\n"));
1311 2 : printf(_(" -f, --file=FILENAME output file or directory name\n"));
1312 2 : printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
1313 : " plain text (default))\n"));
1314 2 : printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
1315 2 : printf(_(" -v, --verbose verbose mode\n"));
1316 2 : printf(_(" -V, --version output version information, then exit\n"));
1317 2 : printf(_(" -Z, --compress=METHOD[:DETAIL]\n"
1318 : " compress as specified\n"));
1319 2 : printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
1320 2 : printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
1321 2 : printf(_(" --sync-method=METHOD set method for syncing files to disk\n"));
1322 2 : printf(_(" -?, --help show this help, then exit\n"));
1323 :
1324 2 : printf(_("\nOptions controlling the output content:\n"));
1325 2 : printf(_(" -a, --data-only dump only the data, not the schema or statistics\n"));
1326 2 : printf(_(" -b, --large-objects include large objects in dump\n"));
1327 2 : printf(_(" --blobs (same as --large-objects, deprecated)\n"));
1328 2 : printf(_(" -B, --no-large-objects exclude large objects in dump\n"));
1329 2 : printf(_(" --no-blobs (same as --no-large-objects, deprecated)\n"));
1330 2 : printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
1331 2 : printf(_(" -C, --create include commands to create database in dump\n"));
1332 2 : printf(_(" -e, --extension=PATTERN dump the specified extension(s) only\n"));
1333 2 : printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
1334 2 : printf(_(" -n, --schema=PATTERN dump the specified schema(s) only\n"));
1335 2 : printf(_(" -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
1336 2 : printf(_(" -O, --no-owner skip restoration of object ownership in\n"
1337 : " plain-text format\n"));
1338 2 : printf(_(" -s, --schema-only dump only the schema, no data or statistics\n"));
1339 2 : printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
1340 2 : printf(_(" -t, --table=PATTERN dump only the specified table(s)\n"));
1341 2 : printf(_(" -T, --exclude-table=PATTERN do NOT dump the specified table(s)\n"));
1342 2 : printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
1343 2 : printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
1344 2 : printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
1345 2 : printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
1346 2 : printf(_(" --disable-triggers disable triggers during data-only restore\n"));
1347 2 : printf(_(" --enable-row-security enable row security (dump only content user has\n"
1348 : " access to)\n"));
1349 2 : printf(_(" --exclude-extension=PATTERN do NOT dump the specified extension(s)\n"));
1350 2 : printf(_(" --exclude-table-and-children=PATTERN\n"
1351 : " do NOT dump the specified table(s), including\n"
1352 : " child and partition tables\n"));
1353 2 : printf(_(" --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
1354 2 : printf(_(" --exclude-table-data-and-children=PATTERN\n"
1355 : " do NOT dump data for the specified table(s),\n"
1356 : " including child and partition tables\n"));
1357 2 : printf(_(" --extra-float-digits=NUM override default setting for extra_float_digits\n"));
1358 2 : printf(_(" --filter=FILENAME include or exclude objects and data from dump\n"
1359 : " based on expressions in FILENAME\n"));
1360 2 : printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
1361 2 : printf(_(" --include-foreign-data=PATTERN\n"
1362 : " include data of foreign tables on foreign\n"
1363 : " servers matching PATTERN\n"));
1364 2 : printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
1365 2 : printf(_(" --load-via-partition-root load partitions via the root table\n"));
1366 2 : printf(_(" --no-comments do not dump comment commands\n"));
1367 2 : printf(_(" --no-data do not dump data\n"));
1368 2 : printf(_(" --no-policies do not dump row security policies\n"));
1369 2 : printf(_(" --no-publications do not dump publications\n"));
1370 2 : printf(_(" --no-schema do not dump schema\n"));
1371 2 : printf(_(" --no-security-labels do not dump security label assignments\n"));
1372 2 : printf(_(" --no-statistics do not dump statistics\n"));
1373 2 : printf(_(" --no-subscriptions do not dump subscriptions\n"));
1374 2 : printf(_(" --no-table-access-method do not dump table access methods\n"));
1375 2 : printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
1376 2 : printf(_(" --no-toast-compression do not dump TOAST compression methods\n"));
1377 2 : printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
1378 2 : printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
1379 2 : printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
1380 2 : printf(_(" --restrict-key=RESTRICT_KEY use provided string as psql \\restrict key\n"));
1381 2 : printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
1382 2 : printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
1383 2 : printf(_(" --sequence-data include sequence data in dump\n"));
1384 2 : printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
1385 2 : printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
1386 2 : printf(_(" --statistics dump the statistics\n"));
1387 2 : printf(_(" --statistics-only dump only the statistics, not schema or data\n"));
1388 2 : printf(_(" --strict-names require table and/or schema include patterns to\n"
1389 : " match at least one entity each\n"));
1390 2 : printf(_(" --table-and-children=PATTERN dump only the specified table(s), including\n"
1391 : " child and partition tables\n"));
1392 2 : printf(_(" --use-set-session-authorization\n"
1393 : " use SET SESSION AUTHORIZATION commands instead of\n"
1394 : " ALTER OWNER commands to set ownership\n"));
1395 :
1396 2 : printf(_("\nConnection options:\n"));
1397 2 : printf(_(" -d, --dbname=DBNAME database to dump\n"));
1398 2 : printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
1399 2 : printf(_(" -p, --port=PORT database server port number\n"));
1400 2 : printf(_(" -U, --username=NAME connect as specified database user\n"));
1401 2 : printf(_(" -w, --no-password never prompt for password\n"));
1402 2 : printf(_(" -W, --password force password prompt (should happen automatically)\n"));
1403 2 : printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
1404 :
1405 2 : printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
1406 : "variable value is used.\n\n"));
1407 2 : printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
1408 2 : printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
1409 2 : }
1410 :
1411 : static void
1412 436 : setup_connection(Archive *AH, const char *dumpencoding,
1413 : const char *dumpsnapshot, char *use_role)
1414 : {
1415 436 : DumpOptions *dopt = AH->dopt;
1416 436 : PGconn *conn = GetConnection(AH);
1417 : const char *std_strings;
1418 :
1419 436 : PQclear(ExecuteSqlQueryForSingleRow(AH, ALWAYS_SECURE_SEARCH_PATH_SQL));
1420 :
1421 : /*
1422 : * Set the client encoding if requested.
1423 : */
1424 436 : if (dumpencoding)
1425 : {
1426 36 : if (PQsetClientEncoding(conn, dumpencoding) < 0)
1427 0 : pg_fatal("invalid client encoding \"%s\" specified",
1428 : dumpencoding);
1429 : }
1430 :
1431 : /*
1432 : * Get the active encoding and the standard_conforming_strings setting, so
1433 : * we know how to escape strings.
1434 : */
1435 436 : AH->encoding = PQclientEncoding(conn);
1436 436 : setFmtEncoding(AH->encoding);
1437 :
1438 436 : std_strings = PQparameterStatus(conn, "standard_conforming_strings");
1439 436 : AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
1440 :
1441 : /*
1442 : * Set the role if requested. In a parallel dump worker, we'll be passed
1443 : * use_role == NULL, but AH->use_role is already set (if user specified it
1444 : * originally) and we should use that.
1445 : */
1446 436 : if (!use_role && AH->use_role)
1447 4 : use_role = AH->use_role;
1448 :
1449 : /* Set the role if requested */
1450 436 : if (use_role)
1451 : {
1452 10 : PQExpBuffer query = createPQExpBuffer();
1453 :
1454 10 : appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1455 10 : ExecuteSqlStatement(AH, query->data);
1456 10 : destroyPQExpBuffer(query);
1457 :
1458 : /* save it for possible later use by parallel workers */
1459 10 : if (!AH->use_role)
1460 6 : AH->use_role = pg_strdup(use_role);
1461 : }
1462 :
1463 : /* Set the datestyle to ISO to ensure the dump's portability */
1464 436 : ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1465 :
1466 : /* Likewise, avoid using sql_standard intervalstyle */
1467 436 : ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1468 :
1469 : /*
1470 : * Use an explicitly specified extra_float_digits if it has been provided.
1471 : * Otherwise, set extra_float_digits so that we can dump float data
1472 : * exactly (given correctly implemented float I/O code, anyway).
1473 : */
1474 436 : if (have_extra_float_digits)
1475 : {
1476 0 : PQExpBuffer q = createPQExpBuffer();
1477 :
1478 0 : appendPQExpBuffer(q, "SET extra_float_digits TO %d",
1479 : extra_float_digits);
1480 0 : ExecuteSqlStatement(AH, q->data);
1481 0 : destroyPQExpBuffer(q);
1482 : }
1483 : else
1484 436 : ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1485 :
1486 : /*
1487 : * Disable synchronized scanning, to prevent unpredictable changes in row
1488 : * ordering across a dump and reload.
1489 : */
1490 436 : ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1491 :
1492 : /*
1493 : * Disable timeouts if supported.
1494 : */
1495 436 : ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1496 436 : if (AH->remoteVersion >= 90300)
1497 436 : ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1498 436 : if (AH->remoteVersion >= 90600)
1499 436 : ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1500 436 : if (AH->remoteVersion >= 170000)
1501 436 : ExecuteSqlStatement(AH, "SET transaction_timeout = 0");
1502 :
1503 : /*
1504 : * Quote all identifiers, if requested.
1505 : */
1506 436 : if (quote_all_identifiers)
1507 68 : ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1508 :
1509 : /*
1510 : * Adjust row-security mode, if supported.
1511 : */
1512 436 : if (AH->remoteVersion >= 90500)
1513 : {
1514 436 : if (dopt->enable_row_security)
1515 0 : ExecuteSqlStatement(AH, "SET row_security = on");
1516 : else
1517 436 : ExecuteSqlStatement(AH, "SET row_security = off");
1518 : }
1519 :
1520 : /*
1521 : * For security reasons, we restrict the expansion of non-system views and
1522 : * access to foreign tables during the pg_dump process. This restriction
1523 : * is adjusted when dumping foreign table data.
1524 : */
1525 436 : set_restrict_relation_kind(AH, "view, foreign-table");
1526 :
1527 : /*
1528 : * Initialize prepared-query state to "nothing prepared". We do this here
1529 : * so that a parallel dump worker will have its own state.
1530 : */
1531 436 : AH->is_prepared = (bool *) pg_malloc0(NUM_PREP_QUERIES * sizeof(bool));
1532 :
1533 : /*
1534 : * Start transaction-snapshot mode transaction to dump consistent data.
1535 : */
1536 436 : ExecuteSqlStatement(AH, "BEGIN");
1537 :
1538 : /*
1539 : * To support the combination of serializable_deferrable with the jobs
1540 : * option we use REPEATABLE READ for the worker connections that are
1541 : * passed a snapshot. As long as the snapshot is acquired in a
1542 : * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1543 : * REPEATABLE READ transaction provides the appropriate integrity
1544 : * guarantees. This is a kluge, but safe for back-patching.
1545 : */
1546 436 : if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1547 0 : ExecuteSqlStatement(AH,
1548 : "SET TRANSACTION ISOLATION LEVEL "
1549 : "SERIALIZABLE, READ ONLY, DEFERRABLE");
1550 : else
1551 436 : ExecuteSqlStatement(AH,
1552 : "SET TRANSACTION ISOLATION LEVEL "
1553 : "REPEATABLE READ, READ ONLY");
1554 :
1555 : /*
1556 : * If user specified a snapshot to use, select that. In a parallel dump
1557 : * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1558 : * is already set (if the server can handle it) and we should use that.
1559 : */
1560 436 : if (dumpsnapshot)
1561 0 : AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1562 :
1563 436 : if (AH->sync_snapshot_id)
1564 : {
1565 32 : PQExpBuffer query = createPQExpBuffer();
1566 :
1567 32 : appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
1568 32 : appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1569 32 : ExecuteSqlStatement(AH, query->data);
1570 32 : destroyPQExpBuffer(query);
1571 : }
1572 404 : else if (AH->numWorkers > 1)
1573 : {
1574 16 : if (AH->isStandby && AH->remoteVersion < 100000)
1575 0 : pg_fatal("parallel dumps from standby servers are not supported by this server version");
1576 16 : AH->sync_snapshot_id = get_synchronized_snapshot(AH);
1577 : }
1578 436 : }
1579 :
1580 : /* Set up connection for a parallel worker process */
1581 : static void
1582 32 : setupDumpWorker(Archive *AH)
1583 : {
1584 : /*
1585 : * We want to re-select all the same values the leader connection is
1586 : * using. We'll have inherited directly-usable values in
1587 : * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1588 : * inherited encoding value back to a string to pass to setup_connection.
1589 : */
1590 32 : setup_connection(AH,
1591 : pg_encoding_to_char(AH->encoding),
1592 : NULL,
1593 : NULL);
1594 32 : }
1595 :
1596 : static char *
1597 16 : get_synchronized_snapshot(Archive *fout)
1598 : {
1599 16 : char *query = "SELECT pg_catalog.pg_export_snapshot()";
1600 : char *result;
1601 : PGresult *res;
1602 :
1603 16 : res = ExecuteSqlQueryForSingleRow(fout, query);
1604 16 : result = pg_strdup(PQgetvalue(res, 0, 0));
1605 16 : PQclear(res);
1606 :
1607 16 : return result;
1608 : }
1609 :
1610 : static ArchiveFormat
1611 422 : parseArchiveFormat(const char *format, ArchiveMode *mode)
1612 : {
1613 : ArchiveFormat archiveFormat;
1614 :
1615 422 : *mode = archModeWrite;
1616 :
1617 422 : if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1618 : {
1619 : /* This is used by pg_dumpall, and is not documented */
1620 86 : archiveFormat = archNull;
1621 86 : *mode = archModeAppend;
1622 : }
1623 336 : else if (pg_strcasecmp(format, "c") == 0)
1624 0 : archiveFormat = archCustom;
1625 336 : else if (pg_strcasecmp(format, "custom") == 0)
1626 86 : archiveFormat = archCustom;
1627 250 : else if (pg_strcasecmp(format, "d") == 0)
1628 0 : archiveFormat = archDirectory;
1629 250 : else if (pg_strcasecmp(format, "directory") == 0)
1630 20 : archiveFormat = archDirectory;
1631 230 : else if (pg_strcasecmp(format, "p") == 0)
1632 216 : archiveFormat = archNull;
1633 14 : else if (pg_strcasecmp(format, "plain") == 0)
1634 6 : archiveFormat = archNull;
1635 8 : else if (pg_strcasecmp(format, "t") == 0)
1636 0 : archiveFormat = archTar;
1637 8 : else if (pg_strcasecmp(format, "tar") == 0)
1638 6 : archiveFormat = archTar;
1639 : else
1640 2 : pg_fatal("invalid output format \"%s\" specified", format);
1641 420 : return archiveFormat;
1642 : }
1643 :
1644 : /*
1645 : * Find the OIDs of all schemas matching the given list of patterns,
1646 : * and append them to the given OID list.
1647 : */
1648 : static void
1649 426 : expand_schema_name_patterns(Archive *fout,
1650 : SimpleStringList *patterns,
1651 : SimpleOidList *oids,
1652 : bool strict_names)
1653 : {
1654 : PQExpBuffer query;
1655 : PGresult *res;
1656 : SimpleStringListCell *cell;
1657 : int i;
1658 :
1659 426 : if (patterns->head == NULL)
1660 384 : return; /* nothing to do */
1661 :
1662 42 : query = createPQExpBuffer();
1663 :
1664 : /*
1665 : * The loop below runs multiple SELECTs might sometimes result in
1666 : * duplicate entries in the OID list, but we don't care.
1667 : */
1668 :
1669 72 : for (cell = patterns->head; cell; cell = cell->next)
1670 : {
1671 : PQExpBufferData dbbuf;
1672 : int dotcnt;
1673 :
1674 42 : appendPQExpBufferStr(query,
1675 : "SELECT oid FROM pg_catalog.pg_namespace n\n");
1676 42 : initPQExpBuffer(&dbbuf);
1677 42 : processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1678 : false, NULL, "n.nspname", NULL, NULL, &dbbuf,
1679 : &dotcnt);
1680 42 : if (dotcnt > 1)
1681 4 : pg_fatal("improper qualified name (too many dotted names): %s",
1682 : cell->val);
1683 38 : else if (dotcnt == 1)
1684 6 : prohibit_crossdb_refs(GetConnection(fout), dbbuf.data, cell->val);
1685 32 : termPQExpBuffer(&dbbuf);
1686 :
1687 32 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1688 32 : if (strict_names && PQntuples(res) == 0)
1689 2 : pg_fatal("no matching schemas were found for pattern \"%s\"", cell->val);
1690 :
1691 58 : for (i = 0; i < PQntuples(res); i++)
1692 : {
1693 28 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1694 : }
1695 :
1696 30 : PQclear(res);
1697 30 : resetPQExpBuffer(query);
1698 : }
1699 :
1700 30 : destroyPQExpBuffer(query);
1701 : }
1702 :
1703 : /*
1704 : * Find the OIDs of all extensions matching the given list of patterns,
1705 : * and append them to the given OID list.
1706 : */
1707 : static void
1708 382 : expand_extension_name_patterns(Archive *fout,
1709 : SimpleStringList *patterns,
1710 : SimpleOidList *oids,
1711 : bool strict_names)
1712 : {
1713 : PQExpBuffer query;
1714 : PGresult *res;
1715 : SimpleStringListCell *cell;
1716 : int i;
1717 :
1718 382 : if (patterns->head == NULL)
1719 368 : return; /* nothing to do */
1720 :
1721 14 : query = createPQExpBuffer();
1722 :
1723 : /*
1724 : * The loop below runs multiple SELECTs might sometimes result in
1725 : * duplicate entries in the OID list, but we don't care.
1726 : */
1727 28 : for (cell = patterns->head; cell; cell = cell->next)
1728 : {
1729 : int dotcnt;
1730 :
1731 14 : appendPQExpBufferStr(query,
1732 : "SELECT oid FROM pg_catalog.pg_extension e\n");
1733 14 : processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1734 : false, NULL, "e.extname", NULL, NULL, NULL,
1735 : &dotcnt);
1736 14 : if (dotcnt > 0)
1737 0 : pg_fatal("improper qualified name (too many dotted names): %s",
1738 : cell->val);
1739 :
1740 14 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1741 14 : if (strict_names && PQntuples(res) == 0)
1742 0 : pg_fatal("no matching extensions were found for pattern \"%s\"", cell->val);
1743 :
1744 26 : for (i = 0; i < PQntuples(res); i++)
1745 : {
1746 12 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1747 : }
1748 :
1749 14 : PQclear(res);
1750 14 : resetPQExpBuffer(query);
1751 : }
1752 :
1753 14 : destroyPQExpBuffer(query);
1754 : }
1755 :
1756 : /*
1757 : * Find the OIDs of all foreign servers matching the given list of patterns,
1758 : * and append them to the given OID list.
1759 : */
1760 : static void
1761 376 : expand_foreign_server_name_patterns(Archive *fout,
1762 : SimpleStringList *patterns,
1763 : SimpleOidList *oids)
1764 : {
1765 : PQExpBuffer query;
1766 : PGresult *res;
1767 : SimpleStringListCell *cell;
1768 : int i;
1769 :
1770 376 : if (patterns->head == NULL)
1771 370 : return; /* nothing to do */
1772 :
1773 6 : query = createPQExpBuffer();
1774 :
1775 : /*
1776 : * The loop below runs multiple SELECTs might sometimes result in
1777 : * duplicate entries in the OID list, but we don't care.
1778 : */
1779 :
1780 10 : for (cell = patterns->head; cell; cell = cell->next)
1781 : {
1782 : int dotcnt;
1783 :
1784 6 : appendPQExpBufferStr(query,
1785 : "SELECT oid FROM pg_catalog.pg_foreign_server s\n");
1786 6 : processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1787 : false, NULL, "s.srvname", NULL, NULL, NULL,
1788 : &dotcnt);
1789 6 : if (dotcnt > 0)
1790 0 : pg_fatal("improper qualified name (too many dotted names): %s",
1791 : cell->val);
1792 :
1793 6 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1794 6 : if (PQntuples(res) == 0)
1795 2 : pg_fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
1796 :
1797 8 : for (i = 0; i < PQntuples(res); i++)
1798 4 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1799 :
1800 4 : PQclear(res);
1801 4 : resetPQExpBuffer(query);
1802 : }
1803 :
1804 4 : destroyPQExpBuffer(query);
1805 : }
1806 :
1807 : /*
1808 : * Find the OIDs of all tables matching the given list of patterns,
1809 : * and append them to the given OID list. See also expand_dbname_patterns()
1810 : * in pg_dumpall.c
1811 : */
1812 : static void
1813 2274 : expand_table_name_patterns(Archive *fout,
1814 : SimpleStringList *patterns, SimpleOidList *oids,
1815 : bool strict_names, bool with_child_tables)
1816 : {
1817 : PQExpBuffer query;
1818 : PGresult *res;
1819 : SimpleStringListCell *cell;
1820 : int i;
1821 :
1822 2274 : if (patterns->head == NULL)
1823 2216 : return; /* nothing to do */
1824 :
1825 58 : query = createPQExpBuffer();
1826 :
1827 : /*
1828 : * this might sometimes result in duplicate entries in the OID list, but
1829 : * we don't care.
1830 : */
1831 :
1832 118 : for (cell = patterns->head; cell; cell = cell->next)
1833 : {
1834 : PQExpBufferData dbbuf;
1835 : int dotcnt;
1836 :
1837 : /*
1838 : * Query must remain ABSOLUTELY devoid of unqualified names. This
1839 : * would be unnecessary given a pg_table_is_visible() variant taking a
1840 : * search_path argument.
1841 : *
1842 : * For with_child_tables, we start with the basic query's results and
1843 : * recursively search the inheritance tree to add child tables.
1844 : */
1845 70 : if (with_child_tables)
1846 : {
1847 12 : appendPQExpBufferStr(query, "WITH RECURSIVE partition_tree (relid) AS (\n");
1848 : }
1849 :
1850 70 : appendPQExpBuffer(query,
1851 : "SELECT c.oid"
1852 : "\nFROM pg_catalog.pg_class c"
1853 : "\n LEFT JOIN pg_catalog.pg_namespace n"
1854 : "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1855 : "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1856 : "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1857 : RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1858 : RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1859 : RELKIND_PARTITIONED_TABLE);
1860 70 : initPQExpBuffer(&dbbuf);
1861 70 : processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1862 : false, "n.nspname", "c.relname", NULL,
1863 : "pg_catalog.pg_table_is_visible(c.oid)", &dbbuf,
1864 : &dotcnt);
1865 70 : if (dotcnt > 2)
1866 2 : pg_fatal("improper relation name (too many dotted names): %s",
1867 : cell->val);
1868 68 : else if (dotcnt == 2)
1869 4 : prohibit_crossdb_refs(GetConnection(fout), dbbuf.data, cell->val);
1870 64 : termPQExpBuffer(&dbbuf);
1871 :
1872 64 : if (with_child_tables)
1873 : {
1874 12 : appendPQExpBufferStr(query, "UNION"
1875 : "\nSELECT i.inhrelid"
1876 : "\nFROM partition_tree p"
1877 : "\n JOIN pg_catalog.pg_inherits i"
1878 : "\n ON p.relid OPERATOR(pg_catalog.=) i.inhparent"
1879 : "\n)"
1880 : "\nSELECT relid FROM partition_tree");
1881 : }
1882 :
1883 64 : ExecuteSqlStatement(fout, "RESET search_path");
1884 64 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1885 64 : PQclear(ExecuteSqlQueryForSingleRow(fout,
1886 : ALWAYS_SECURE_SEARCH_PATH_SQL));
1887 64 : if (strict_names && PQntuples(res) == 0)
1888 4 : pg_fatal("no matching tables were found for pattern \"%s\"", cell->val);
1889 :
1890 148 : for (i = 0; i < PQntuples(res); i++)
1891 : {
1892 88 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1893 : }
1894 :
1895 60 : PQclear(res);
1896 60 : resetPQExpBuffer(query);
1897 : }
1898 :
1899 48 : destroyPQExpBuffer(query);
1900 : }
1901 :
1902 : /*
1903 : * Verifies that the connected database name matches the given database name,
1904 : * and if not, dies with an error about the given pattern.
1905 : *
1906 : * The 'dbname' argument should be a literal name parsed from 'pattern'.
1907 : */
1908 : static void
1909 10 : prohibit_crossdb_refs(PGconn *conn, const char *dbname, const char *pattern)
1910 : {
1911 : const char *db;
1912 :
1913 10 : db = PQdb(conn);
1914 10 : if (db == NULL)
1915 0 : pg_fatal("You are currently not connected to a database.");
1916 :
1917 10 : if (strcmp(db, dbname) != 0)
1918 10 : pg_fatal("cross-database references are not implemented: %s",
1919 : pattern);
1920 0 : }
1921 :
1922 : /*
1923 : * checkExtensionMembership
1924 : * Determine whether object is an extension member, and if so,
1925 : * record an appropriate dependency and set the object's dump flag.
1926 : *
1927 : * It's important to call this for each object that could be an extension
1928 : * member. Generally, we integrate this with determining the object's
1929 : * to-be-dumped-ness, since extension membership overrides other rules for that.
1930 : *
1931 : * Returns true if object is an extension member, else false.
1932 : */
1933 : static bool
1934 1170032 : checkExtensionMembership(DumpableObject *dobj, Archive *fout)
1935 : {
1936 1170032 : ExtensionInfo *ext = findOwningExtension(dobj->catId);
1937 :
1938 1170032 : if (ext == NULL)
1939 1168436 : return false;
1940 :
1941 1596 : dobj->ext_member = true;
1942 :
1943 : /* Record dependency so that getDependencies needn't deal with that */
1944 1596 : addObjectDependency(dobj, ext->dobj.dumpId);
1945 :
1946 : /*
1947 : * In 9.6 and above, mark the member object to have any non-initial ACLs
1948 : * dumped. (Any initial ACLs will be removed later, using data from
1949 : * pg_init_privs, so that we'll dump only the delta from the extension's
1950 : * initial setup.)
1951 : *
1952 : * Prior to 9.6, we do not include any extension member components.
1953 : *
1954 : * In binary upgrades, we still dump all components of the members
1955 : * individually, since the idea is to exactly reproduce the database
1956 : * contents rather than replace the extension contents with something
1957 : * different.
1958 : *
1959 : * Note: it might be interesting someday to implement storage and delta
1960 : * dumping of extension members' RLS policies and/or security labels.
1961 : * However there is a pitfall for RLS policies: trying to dump them
1962 : * requires getting a lock on their tables, and the calling user might not
1963 : * have privileges for that. We need no lock to examine a table's ACLs,
1964 : * so the current feature doesn't have a problem of that sort.
1965 : */
1966 1596 : if (fout->dopt->binary_upgrade)
1967 338 : dobj->dump = ext->dobj.dump;
1968 : else
1969 : {
1970 1258 : if (fout->remoteVersion < 90600)
1971 0 : dobj->dump = DUMP_COMPONENT_NONE;
1972 : else
1973 1258 : dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL);
1974 : }
1975 :
1976 1596 : return true;
1977 : }
1978 :
1979 : /*
1980 : * selectDumpableNamespace: policy-setting subroutine
1981 : * Mark a namespace as to be dumped or not
1982 : */
1983 : static void
1984 2876 : selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
1985 : {
1986 : /*
1987 : * DUMP_COMPONENT_DEFINITION typically implies a CREATE SCHEMA statement
1988 : * and (for --clean) a DROP SCHEMA statement. (In the absence of
1989 : * DUMP_COMPONENT_DEFINITION, this value is irrelevant.)
1990 : */
1991 2876 : nsinfo->create = true;
1992 :
1993 : /*
1994 : * If specific tables are being dumped, do not dump any complete
1995 : * namespaces. If specific namespaces are being dumped, dump just those
1996 : * namespaces. Otherwise, dump all non-system namespaces.
1997 : */
1998 2876 : if (table_include_oids.head != NULL)
1999 100 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
2000 2776 : else if (schema_include_oids.head != NULL)
2001 374 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
2002 374 : simple_oid_list_member(&schema_include_oids,
2003 : nsinfo->dobj.catId.oid) ?
2004 374 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2005 2402 : else if (fout->remoteVersion >= 90600 &&
2006 2402 : strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
2007 : {
2008 : /*
2009 : * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
2010 : * they are interesting (and not the original ACLs which were set at
2011 : * initdb time, see pg_init_privs).
2012 : */
2013 328 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
2014 : }
2015 2074 : else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
2016 1026 : strcmp(nsinfo->dobj.name, "information_schema") == 0)
2017 : {
2018 : /* Other system schemas don't get dumped */
2019 1376 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
2020 : }
2021 698 : else if (strcmp(nsinfo->dobj.name, "public") == 0)
2022 : {
2023 : /*
2024 : * The public schema is a strange beast that sits in a sort of
2025 : * no-mans-land between being a system object and a user object.
2026 : * CREATE SCHEMA would fail, so its DUMP_COMPONENT_DEFINITION is just
2027 : * a comment and an indication of ownership. If the owner is the
2028 : * default, omit that superfluous DUMP_COMPONENT_DEFINITION. Before
2029 : * v15, the default owner was BOOTSTRAP_SUPERUSERID.
2030 : */
2031 320 : nsinfo->create = false;
2032 320 : nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
2033 320 : if (nsinfo->nspowner == ROLE_PG_DATABASE_OWNER)
2034 224 : nsinfo->dobj.dump &= ~DUMP_COMPONENT_DEFINITION;
2035 320 : nsinfo->dobj.dump_contains = DUMP_COMPONENT_ALL;
2036 :
2037 : /*
2038 : * Also, make like it has a comment even if it doesn't; this is so
2039 : * that we'll emit a command to drop the comment, if appropriate.
2040 : * (Without this, we'd not call dumpCommentExtended for it.)
2041 : */
2042 320 : nsinfo->dobj.components |= DUMP_COMPONENT_COMMENT;
2043 : }
2044 : else
2045 378 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
2046 :
2047 : /*
2048 : * In any case, a namespace can be excluded by an exclusion switch
2049 : */
2050 3924 : if (nsinfo->dobj.dump_contains &&
2051 1048 : simple_oid_list_member(&schema_exclude_oids,
2052 : nsinfo->dobj.catId.oid))
2053 6 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
2054 :
2055 : /*
2056 : * If the schema belongs to an extension, allow extension membership to
2057 : * override the dump decision for the schema itself. However, this does
2058 : * not change dump_contains, so this won't change what we do with objects
2059 : * within the schema. (If they belong to the extension, they'll get
2060 : * suppressed by it, otherwise not.)
2061 : */
2062 2876 : (void) checkExtensionMembership(&nsinfo->dobj, fout);
2063 2876 : }
2064 :
2065 : /*
2066 : * selectDumpableTable: policy-setting subroutine
2067 : * Mark a table as to be dumped or not
2068 : */
2069 : static void
2070 98624 : selectDumpableTable(TableInfo *tbinfo, Archive *fout)
2071 : {
2072 98624 : if (checkExtensionMembership(&tbinfo->dobj, fout))
2073 450 : return; /* extension membership overrides all else */
2074 :
2075 : /*
2076 : * If specific tables are being dumped, dump just those tables; else, dump
2077 : * according to the parent namespace's dump flag.
2078 : */
2079 98174 : if (table_include_oids.head != NULL)
2080 10400 : tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
2081 : tbinfo->dobj.catId.oid) ?
2082 5200 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2083 : else
2084 92974 : tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
2085 :
2086 : /*
2087 : * In any case, a table can be excluded by an exclusion switch
2088 : */
2089 160858 : if (tbinfo->dobj.dump &&
2090 62684 : simple_oid_list_member(&table_exclude_oids,
2091 : tbinfo->dobj.catId.oid))
2092 24 : tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
2093 : }
2094 :
2095 : /*
2096 : * selectDumpableType: policy-setting subroutine
2097 : * Mark a type as to be dumped or not
2098 : *
2099 : * If it's a table's rowtype or an autogenerated array type, we also apply a
2100 : * special type code to facilitate sorting into the desired order. (We don't
2101 : * want to consider those to be ordinary types because that would bring tables
2102 : * up into the datatype part of the dump order.) We still set the object's
2103 : * dump flag; that's not going to cause the dummy type to be dumped, but we
2104 : * need it so that casts involving such types will be dumped correctly -- see
2105 : * dumpCast. This means the flag should be set the same as for the underlying
2106 : * object (the table or base type).
2107 : */
2108 : static void
2109 270250 : selectDumpableType(TypeInfo *tyinfo, Archive *fout)
2110 : {
2111 : /* skip complex types, except for standalone composite types */
2112 270250 : if (OidIsValid(tyinfo->typrelid) &&
2113 97144 : tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
2114 : {
2115 96768 : TableInfo *tytable = findTableByOid(tyinfo->typrelid);
2116 :
2117 96768 : tyinfo->dobj.objType = DO_DUMMY_TYPE;
2118 96768 : if (tytable != NULL)
2119 96768 : tyinfo->dobj.dump = tytable->dobj.dump;
2120 : else
2121 0 : tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
2122 96768 : return;
2123 : }
2124 :
2125 : /* skip auto-generated array and multirange types */
2126 173482 : if (tyinfo->isArray || tyinfo->isMultirange)
2127 : {
2128 132210 : tyinfo->dobj.objType = DO_DUMMY_TYPE;
2129 :
2130 : /*
2131 : * Fall through to set the dump flag; we assume that the subsequent
2132 : * rules will do the same thing as they would for the array's base
2133 : * type or multirange's range type. (We cannot reliably look up the
2134 : * base type here, since getTypes may not have processed it yet.)
2135 : */
2136 : }
2137 :
2138 173482 : if (checkExtensionMembership(&tyinfo->dobj, fout))
2139 300 : return; /* extension membership overrides all else */
2140 :
2141 : /* Dump based on if the contents of the namespace are being dumped */
2142 173182 : tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
2143 : }
2144 :
2145 : /*
2146 : * selectDumpableDefaultACL: policy-setting subroutine
2147 : * Mark a default ACL as to be dumped or not
2148 : *
2149 : * For per-schema default ACLs, dump if the schema is to be dumped.
2150 : * Otherwise dump if we are dumping "everything". Note that dumpSchema
2151 : * and aclsSkip are checked separately.
2152 : */
2153 : static void
2154 436 : selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
2155 : {
2156 : /* Default ACLs can't be extension members */
2157 :
2158 436 : if (dinfo->dobj.namespace)
2159 : /* default ACLs are considered part of the namespace */
2160 204 : dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
2161 : else
2162 232 : dinfo->dobj.dump = dopt->include_everything ?
2163 232 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2164 436 : }
2165 :
2166 : /*
2167 : * selectDumpableCast: policy-setting subroutine
2168 : * Mark a cast as to be dumped or not
2169 : *
2170 : * Casts do not belong to any particular namespace (since they haven't got
2171 : * names), nor do they have identifiable owners. To distinguish user-defined
2172 : * casts from built-in ones, we must resort to checking whether the cast's
2173 : * OID is in the range reserved for initdb.
2174 : */
2175 : static void
2176 87506 : selectDumpableCast(CastInfo *cast, Archive *fout)
2177 : {
2178 87506 : if (checkExtensionMembership(&cast->dobj, fout))
2179 0 : return; /* extension membership overrides all else */
2180 :
2181 : /*
2182 : * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
2183 : * support ACLs currently.
2184 : */
2185 87506 : if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
2186 87320 : cast->dobj.dump = DUMP_COMPONENT_NONE;
2187 : else
2188 186 : cast->dobj.dump = fout->dopt->include_everything ?
2189 186 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2190 : }
2191 :
2192 : /*
2193 : * selectDumpableProcLang: policy-setting subroutine
2194 : * Mark a procedural language as to be dumped or not
2195 : *
2196 : * Procedural languages do not belong to any particular namespace. To
2197 : * identify built-in languages, we must resort to checking whether the
2198 : * language's OID is in the range reserved for initdb.
2199 : */
2200 : static void
2201 472 : selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
2202 : {
2203 472 : if (checkExtensionMembership(&plang->dobj, fout))
2204 370 : return; /* extension membership overrides all else */
2205 :
2206 : /*
2207 : * Only include procedural languages when we are dumping everything.
2208 : *
2209 : * For from-initdb procedural languages, only include ACLs, as we do for
2210 : * the pg_catalog namespace. We need this because procedural languages do
2211 : * not live in any namespace.
2212 : */
2213 102 : if (!fout->dopt->include_everything)
2214 16 : plang->dobj.dump = DUMP_COMPONENT_NONE;
2215 : else
2216 : {
2217 86 : if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
2218 0 : plang->dobj.dump = fout->remoteVersion < 90600 ?
2219 0 : DUMP_COMPONENT_NONE : DUMP_COMPONENT_ACL;
2220 : else
2221 86 : plang->dobj.dump = DUMP_COMPONENT_ALL;
2222 : }
2223 : }
2224 :
2225 : /*
2226 : * selectDumpableAccessMethod: policy-setting subroutine
2227 : * Mark an access method as to be dumped or not
2228 : *
2229 : * Access methods do not belong to any particular namespace. To identify
2230 : * built-in access methods, we must resort to checking whether the
2231 : * method's OID is in the range reserved for initdb.
2232 : */
2233 : static void
2234 2858 : selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
2235 : {
2236 : /* see getAccessMethods() comment about v9.6. */
2237 2858 : if (fout->remoteVersion < 90600)
2238 : {
2239 0 : method->dobj.dump = DUMP_COMPONENT_NONE;
2240 0 : return;
2241 : }
2242 :
2243 2858 : if (checkExtensionMembership(&method->dobj, fout))
2244 50 : return; /* extension membership overrides all else */
2245 :
2246 : /*
2247 : * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
2248 : * they do not support ACLs currently.
2249 : */
2250 2808 : if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
2251 2590 : method->dobj.dump = DUMP_COMPONENT_NONE;
2252 : else
2253 218 : method->dobj.dump = fout->dopt->include_everything ?
2254 218 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2255 : }
2256 :
2257 : /*
2258 : * selectDumpableExtension: policy-setting subroutine
2259 : * Mark an extension as to be dumped or not
2260 : *
2261 : * Built-in extensions should be skipped except for checking ACLs, since we
2262 : * assume those will already be installed in the target database. We identify
2263 : * such extensions by their having OIDs in the range reserved for initdb.
2264 : * We dump all user-added extensions by default. No extensions are dumped
2265 : * if include_everything is false (i.e., a --schema or --table switch was
2266 : * given), except if --extension specifies a list of extensions to dump.
2267 : */
2268 : static void
2269 432 : selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
2270 : {
2271 : /*
2272 : * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
2273 : * change permissions on their member objects, if they wish to, and have
2274 : * those changes preserved.
2275 : */
2276 432 : if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
2277 372 : extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
2278 : else
2279 : {
2280 : /* check if there is a list of extensions to dump */
2281 60 : if (extension_include_oids.head != NULL)
2282 8 : extinfo->dobj.dump = extinfo->dobj.dump_contains =
2283 8 : simple_oid_list_member(&extension_include_oids,
2284 : extinfo->dobj.catId.oid) ?
2285 8 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2286 : else
2287 52 : extinfo->dobj.dump = extinfo->dobj.dump_contains =
2288 52 : dopt->include_everything ?
2289 52 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2290 :
2291 : /* check that the extension is not explicitly excluded */
2292 112 : if (extinfo->dobj.dump &&
2293 52 : simple_oid_list_member(&extension_exclude_oids,
2294 : extinfo->dobj.catId.oid))
2295 4 : extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_NONE;
2296 : }
2297 432 : }
2298 :
2299 : /*
2300 : * selectDumpablePublicationObject: policy-setting subroutine
2301 : * Mark a publication object as to be dumped or not
2302 : *
2303 : * A publication can have schemas and tables which have schemas, but those are
2304 : * ignored in decision making, because publications are only dumped when we are
2305 : * dumping everything.
2306 : */
2307 : static void
2308 1058 : selectDumpablePublicationObject(DumpableObject *dobj, Archive *fout)
2309 : {
2310 1058 : if (checkExtensionMembership(dobj, fout))
2311 0 : return; /* extension membership overrides all else */
2312 :
2313 1058 : dobj->dump = fout->dopt->include_everything ?
2314 1058 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2315 : }
2316 :
2317 : /*
2318 : * selectDumpableStatisticsObject: policy-setting subroutine
2319 : * Mark an extended statistics object as to be dumped or not
2320 : *
2321 : * We dump an extended statistics object if the schema it's in and the table
2322 : * it's for are being dumped. (This'll need more thought if statistics
2323 : * objects ever support cross-table stats.)
2324 : */
2325 : static void
2326 362 : selectDumpableStatisticsObject(StatsExtInfo *sobj, Archive *fout)
2327 : {
2328 362 : if (checkExtensionMembership(&sobj->dobj, fout))
2329 0 : return; /* extension membership overrides all else */
2330 :
2331 362 : sobj->dobj.dump = sobj->dobj.namespace->dobj.dump_contains;
2332 362 : if (sobj->stattable == NULL ||
2333 362 : !(sobj->stattable->dobj.dump & DUMP_COMPONENT_DEFINITION))
2334 56 : sobj->dobj.dump = DUMP_COMPONENT_NONE;
2335 : }
2336 :
2337 : /*
2338 : * selectDumpableObject: policy-setting subroutine
2339 : * Mark a generic dumpable object as to be dumped or not
2340 : *
2341 : * Use this only for object types without a special-case routine above.
2342 : */
2343 : static void
2344 802794 : selectDumpableObject(DumpableObject *dobj, Archive *fout)
2345 : {
2346 802794 : if (checkExtensionMembership(dobj, fout))
2347 376 : return; /* extension membership overrides all else */
2348 :
2349 : /*
2350 : * Default policy is to dump if parent namespace is dumpable, or for
2351 : * non-namespace-associated items, dump if we're dumping "everything".
2352 : */
2353 802418 : if (dobj->namespace)
2354 800788 : dobj->dump = dobj->namespace->dobj.dump_contains;
2355 : else
2356 1630 : dobj->dump = fout->dopt->include_everything ?
2357 1630 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2358 : }
2359 :
2360 : /*
2361 : * Dump a table's contents for loading using the COPY command
2362 : * - this routine is called by the Archiver when it wants the table
2363 : * to be dumped.
2364 : */
2365 : static int
2366 8654 : dumpTableData_copy(Archive *fout, const void *dcontext)
2367 : {
2368 8654 : TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
2369 8654 : TableInfo *tbinfo = tdinfo->tdtable;
2370 8654 : const char *classname = tbinfo->dobj.name;
2371 8654 : PQExpBuffer q = createPQExpBuffer();
2372 :
2373 : /*
2374 : * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
2375 : * which uses it already.
2376 : */
2377 8654 : PQExpBuffer clistBuf = createPQExpBuffer();
2378 8654 : PGconn *conn = GetConnection(fout);
2379 : PGresult *res;
2380 : int ret;
2381 : char *copybuf;
2382 : const char *column_list;
2383 :
2384 8654 : pg_log_info("dumping contents of table \"%s.%s\"",
2385 : tbinfo->dobj.namespace->dobj.name, classname);
2386 :
2387 : /*
2388 : * Specify the column list explicitly so that we have no possibility of
2389 : * retrieving data in the wrong column order. (The default column
2390 : * ordering of COPY will not be what we want in certain corner cases
2391 : * involving ADD COLUMN and inheritance.)
2392 : */
2393 8654 : column_list = fmtCopyColumnList(tbinfo, clistBuf);
2394 :
2395 : /*
2396 : * Use COPY (SELECT ...) TO when dumping a foreign table's data, and when
2397 : * a filter condition was specified. For other cases a simple COPY
2398 : * suffices.
2399 : */
2400 8654 : if (tdinfo->filtercond || tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2401 : {
2402 : /* Temporary allows to access to foreign tables to dump data */
2403 146 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2404 2 : set_restrict_relation_kind(fout, "view");
2405 :
2406 146 : appendPQExpBufferStr(q, "COPY (SELECT ");
2407 : /* klugery to get rid of parens in column list */
2408 146 : if (strlen(column_list) > 2)
2409 : {
2410 146 : appendPQExpBufferStr(q, column_list + 1);
2411 146 : q->data[q->len - 1] = ' ';
2412 : }
2413 : else
2414 0 : appendPQExpBufferStr(q, "* ");
2415 :
2416 292 : appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
2417 146 : fmtQualifiedDumpable(tbinfo),
2418 146 : tdinfo->filtercond ? tdinfo->filtercond : "");
2419 : }
2420 : else
2421 : {
2422 8508 : appendPQExpBuffer(q, "COPY %s %s TO stdout;",
2423 8508 : fmtQualifiedDumpable(tbinfo),
2424 : column_list);
2425 : }
2426 8654 : res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
2427 8652 : PQclear(res);
2428 8652 : destroyPQExpBuffer(clistBuf);
2429 :
2430 : for (;;)
2431 : {
2432 3622420 : ret = PQgetCopyData(conn, ©buf, 0);
2433 :
2434 3622420 : if (ret < 0)
2435 8652 : break; /* done or error */
2436 :
2437 3613768 : if (copybuf)
2438 : {
2439 3613768 : WriteData(fout, copybuf, ret);
2440 3613768 : PQfreemem(copybuf);
2441 : }
2442 :
2443 : /* ----------
2444 : * THROTTLE:
2445 : *
2446 : * There was considerable discussion in late July, 2000 regarding
2447 : * slowing down pg_dump when backing up large tables. Users with both
2448 : * slow & fast (multi-processor) machines experienced performance
2449 : * degradation when doing a backup.
2450 : *
2451 : * Initial attempts based on sleeping for a number of ms for each ms
2452 : * of work were deemed too complex, then a simple 'sleep in each loop'
2453 : * implementation was suggested. The latter failed because the loop
2454 : * was too tight. Finally, the following was implemented:
2455 : *
2456 : * If throttle is non-zero, then
2457 : * See how long since the last sleep.
2458 : * Work out how long to sleep (based on ratio).
2459 : * If sleep is more than 100ms, then
2460 : * sleep
2461 : * reset timer
2462 : * EndIf
2463 : * EndIf
2464 : *
2465 : * where the throttle value was the number of ms to sleep per ms of
2466 : * work. The calculation was done in each loop.
2467 : *
2468 : * Most of the hard work is done in the backend, and this solution
2469 : * still did not work particularly well: on slow machines, the ratio
2470 : * was 50:1, and on medium paced machines, 1:1, and on fast
2471 : * multi-processor machines, it had little or no effect, for reasons
2472 : * that were unclear.
2473 : *
2474 : * Further discussion ensued, and the proposal was dropped.
2475 : *
2476 : * For those people who want this feature, it can be implemented using
2477 : * gettimeofday in each loop, calculating the time since last sleep,
2478 : * multiplying that by the sleep ratio, then if the result is more
2479 : * than a preset 'minimum sleep time' (say 100ms), call the 'select'
2480 : * function to sleep for a subsecond period ie.
2481 : *
2482 : * select(0, NULL, NULL, NULL, &tvi);
2483 : *
2484 : * This will return after the interval specified in the structure tvi.
2485 : * Finally, call gettimeofday again to save the 'last sleep time'.
2486 : * ----------
2487 : */
2488 : }
2489 8652 : archprintf(fout, "\\.\n\n\n");
2490 :
2491 8652 : if (ret == -2)
2492 : {
2493 : /* copy data transfer failed */
2494 0 : pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
2495 0 : pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
2496 0 : pg_log_error_detail("Command was: %s", q->data);
2497 0 : exit_nicely(1);
2498 : }
2499 :
2500 : /* Check command status and return to normal libpq state */
2501 8652 : res = PQgetResult(conn);
2502 8652 : if (PQresultStatus(res) != PGRES_COMMAND_OK)
2503 : {
2504 0 : pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
2505 0 : pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
2506 0 : pg_log_error_detail("Command was: %s", q->data);
2507 0 : exit_nicely(1);
2508 : }
2509 8652 : PQclear(res);
2510 :
2511 : /* Do this to ensure we've pumped libpq back to idle state */
2512 8652 : if (PQgetResult(conn) != NULL)
2513 0 : pg_log_warning("unexpected extra results during COPY of table \"%s\"",
2514 : classname);
2515 :
2516 8652 : destroyPQExpBuffer(q);
2517 :
2518 : /* Revert back the setting */
2519 8652 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2520 0 : set_restrict_relation_kind(fout, "view, foreign-table");
2521 :
2522 8652 : return 1;
2523 : }
2524 :
2525 : /*
2526 : * Dump table data using INSERT commands.
2527 : *
2528 : * Caution: when we restore from an archive file direct to database, the
2529 : * INSERT commands emitted by this function have to be parsed by
2530 : * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
2531 : * E'' strings, or dollar-quoted strings. So don't emit anything like that.
2532 : */
2533 : static int
2534 170 : dumpTableData_insert(Archive *fout, const void *dcontext)
2535 : {
2536 170 : TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
2537 170 : TableInfo *tbinfo = tdinfo->tdtable;
2538 170 : DumpOptions *dopt = fout->dopt;
2539 170 : PQExpBuffer q = createPQExpBuffer();
2540 170 : PQExpBuffer insertStmt = NULL;
2541 : char *attgenerated;
2542 : PGresult *res;
2543 : int nfields,
2544 : i;
2545 170 : int rows_per_statement = dopt->dump_inserts;
2546 170 : int rows_this_statement = 0;
2547 :
2548 : /* Temporary allows to access to foreign tables to dump data */
2549 170 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2550 0 : set_restrict_relation_kind(fout, "view");
2551 :
2552 : /*
2553 : * If we're going to emit INSERTs with column names, the most efficient
2554 : * way to deal with generated columns is to exclude them entirely. For
2555 : * INSERTs without column names, we have to emit DEFAULT rather than the
2556 : * actual column value --- but we can save a few cycles by fetching nulls
2557 : * rather than the uninteresting-to-us value.
2558 : */
2559 170 : attgenerated = (char *) pg_malloc(tbinfo->numatts * sizeof(char));
2560 170 : appendPQExpBufferStr(q, "DECLARE _pg_dump_cursor CURSOR FOR SELECT ");
2561 170 : nfields = 0;
2562 522 : for (i = 0; i < tbinfo->numatts; i++)
2563 : {
2564 352 : if (tbinfo->attisdropped[i])
2565 4 : continue;
2566 348 : if (tbinfo->attgenerated[i] && dopt->column_inserts)
2567 16 : continue;
2568 332 : if (nfields > 0)
2569 176 : appendPQExpBufferStr(q, ", ");
2570 332 : if (tbinfo->attgenerated[i])
2571 16 : appendPQExpBufferStr(q, "NULL");
2572 : else
2573 316 : appendPQExpBufferStr(q, fmtId(tbinfo->attnames[i]));
2574 332 : attgenerated[nfields] = tbinfo->attgenerated[i];
2575 332 : nfields++;
2576 : }
2577 : /* Servers before 9.4 will complain about zero-column SELECT */
2578 170 : if (nfields == 0)
2579 14 : appendPQExpBufferStr(q, "NULL");
2580 170 : appendPQExpBuffer(q, " FROM ONLY %s",
2581 170 : fmtQualifiedDumpable(tbinfo));
2582 170 : if (tdinfo->filtercond)
2583 0 : appendPQExpBuffer(q, " %s", tdinfo->filtercond);
2584 :
2585 170 : ExecuteSqlStatement(fout, q->data);
2586 :
2587 : while (1)
2588 : {
2589 274 : res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
2590 : PGRES_TUPLES_OK);
2591 :
2592 : /* cross-check field count, allowing for dummy NULL if any */
2593 274 : if (nfields != PQnfields(res) &&
2594 20 : !(nfields == 0 && PQnfields(res) == 1))
2595 0 : pg_fatal("wrong number of fields retrieved from table \"%s\"",
2596 : tbinfo->dobj.name);
2597 :
2598 : /*
2599 : * First time through, we build as much of the INSERT statement as
2600 : * possible in "insertStmt", which we can then just print for each
2601 : * statement. If the table happens to have zero dumpable columns then
2602 : * this will be a complete statement, otherwise it will end in
2603 : * "VALUES" and be ready to have the row's column values printed.
2604 : */
2605 274 : if (insertStmt == NULL)
2606 : {
2607 : TableInfo *targettab;
2608 :
2609 170 : insertStmt = createPQExpBuffer();
2610 :
2611 : /*
2612 : * When load-via-partition-root is set or forced, get the root
2613 : * table name for the partition table, so that we can reload data
2614 : * through the root table.
2615 : */
2616 170 : if (tbinfo->ispartition &&
2617 96 : (dopt->load_via_partition_root ||
2618 48 : forcePartitionRootLoad(tbinfo)))
2619 14 : targettab = getRootTableInfo(tbinfo);
2620 : else
2621 156 : targettab = tbinfo;
2622 :
2623 170 : appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
2624 170 : fmtQualifiedDumpable(targettab));
2625 :
2626 : /* corner case for zero-column table */
2627 170 : if (nfields == 0)
2628 : {
2629 14 : appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
2630 : }
2631 : else
2632 : {
2633 : /* append the list of column names if required */
2634 156 : if (dopt->column_inserts)
2635 : {
2636 70 : appendPQExpBufferChar(insertStmt, '(');
2637 210 : for (int field = 0; field < nfields; field++)
2638 : {
2639 140 : if (field > 0)
2640 70 : appendPQExpBufferStr(insertStmt, ", ");
2641 140 : appendPQExpBufferStr(insertStmt,
2642 140 : fmtId(PQfname(res, field)));
2643 : }
2644 70 : appendPQExpBufferStr(insertStmt, ") ");
2645 : }
2646 :
2647 156 : if (tbinfo->needs_override)
2648 4 : appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
2649 :
2650 156 : appendPQExpBufferStr(insertStmt, "VALUES");
2651 : }
2652 : }
2653 :
2654 6816 : for (int tuple = 0; tuple < PQntuples(res); tuple++)
2655 : {
2656 : /* Write the INSERT if not in the middle of a multi-row INSERT. */
2657 6542 : if (rows_this_statement == 0)
2658 6530 : archputs(insertStmt->data, fout);
2659 :
2660 : /*
2661 : * If it is zero-column table then we've already written the
2662 : * complete statement, which will mean we've disobeyed
2663 : * --rows-per-insert when it's set greater than 1. We do support
2664 : * a way to make this multi-row with: SELECT UNION ALL SELECT
2665 : * UNION ALL ... but that's non-standard so we should avoid it
2666 : * given that using INSERTs is mostly only ever needed for
2667 : * cross-database exports.
2668 : */
2669 6542 : if (nfields == 0)
2670 12 : continue;
2671 :
2672 : /* Emit a row heading */
2673 6530 : if (rows_per_statement == 1)
2674 6512 : archputs(" (", fout);
2675 18 : else if (rows_this_statement > 0)
2676 12 : archputs(",\n\t(", fout);
2677 : else
2678 6 : archputs("\n\t(", fout);
2679 :
2680 19698 : for (int field = 0; field < nfields; field++)
2681 : {
2682 13168 : if (field > 0)
2683 6638 : archputs(", ", fout);
2684 13168 : if (attgenerated[field])
2685 : {
2686 4 : archputs("DEFAULT", fout);
2687 4 : continue;
2688 : }
2689 13164 : if (PQgetisnull(res, tuple, field))
2690 : {
2691 166 : archputs("NULL", fout);
2692 166 : continue;
2693 : }
2694 :
2695 : /* XXX This code is partially duplicated in ruleutils.c */
2696 12998 : switch (PQftype(res, field))
2697 : {
2698 8938 : case INT2OID:
2699 : case INT4OID:
2700 : case INT8OID:
2701 : case OIDOID:
2702 : case FLOAT4OID:
2703 : case FLOAT8OID:
2704 : case NUMERICOID:
2705 : {
2706 : /*
2707 : * These types are printed without quotes unless
2708 : * they contain values that aren't accepted by the
2709 : * scanner unquoted (e.g., 'NaN'). Note that
2710 : * strtod() and friends might accept NaN, so we
2711 : * can't use that to test.
2712 : *
2713 : * In reality we only need to defend against
2714 : * infinity and NaN, so we need not get too crazy
2715 : * about pattern matching here.
2716 : */
2717 8938 : const char *s = PQgetvalue(res, tuple, field);
2718 :
2719 8938 : if (strspn(s, "0123456789 +-eE.") == strlen(s))
2720 8934 : archputs(s, fout);
2721 : else
2722 4 : archprintf(fout, "'%s'", s);
2723 : }
2724 8938 : break;
2725 :
2726 4 : case BITOID:
2727 : case VARBITOID:
2728 4 : archprintf(fout, "B'%s'",
2729 : PQgetvalue(res, tuple, field));
2730 4 : break;
2731 :
2732 8 : case BOOLOID:
2733 8 : if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2734 4 : archputs("true", fout);
2735 : else
2736 4 : archputs("false", fout);
2737 8 : break;
2738 :
2739 4048 : default:
2740 : /* All other types are printed as string literals. */
2741 4048 : resetPQExpBuffer(q);
2742 4048 : appendStringLiteralAH(q,
2743 : PQgetvalue(res, tuple, field),
2744 : fout);
2745 4048 : archputs(q->data, fout);
2746 4048 : break;
2747 : }
2748 : }
2749 :
2750 : /* Terminate the row ... */
2751 6530 : archputs(")", fout);
2752 :
2753 : /* ... and the statement, if the target no. of rows is reached */
2754 6530 : if (++rows_this_statement >= rows_per_statement)
2755 : {
2756 6516 : if (dopt->do_nothing)
2757 0 : archputs(" ON CONFLICT DO NOTHING;\n", fout);
2758 : else
2759 6516 : archputs(";\n", fout);
2760 : /* Reset the row counter */
2761 6516 : rows_this_statement = 0;
2762 : }
2763 : }
2764 :
2765 274 : if (PQntuples(res) <= 0)
2766 : {
2767 170 : PQclear(res);
2768 170 : break;
2769 : }
2770 104 : PQclear(res);
2771 : }
2772 :
2773 : /* Terminate any statements that didn't make the row count. */
2774 170 : if (rows_this_statement > 0)
2775 : {
2776 2 : if (dopt->do_nothing)
2777 0 : archputs(" ON CONFLICT DO NOTHING;\n", fout);
2778 : else
2779 2 : archputs(";\n", fout);
2780 : }
2781 :
2782 170 : archputs("\n\n", fout);
2783 :
2784 170 : ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2785 :
2786 170 : destroyPQExpBuffer(q);
2787 170 : if (insertStmt != NULL)
2788 170 : destroyPQExpBuffer(insertStmt);
2789 170 : free(attgenerated);
2790 :
2791 : /* Revert back the setting */
2792 170 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2793 0 : set_restrict_relation_kind(fout, "view, foreign-table");
2794 :
2795 170 : return 1;
2796 : }
2797 :
2798 : /*
2799 : * getRootTableInfo:
2800 : * get the root TableInfo for the given partition table.
2801 : */
2802 : static TableInfo *
2803 182 : getRootTableInfo(const TableInfo *tbinfo)
2804 : {
2805 : TableInfo *parentTbinfo;
2806 :
2807 : Assert(tbinfo->ispartition);
2808 : Assert(tbinfo->numParents == 1);
2809 :
2810 182 : parentTbinfo = tbinfo->parents[0];
2811 182 : while (parentTbinfo->ispartition)
2812 : {
2813 : Assert(parentTbinfo->numParents == 1);
2814 0 : parentTbinfo = parentTbinfo->parents[0];
2815 : }
2816 :
2817 182 : return parentTbinfo;
2818 : }
2819 :
2820 : /*
2821 : * forcePartitionRootLoad
2822 : * Check if we must force load_via_partition_root for this partition.
2823 : *
2824 : * This is required if any level of ancestral partitioned table has an
2825 : * unsafe partitioning scheme.
2826 : */
2827 : static bool
2828 2204 : forcePartitionRootLoad(const TableInfo *tbinfo)
2829 : {
2830 : TableInfo *parentTbinfo;
2831 :
2832 : Assert(tbinfo->ispartition);
2833 : Assert(tbinfo->numParents == 1);
2834 :
2835 2204 : parentTbinfo = tbinfo->parents[0];
2836 2204 : if (parentTbinfo->unsafe_partitions)
2837 182 : return true;
2838 2454 : while (parentTbinfo->ispartition)
2839 : {
2840 : Assert(parentTbinfo->numParents == 1);
2841 432 : parentTbinfo = parentTbinfo->parents[0];
2842 432 : if (parentTbinfo->unsafe_partitions)
2843 0 : return true;
2844 : }
2845 :
2846 2022 : return false;
2847 : }
2848 :
2849 : /*
2850 : * dumpTableData -
2851 : * dump the contents of a single table
2852 : *
2853 : * Actually, this just makes an ArchiveEntry for the table contents.
2854 : */
2855 : static void
2856 8992 : dumpTableData(Archive *fout, const TableDataInfo *tdinfo)
2857 : {
2858 8992 : DumpOptions *dopt = fout->dopt;
2859 8992 : TableInfo *tbinfo = tdinfo->tdtable;
2860 8992 : PQExpBuffer copyBuf = createPQExpBuffer();
2861 8992 : PQExpBuffer clistBuf = createPQExpBuffer();
2862 : DataDumperPtr dumpFn;
2863 8992 : char *tdDefn = NULL;
2864 : char *copyStmt;
2865 : const char *copyFrom;
2866 :
2867 : /* We had better have loaded per-column details about this table */
2868 : Assert(tbinfo->interesting);
2869 :
2870 : /*
2871 : * When load-via-partition-root is set or forced, get the root table name
2872 : * for the partition table, so that we can reload data through the root
2873 : * table. Then construct a comment to be inserted into the TOC entry's
2874 : * defn field, so that such cases can be identified reliably.
2875 : */
2876 8992 : if (tbinfo->ispartition &&
2877 4312 : (dopt->load_via_partition_root ||
2878 2156 : forcePartitionRootLoad(tbinfo)))
2879 168 : {
2880 : TableInfo *parentTbinfo;
2881 : char *sanitized;
2882 :
2883 168 : parentTbinfo = getRootTableInfo(tbinfo);
2884 168 : copyFrom = fmtQualifiedDumpable(parentTbinfo);
2885 168 : sanitized = sanitize_line(copyFrom, true);
2886 168 : printfPQExpBuffer(copyBuf, "-- load via partition root %s",
2887 : sanitized);
2888 168 : free(sanitized);
2889 168 : tdDefn = pg_strdup(copyBuf->data);
2890 : }
2891 : else
2892 8824 : copyFrom = fmtQualifiedDumpable(tbinfo);
2893 :
2894 8992 : if (dopt->dump_inserts == 0)
2895 : {
2896 : /* Dump/restore using COPY */
2897 8822 : dumpFn = dumpTableData_copy;
2898 : /* must use 2 steps here 'cause fmtId is nonreentrant */
2899 8822 : printfPQExpBuffer(copyBuf, "COPY %s ",
2900 : copyFrom);
2901 8822 : appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
2902 : fmtCopyColumnList(tbinfo, clistBuf));
2903 8822 : copyStmt = copyBuf->data;
2904 : }
2905 : else
2906 : {
2907 : /* Restore using INSERT */
2908 170 : dumpFn = dumpTableData_insert;
2909 170 : copyStmt = NULL;
2910 : }
2911 :
2912 : /*
2913 : * Note: although the TableDataInfo is a full DumpableObject, we treat its
2914 : * dependency on its table as "special" and pass it to ArchiveEntry now.
2915 : * See comments for BuildArchiveDependencies.
2916 : */
2917 8992 : if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2918 : {
2919 : TocEntry *te;
2920 :
2921 8992 : te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2922 8992 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2923 : .namespace = tbinfo->dobj.namespace->dobj.name,
2924 : .owner = tbinfo->rolname,
2925 : .description = "TABLE DATA",
2926 : .section = SECTION_DATA,
2927 : .createStmt = tdDefn,
2928 : .copyStmt = copyStmt,
2929 : .deps = &(tbinfo->dobj.dumpId),
2930 : .nDeps = 1,
2931 : .dumpFn = dumpFn,
2932 : .dumpArg = tdinfo));
2933 :
2934 : /*
2935 : * Set the TocEntry's dataLength in case we are doing a parallel dump
2936 : * and want to order dump jobs by table size. We choose to measure
2937 : * dataLength in table pages (including TOAST pages) during dump, so
2938 : * no scaling is needed.
2939 : *
2940 : * However, relpages is declared as "integer" in pg_class, and hence
2941 : * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
2942 : * Cast so that we get the right interpretation of table sizes
2943 : * exceeding INT_MAX pages.
2944 : */
2945 8992 : te->dataLength = (BlockNumber) tbinfo->relpages;
2946 8992 : te->dataLength += (BlockNumber) tbinfo->toastpages;
2947 :
2948 : /*
2949 : * If pgoff_t is only 32 bits wide, the above refinement is useless,
2950 : * and instead we'd better worry about integer overflow. Clamp to
2951 : * INT_MAX if the correct result exceeds that.
2952 : */
2953 : if (sizeof(te->dataLength) == 4 &&
2954 : (tbinfo->relpages < 0 || tbinfo->toastpages < 0 ||
2955 : te->dataLength < 0))
2956 : te->dataLength = INT_MAX;
2957 : }
2958 :
2959 8992 : destroyPQExpBuffer(copyBuf);
2960 8992 : destroyPQExpBuffer(clistBuf);
2961 8992 : }
2962 :
2963 : /*
2964 : * refreshMatViewData -
2965 : * load or refresh the contents of a single materialized view
2966 : *
2967 : * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2968 : * statement.
2969 : */
2970 : static void
2971 864 : refreshMatViewData(Archive *fout, const TableDataInfo *tdinfo)
2972 : {
2973 864 : TableInfo *tbinfo = tdinfo->tdtable;
2974 : PQExpBuffer q;
2975 :
2976 : /* If the materialized view is not flagged as populated, skip this. */
2977 864 : if (!tbinfo->relispopulated)
2978 160 : return;
2979 :
2980 704 : q = createPQExpBuffer();
2981 :
2982 704 : appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
2983 704 : fmtQualifiedDumpable(tbinfo));
2984 :
2985 704 : if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2986 704 : ArchiveEntry(fout,
2987 : tdinfo->dobj.catId, /* catalog ID */
2988 704 : tdinfo->dobj.dumpId, /* dump ID */
2989 704 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2990 : .namespace = tbinfo->dobj.namespace->dobj.name,
2991 : .owner = tbinfo->rolname,
2992 : .description = "MATERIALIZED VIEW DATA",
2993 : .section = SECTION_POST_DATA,
2994 : .createStmt = q->data,
2995 : .deps = tdinfo->dobj.dependencies,
2996 : .nDeps = tdinfo->dobj.nDeps));
2997 :
2998 704 : destroyPQExpBuffer(q);
2999 : }
3000 :
3001 : /*
3002 : * getTableData -
3003 : * set up dumpable objects representing the contents of tables
3004 : */
3005 : static void
3006 354 : getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
3007 : {
3008 : int i;
3009 :
3010 94988 : for (i = 0; i < numTables; i++)
3011 : {
3012 94634 : if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
3013 1858 : (!relkind || tblinfo[i].relkind == relkind))
3014 12598 : makeTableDataInfo(dopt, &(tblinfo[i]));
3015 : }
3016 354 : }
3017 :
3018 : /*
3019 : * Make a dumpable object for the data of this specific table
3020 : *
3021 : * Note: we make a TableDataInfo if and only if we are going to dump the
3022 : * table data; the "dump" field in such objects isn't very interesting.
3023 : */
3024 : static void
3025 12820 : makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo)
3026 : {
3027 : TableDataInfo *tdinfo;
3028 :
3029 : /*
3030 : * Nothing to do if we already decided to dump the table. This will
3031 : * happen for "config" tables.
3032 : */
3033 12820 : if (tbinfo->dataObj != NULL)
3034 2 : return;
3035 :
3036 : /* Skip VIEWs (no data to dump) */
3037 12818 : if (tbinfo->relkind == RELKIND_VIEW)
3038 980 : return;
3039 : /* Skip FOREIGN TABLEs (no data to dump) unless requested explicitly */
3040 11838 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
3041 88 : (foreign_servers_include_oids.head == NULL ||
3042 8 : !simple_oid_list_member(&foreign_servers_include_oids,
3043 : tbinfo->foreign_server)))
3044 86 : return;
3045 : /* Skip partitioned tables (data in partitions) */
3046 11752 : if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
3047 1022 : return;
3048 :
3049 : /* Don't dump data in unlogged tables, if so requested */
3050 10730 : if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
3051 82 : dopt->no_unlogged_table_data)
3052 36 : return;
3053 :
3054 : /* Check that the data is not explicitly excluded */
3055 10694 : if (simple_oid_list_member(&tabledata_exclude_oids,
3056 : tbinfo->dobj.catId.oid))
3057 16 : return;
3058 :
3059 : /* OK, let's dump it */
3060 10678 : tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
3061 :
3062 10678 : if (tbinfo->relkind == RELKIND_MATVIEW)
3063 864 : tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
3064 9814 : else if (tbinfo->relkind == RELKIND_SEQUENCE)
3065 822 : tdinfo->dobj.objType = DO_SEQUENCE_SET;
3066 : else
3067 8992 : tdinfo->dobj.objType = DO_TABLE_DATA;
3068 :
3069 : /*
3070 : * Note: use tableoid 0 so that this object won't be mistaken for
3071 : * something that pg_depend entries apply to.
3072 : */
3073 10678 : tdinfo->dobj.catId.tableoid = 0;
3074 10678 : tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3075 10678 : AssignDumpId(&tdinfo->dobj);
3076 10678 : tdinfo->dobj.name = tbinfo->dobj.name;
3077 10678 : tdinfo->dobj.namespace = tbinfo->dobj.namespace;
3078 10678 : tdinfo->tdtable = tbinfo;
3079 10678 : tdinfo->filtercond = NULL; /* might get set later */
3080 10678 : addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
3081 :
3082 : /* A TableDataInfo contains data, of course */
3083 10678 : tdinfo->dobj.components |= DUMP_COMPONENT_DATA;
3084 :
3085 10678 : tbinfo->dataObj = tdinfo;
3086 :
3087 : /*
3088 : * Materialized view statistics must be restored after the data, because
3089 : * REFRESH MATERIALIZED VIEW replaces the storage and resets the stats.
3090 : *
3091 : * The dependency is added here because the statistics objects are created
3092 : * first.
3093 : */
3094 10678 : if (tbinfo->relkind == RELKIND_MATVIEW && tbinfo->stats != NULL)
3095 : {
3096 700 : tbinfo->stats->section = SECTION_POST_DATA;
3097 700 : addObjectDependency(&tbinfo->stats->dobj, tdinfo->dobj.dumpId);
3098 : }
3099 :
3100 : /* Make sure that we'll collect per-column info for this table. */
3101 10678 : tbinfo->interesting = true;
3102 : }
3103 :
3104 : /*
3105 : * The refresh for a materialized view must be dependent on the refresh for
3106 : * any materialized view that this one is dependent on.
3107 : *
3108 : * This must be called after all the objects are created, but before they are
3109 : * sorted.
3110 : */
3111 : static void
3112 290 : buildMatViewRefreshDependencies(Archive *fout)
3113 : {
3114 : PQExpBuffer query;
3115 : PGresult *res;
3116 : int ntups,
3117 : i;
3118 : int i_classid,
3119 : i_objid,
3120 : i_refobjid;
3121 :
3122 : /* No Mat Views before 9.3. */
3123 290 : if (fout->remoteVersion < 90300)
3124 0 : return;
3125 :
3126 290 : query = createPQExpBuffer();
3127 :
3128 290 : appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
3129 : "( "
3130 : "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
3131 : "FROM pg_depend d1 "
3132 : "JOIN pg_class c1 ON c1.oid = d1.objid "
3133 : "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
3134 : " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
3135 : "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
3136 : "AND d2.objid = r1.oid "
3137 : "AND d2.refobjid <> d1.objid "
3138 : "JOIN pg_class c2 ON c2.oid = d2.refobjid "
3139 : "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
3140 : CppAsString2(RELKIND_VIEW) ") "
3141 : "WHERE d1.classid = 'pg_class'::regclass "
3142 : "UNION "
3143 : "SELECT w.objid, d3.refobjid, c3.relkind "
3144 : "FROM w "
3145 : "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
3146 : "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
3147 : "AND d3.objid = r3.oid "
3148 : "AND d3.refobjid <> w.refobjid "
3149 : "JOIN pg_class c3 ON c3.oid = d3.refobjid "
3150 : "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
3151 : CppAsString2(RELKIND_VIEW) ") "
3152 : ") "
3153 : "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
3154 : "FROM w "
3155 : "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
3156 :
3157 290 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3158 :
3159 290 : ntups = PQntuples(res);
3160 :
3161 290 : i_classid = PQfnumber(res, "classid");
3162 290 : i_objid = PQfnumber(res, "objid");
3163 290 : i_refobjid = PQfnumber(res, "refobjid");
3164 :
3165 890 : for (i = 0; i < ntups; i++)
3166 : {
3167 : CatalogId objId;
3168 : CatalogId refobjId;
3169 : DumpableObject *dobj;
3170 : DumpableObject *refdobj;
3171 : TableInfo *tbinfo;
3172 : TableInfo *reftbinfo;
3173 :
3174 600 : objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
3175 600 : objId.oid = atooid(PQgetvalue(res, i, i_objid));
3176 600 : refobjId.tableoid = objId.tableoid;
3177 600 : refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
3178 :
3179 600 : dobj = findObjectByCatalogId(objId);
3180 600 : if (dobj == NULL)
3181 96 : continue;
3182 :
3183 : Assert(dobj->objType == DO_TABLE);
3184 600 : tbinfo = (TableInfo *) dobj;
3185 : Assert(tbinfo->relkind == RELKIND_MATVIEW);
3186 600 : dobj = (DumpableObject *) tbinfo->dataObj;
3187 600 : if (dobj == NULL)
3188 96 : continue;
3189 : Assert(dobj->objType == DO_REFRESH_MATVIEW);
3190 :
3191 504 : refdobj = findObjectByCatalogId(refobjId);
3192 504 : if (refdobj == NULL)
3193 0 : continue;
3194 :
3195 : Assert(refdobj->objType == DO_TABLE);
3196 504 : reftbinfo = (TableInfo *) refdobj;
3197 : Assert(reftbinfo->relkind == RELKIND_MATVIEW);
3198 504 : refdobj = (DumpableObject *) reftbinfo->dataObj;
3199 504 : if (refdobj == NULL)
3200 0 : continue;
3201 : Assert(refdobj->objType == DO_REFRESH_MATVIEW);
3202 :
3203 504 : addObjectDependency(dobj, refdobj->dumpId);
3204 :
3205 504 : if (!reftbinfo->relispopulated)
3206 80 : tbinfo->relispopulated = false;
3207 : }
3208 :
3209 290 : PQclear(res);
3210 :
3211 290 : destroyPQExpBuffer(query);
3212 : }
3213 :
3214 : /*
3215 : * getTableDataFKConstraints -
3216 : * add dump-order dependencies reflecting foreign key constraints
3217 : *
3218 : * This code is executed only in a data-only dump --- in schema+data dumps
3219 : * we handle foreign key issues by not creating the FK constraints until
3220 : * after the data is loaded. In a data-only dump, however, we want to
3221 : * order the table data objects in such a way that a table's referenced
3222 : * tables are restored first. (In the presence of circular references or
3223 : * self-references this may be impossible; we'll detect and complain about
3224 : * that during the dependency sorting step.)
3225 : */
3226 : static void
3227 14 : getTableDataFKConstraints(void)
3228 : {
3229 : DumpableObject **dobjs;
3230 : int numObjs;
3231 : int i;
3232 :
3233 : /* Search through all the dumpable objects for FK constraints */
3234 14 : getDumpableObjects(&dobjs, &numObjs);
3235 51690 : for (i = 0; i < numObjs; i++)
3236 : {
3237 51676 : if (dobjs[i]->objType == DO_FK_CONSTRAINT)
3238 : {
3239 16 : ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
3240 : TableInfo *ftable;
3241 :
3242 : /* Not interesting unless both tables are to be dumped */
3243 16 : if (cinfo->contable == NULL ||
3244 16 : cinfo->contable->dataObj == NULL)
3245 8 : continue;
3246 8 : ftable = findTableByOid(cinfo->confrelid);
3247 8 : if (ftable == NULL ||
3248 8 : ftable->dataObj == NULL)
3249 0 : continue;
3250 :
3251 : /*
3252 : * Okay, make referencing table's TABLE_DATA object depend on the
3253 : * referenced table's TABLE_DATA object.
3254 : */
3255 8 : addObjectDependency(&cinfo->contable->dataObj->dobj,
3256 8 : ftable->dataObj->dobj.dumpId);
3257 : }
3258 : }
3259 14 : free(dobjs);
3260 14 : }
3261 :
3262 :
3263 : /*
3264 : * dumpDatabase:
3265 : * dump the database definition
3266 : */
3267 : static void
3268 168 : dumpDatabase(Archive *fout)
3269 : {
3270 168 : DumpOptions *dopt = fout->dopt;
3271 168 : PQExpBuffer dbQry = createPQExpBuffer();
3272 168 : PQExpBuffer delQry = createPQExpBuffer();
3273 168 : PQExpBuffer creaQry = createPQExpBuffer();
3274 168 : PQExpBuffer labelq = createPQExpBuffer();
3275 168 : PGconn *conn = GetConnection(fout);
3276 : PGresult *res;
3277 : int i_tableoid,
3278 : i_oid,
3279 : i_datname,
3280 : i_datdba,
3281 : i_encoding,
3282 : i_datlocprovider,
3283 : i_collate,
3284 : i_ctype,
3285 : i_datlocale,
3286 : i_daticurules,
3287 : i_frozenxid,
3288 : i_minmxid,
3289 : i_datacl,
3290 : i_acldefault,
3291 : i_datistemplate,
3292 : i_datconnlimit,
3293 : i_datcollversion,
3294 : i_tablespace;
3295 : CatalogId dbCatId;
3296 : DumpId dbDumpId;
3297 : DumpableAcl dbdacl;
3298 : const char *datname,
3299 : *dba,
3300 : *encoding,
3301 : *datlocprovider,
3302 : *collate,
3303 : *ctype,
3304 : *locale,
3305 : *icurules,
3306 : *datistemplate,
3307 : *datconnlimit,
3308 : *tablespace;
3309 : uint32 frozenxid,
3310 : minmxid;
3311 : char *qdatname;
3312 :
3313 168 : pg_log_info("saving database definition");
3314 :
3315 : /*
3316 : * Fetch the database-level properties for this database.
3317 : */
3318 168 : appendPQExpBufferStr(dbQry, "SELECT tableoid, oid, datname, "
3319 : "datdba, "
3320 : "pg_encoding_to_char(encoding) AS encoding, "
3321 : "datcollate, datctype, datfrozenxid, "
3322 : "datacl, acldefault('d', datdba) AS acldefault, "
3323 : "datistemplate, datconnlimit, ");
3324 168 : if (fout->remoteVersion >= 90300)
3325 168 : appendPQExpBufferStr(dbQry, "datminmxid, ");
3326 : else
3327 0 : appendPQExpBufferStr(dbQry, "0 AS datminmxid, ");
3328 168 : if (fout->remoteVersion >= 170000)
3329 168 : appendPQExpBufferStr(dbQry, "datlocprovider, datlocale, datcollversion, ");
3330 0 : else if (fout->remoteVersion >= 150000)
3331 0 : appendPQExpBufferStr(dbQry, "datlocprovider, daticulocale AS datlocale, datcollversion, ");
3332 : else
3333 0 : appendPQExpBufferStr(dbQry, "'c' AS datlocprovider, NULL AS datlocale, NULL AS datcollversion, ");
3334 168 : if (fout->remoteVersion >= 160000)
3335 168 : appendPQExpBufferStr(dbQry, "daticurules, ");
3336 : else
3337 0 : appendPQExpBufferStr(dbQry, "NULL AS daticurules, ");
3338 168 : appendPQExpBufferStr(dbQry,
3339 : "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
3340 : "shobj_description(oid, 'pg_database') AS description "
3341 : "FROM pg_database "
3342 : "WHERE datname = current_database()");
3343 :
3344 168 : res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
3345 :
3346 168 : i_tableoid = PQfnumber(res, "tableoid");
3347 168 : i_oid = PQfnumber(res, "oid");
3348 168 : i_datname = PQfnumber(res, "datname");
3349 168 : i_datdba = PQfnumber(res, "datdba");
3350 168 : i_encoding = PQfnumber(res, "encoding");
3351 168 : i_datlocprovider = PQfnumber(res, "datlocprovider");
3352 168 : i_collate = PQfnumber(res, "datcollate");
3353 168 : i_ctype = PQfnumber(res, "datctype");
3354 168 : i_datlocale = PQfnumber(res, "datlocale");
3355 168 : i_daticurules = PQfnumber(res, "daticurules");
3356 168 : i_frozenxid = PQfnumber(res, "datfrozenxid");
3357 168 : i_minmxid = PQfnumber(res, "datminmxid");
3358 168 : i_datacl = PQfnumber(res, "datacl");
3359 168 : i_acldefault = PQfnumber(res, "acldefault");
3360 168 : i_datistemplate = PQfnumber(res, "datistemplate");
3361 168 : i_datconnlimit = PQfnumber(res, "datconnlimit");
3362 168 : i_datcollversion = PQfnumber(res, "datcollversion");
3363 168 : i_tablespace = PQfnumber(res, "tablespace");
3364 :
3365 168 : dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
3366 168 : dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
3367 168 : datname = PQgetvalue(res, 0, i_datname);
3368 168 : dba = getRoleName(PQgetvalue(res, 0, i_datdba));
3369 168 : encoding = PQgetvalue(res, 0, i_encoding);
3370 168 : datlocprovider = PQgetvalue(res, 0, i_datlocprovider);
3371 168 : collate = PQgetvalue(res, 0, i_collate);
3372 168 : ctype = PQgetvalue(res, 0, i_ctype);
3373 168 : if (!PQgetisnull(res, 0, i_datlocale))
3374 28 : locale = PQgetvalue(res, 0, i_datlocale);
3375 : else
3376 140 : locale = NULL;
3377 168 : if (!PQgetisnull(res, 0, i_daticurules))
3378 0 : icurules = PQgetvalue(res, 0, i_daticurules);
3379 : else
3380 168 : icurules = NULL;
3381 168 : frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
3382 168 : minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
3383 168 : dbdacl.acl = PQgetvalue(res, 0, i_datacl);
3384 168 : dbdacl.acldefault = PQgetvalue(res, 0, i_acldefault);
3385 168 : datistemplate = PQgetvalue(res, 0, i_datistemplate);
3386 168 : datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
3387 168 : tablespace = PQgetvalue(res, 0, i_tablespace);
3388 :
3389 168 : qdatname = pg_strdup(fmtId(datname));
3390 :
3391 : /*
3392 : * Prepare the CREATE DATABASE command. We must specify OID (if we want
3393 : * to preserve that), as well as the encoding, locale, and tablespace
3394 : * since those can't be altered later. Other DB properties are left to
3395 : * the DATABASE PROPERTIES entry, so that they can be applied after
3396 : * reconnecting to the target DB.
3397 : *
3398 : * For binary upgrade, we use the FILE_COPY strategy because testing has
3399 : * shown it to be faster. When the server is in binary upgrade mode, it
3400 : * will also skip the checkpoints this strategy ordinarily performs.
3401 : */
3402 168 : if (dopt->binary_upgrade)
3403 : {
3404 70 : appendPQExpBuffer(creaQry,
3405 : "CREATE DATABASE %s WITH TEMPLATE = template0 "
3406 : "OID = %u STRATEGY = FILE_COPY",
3407 : qdatname, dbCatId.oid);
3408 : }
3409 : else
3410 : {
3411 98 : appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
3412 : qdatname);
3413 : }
3414 168 : if (strlen(encoding) > 0)
3415 : {
3416 168 : appendPQExpBufferStr(creaQry, " ENCODING = ");
3417 168 : appendStringLiteralAH(creaQry, encoding, fout);
3418 : }
3419 :
3420 168 : appendPQExpBufferStr(creaQry, " LOCALE_PROVIDER = ");
3421 168 : if (datlocprovider[0] == 'b')
3422 28 : appendPQExpBufferStr(creaQry, "builtin");
3423 140 : else if (datlocprovider[0] == 'c')
3424 140 : appendPQExpBufferStr(creaQry, "libc");
3425 0 : else if (datlocprovider[0] == 'i')
3426 0 : appendPQExpBufferStr(creaQry, "icu");
3427 : else
3428 0 : pg_fatal("unrecognized locale provider: %s",
3429 : datlocprovider);
3430 :
3431 168 : if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
3432 : {
3433 168 : appendPQExpBufferStr(creaQry, " LOCALE = ");
3434 168 : appendStringLiteralAH(creaQry, collate, fout);
3435 : }
3436 : else
3437 : {
3438 0 : if (strlen(collate) > 0)
3439 : {
3440 0 : appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
3441 0 : appendStringLiteralAH(creaQry, collate, fout);
3442 : }
3443 0 : if (strlen(ctype) > 0)
3444 : {
3445 0 : appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
3446 0 : appendStringLiteralAH(creaQry, ctype, fout);
3447 : }
3448 : }
3449 168 : if (locale)
3450 : {
3451 28 : if (datlocprovider[0] == 'b')
3452 28 : appendPQExpBufferStr(creaQry, " BUILTIN_LOCALE = ");
3453 : else
3454 0 : appendPQExpBufferStr(creaQry, " ICU_LOCALE = ");
3455 :
3456 28 : appendStringLiteralAH(creaQry, locale, fout);
3457 : }
3458 :
3459 168 : if (icurules)
3460 : {
3461 0 : appendPQExpBufferStr(creaQry, " ICU_RULES = ");
3462 0 : appendStringLiteralAH(creaQry, icurules, fout);
3463 : }
3464 :
3465 : /*
3466 : * For binary upgrade, carry over the collation version. For normal
3467 : * dump/restore, omit the version, so that it is computed upon restore.
3468 : */
3469 168 : if (dopt->binary_upgrade)
3470 : {
3471 70 : if (!PQgetisnull(res, 0, i_datcollversion))
3472 : {
3473 70 : appendPQExpBufferStr(creaQry, " COLLATION_VERSION = ");
3474 70 : appendStringLiteralAH(creaQry,
3475 : PQgetvalue(res, 0, i_datcollversion),
3476 : fout);
3477 : }
3478 : }
3479 :
3480 : /*
3481 : * Note: looking at dopt->outputNoTablespaces here is completely the wrong
3482 : * thing; the decision whether to specify a tablespace should be left till
3483 : * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
3484 : * label the DATABASE entry with the tablespace and let the normal
3485 : * tablespace selection logic work ... but CREATE DATABASE doesn't pay
3486 : * attention to default_tablespace, so that won't work.
3487 : */
3488 168 : if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
3489 10 : !dopt->outputNoTablespaces)
3490 10 : appendPQExpBuffer(creaQry, " TABLESPACE = %s",
3491 : fmtId(tablespace));
3492 168 : appendPQExpBufferStr(creaQry, ";\n");
3493 :
3494 168 : appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
3495 : qdatname);
3496 :
3497 168 : dbDumpId = createDumpId();
3498 :
3499 168 : ArchiveEntry(fout,
3500 : dbCatId, /* catalog ID */
3501 : dbDumpId, /* dump ID */
3502 168 : ARCHIVE_OPTS(.tag = datname,
3503 : .owner = dba,
3504 : .description = "DATABASE",
3505 : .section = SECTION_PRE_DATA,
3506 : .createStmt = creaQry->data,
3507 : .dropStmt = delQry->data));
3508 :
3509 : /* Compute correct tag for archive entry */
3510 168 : appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
3511 :
3512 : /* Dump DB comment if any */
3513 : {
3514 : /*
3515 : * 8.2 and up keep comments on shared objects in a shared table, so we
3516 : * cannot use the dumpComment() code used for other database objects.
3517 : * Be careful that the ArchiveEntry parameters match that function.
3518 : */
3519 168 : char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
3520 :
3521 168 : if (comment && *comment && !dopt->no_comments)
3522 : {
3523 78 : resetPQExpBuffer(dbQry);
3524 :
3525 : /*
3526 : * Generates warning when loaded into a differently-named
3527 : * database.
3528 : */
3529 78 : appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
3530 78 : appendStringLiteralAH(dbQry, comment, fout);
3531 78 : appendPQExpBufferStr(dbQry, ";\n");
3532 :
3533 78 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3534 78 : ARCHIVE_OPTS(.tag = labelq->data,
3535 : .owner = dba,
3536 : .description = "COMMENT",
3537 : .section = SECTION_NONE,
3538 : .createStmt = dbQry->data,
3539 : .deps = &dbDumpId,
3540 : .nDeps = 1));
3541 : }
3542 : }
3543 :
3544 : /* Dump DB security label, if enabled */
3545 168 : if (!dopt->no_security_labels)
3546 : {
3547 : PGresult *shres;
3548 : PQExpBuffer seclabelQry;
3549 :
3550 168 : seclabelQry = createPQExpBuffer();
3551 :
3552 168 : buildShSecLabelQuery("pg_database", dbCatId.oid, seclabelQry);
3553 168 : shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
3554 168 : resetPQExpBuffer(seclabelQry);
3555 168 : emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
3556 168 : if (seclabelQry->len > 0)
3557 0 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3558 0 : ARCHIVE_OPTS(.tag = labelq->data,
3559 : .owner = dba,
3560 : .description = "SECURITY LABEL",
3561 : .section = SECTION_NONE,
3562 : .createStmt = seclabelQry->data,
3563 : .deps = &dbDumpId,
3564 : .nDeps = 1));
3565 168 : destroyPQExpBuffer(seclabelQry);
3566 168 : PQclear(shres);
3567 : }
3568 :
3569 : /*
3570 : * Dump ACL if any. Note that we do not support initial privileges
3571 : * (pg_init_privs) on databases.
3572 : */
3573 168 : dbdacl.privtype = 0;
3574 168 : dbdacl.initprivs = NULL;
3575 :
3576 168 : dumpACL(fout, dbDumpId, InvalidDumpId, "DATABASE",
3577 : qdatname, NULL, NULL,
3578 : NULL, dba, &dbdacl);
3579 :
3580 : /*
3581 : * Now construct a DATABASE PROPERTIES archive entry to restore any
3582 : * non-default database-level properties. (The reason this must be
3583 : * separate is that we cannot put any additional commands into the TOC
3584 : * entry that has CREATE DATABASE. pg_restore would execute such a group
3585 : * in an implicit transaction block, and the backend won't allow CREATE
3586 : * DATABASE in that context.)
3587 : */
3588 168 : resetPQExpBuffer(creaQry);
3589 168 : resetPQExpBuffer(delQry);
3590 :
3591 168 : if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
3592 0 : appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
3593 : qdatname, datconnlimit);
3594 :
3595 168 : if (strcmp(datistemplate, "t") == 0)
3596 : {
3597 20 : appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
3598 : qdatname);
3599 :
3600 : /*
3601 : * The backend won't accept DROP DATABASE on a template database. We
3602 : * can deal with that by removing the template marking before the DROP
3603 : * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
3604 : * since no such command is currently supported, fake it with a direct
3605 : * UPDATE on pg_database.
3606 : */
3607 20 : appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
3608 : "SET datistemplate = false WHERE datname = ");
3609 20 : appendStringLiteralAH(delQry, datname, fout);
3610 20 : appendPQExpBufferStr(delQry, ";\n");
3611 : }
3612 :
3613 : /*
3614 : * We do not restore pg_database.dathasloginevt because it is set
3615 : * automatically on login event trigger creation.
3616 : */
3617 :
3618 : /* Add database-specific SET options */
3619 168 : dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
3620 :
3621 : /*
3622 : * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
3623 : * entry, too, for lack of a better place.
3624 : */
3625 168 : if (dopt->binary_upgrade)
3626 : {
3627 70 : appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
3628 70 : appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
3629 : "SET datfrozenxid = '%u', datminmxid = '%u'\n"
3630 : "WHERE datname = ",
3631 : frozenxid, minmxid);
3632 70 : appendStringLiteralAH(creaQry, datname, fout);
3633 70 : appendPQExpBufferStr(creaQry, ";\n");
3634 : }
3635 :
3636 168 : if (creaQry->len > 0)
3637 78 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3638 78 : ARCHIVE_OPTS(.tag = datname,
3639 : .owner = dba,
3640 : .description = "DATABASE PROPERTIES",
3641 : .section = SECTION_PRE_DATA,
3642 : .createStmt = creaQry->data,
3643 : .dropStmt = delQry->data,
3644 : .deps = &dbDumpId));
3645 :
3646 : /*
3647 : * pg_largeobject comes from the old system intact, so set its
3648 : * relfrozenxids, relminmxids and relfilenode.
3649 : *
3650 : * pg_largeobject_metadata also comes from the old system intact for
3651 : * upgrades from v16 and newer, so set its relfrozenxids, relminmxids, and
3652 : * relfilenode, too. pg_upgrade can't copy/link the files from older
3653 : * versions because aclitem (needed by pg_largeobject_metadata.lomacl)
3654 : * changed its storage format in v16.
3655 : */
3656 168 : if (dopt->binary_upgrade)
3657 : {
3658 : PGresult *lo_res;
3659 70 : PQExpBuffer loFrozenQry = createPQExpBuffer();
3660 70 : PQExpBuffer loOutQry = createPQExpBuffer();
3661 70 : PQExpBuffer lomOutQry = createPQExpBuffer();
3662 70 : PQExpBuffer loHorizonQry = createPQExpBuffer();
3663 70 : PQExpBuffer lomHorizonQry = createPQExpBuffer();
3664 : int ii_relfrozenxid,
3665 : ii_relfilenode,
3666 : ii_oid,
3667 : ii_relminmxid;
3668 :
3669 70 : if (fout->remoteVersion >= 90300)
3670 70 : appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid, relfilenode, oid\n"
3671 : "FROM pg_catalog.pg_class\n"
3672 : "WHERE oid IN (%u, %u, %u, %u);\n",
3673 : LargeObjectRelationId, LargeObjectLOidPNIndexId,
3674 : LargeObjectMetadataRelationId, LargeObjectMetadataOidIndexId);
3675 : else
3676 0 : appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid, relfilenode, oid\n"
3677 : "FROM pg_catalog.pg_class\n"
3678 : "WHERE oid IN (%u, %u);\n",
3679 : LargeObjectRelationId, LargeObjectLOidPNIndexId);
3680 :
3681 70 : lo_res = ExecuteSqlQuery(fout, loFrozenQry->data, PGRES_TUPLES_OK);
3682 :
3683 70 : ii_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3684 70 : ii_relminmxid = PQfnumber(lo_res, "relminmxid");
3685 70 : ii_relfilenode = PQfnumber(lo_res, "relfilenode");
3686 70 : ii_oid = PQfnumber(lo_res, "oid");
3687 :
3688 70 : appendPQExpBufferStr(loHorizonQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3689 70 : appendPQExpBufferStr(lomHorizonQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
3690 70 : appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
3691 70 : appendPQExpBufferStr(lomOutQry, "\n-- For binary upgrade, preserve pg_largeobject_metadata and index relfilenodes\n");
3692 350 : for (int i = 0; i < PQntuples(lo_res); ++i)
3693 : {
3694 : Oid oid;
3695 : RelFileNumber relfilenumber;
3696 : PQExpBuffer horizonQry;
3697 : PQExpBuffer outQry;
3698 :
3699 280 : oid = atooid(PQgetvalue(lo_res, i, ii_oid));
3700 280 : relfilenumber = atooid(PQgetvalue(lo_res, i, ii_relfilenode));
3701 :
3702 280 : if (oid == LargeObjectRelationId ||
3703 : oid == LargeObjectLOidPNIndexId)
3704 : {
3705 140 : horizonQry = loHorizonQry;
3706 140 : outQry = loOutQry;
3707 : }
3708 : else
3709 : {
3710 140 : horizonQry = lomHorizonQry;
3711 140 : outQry = lomOutQry;
3712 : }
3713 :
3714 280 : appendPQExpBuffer(horizonQry, "UPDATE pg_catalog.pg_class\n"
3715 : "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3716 : "WHERE oid = %u;\n",
3717 280 : atooid(PQgetvalue(lo_res, i, ii_relfrozenxid)),
3718 280 : atooid(PQgetvalue(lo_res, i, ii_relminmxid)),
3719 280 : atooid(PQgetvalue(lo_res, i, ii_oid)));
3720 :
3721 280 : if (oid == LargeObjectRelationId ||
3722 : oid == LargeObjectMetadataRelationId)
3723 140 : appendPQExpBuffer(outQry,
3724 : "SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
3725 : relfilenumber);
3726 140 : else if (oid == LargeObjectLOidPNIndexId ||
3727 : oid == LargeObjectMetadataOidIndexId)
3728 140 : appendPQExpBuffer(outQry,
3729 : "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
3730 : relfilenumber);
3731 : }
3732 :
3733 70 : appendPQExpBufferStr(loOutQry,
3734 : "TRUNCATE pg_catalog.pg_largeobject;\n");
3735 70 : appendPQExpBufferStr(lomOutQry,
3736 : "TRUNCATE pg_catalog.pg_largeobject_metadata;\n");
3737 :
3738 70 : appendPQExpBufferStr(loOutQry, loHorizonQry->data);
3739 70 : appendPQExpBufferStr(lomOutQry, lomHorizonQry->data);
3740 :
3741 70 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3742 70 : ARCHIVE_OPTS(.tag = "pg_largeobject",
3743 : .description = "pg_largeobject",
3744 : .section = SECTION_PRE_DATA,
3745 : .createStmt = loOutQry->data));
3746 :
3747 70 : if (fout->remoteVersion >= 160000)
3748 70 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3749 70 : ARCHIVE_OPTS(.tag = "pg_largeobject_metadata",
3750 : .description = "pg_largeobject_metadata",
3751 : .section = SECTION_PRE_DATA,
3752 : .createStmt = lomOutQry->data));
3753 :
3754 70 : PQclear(lo_res);
3755 :
3756 70 : destroyPQExpBuffer(loFrozenQry);
3757 70 : destroyPQExpBuffer(loHorizonQry);
3758 70 : destroyPQExpBuffer(lomHorizonQry);
3759 70 : destroyPQExpBuffer(loOutQry);
3760 70 : destroyPQExpBuffer(lomOutQry);
3761 : }
3762 :
3763 168 : PQclear(res);
3764 :
3765 168 : free(qdatname);
3766 168 : destroyPQExpBuffer(dbQry);
3767 168 : destroyPQExpBuffer(delQry);
3768 168 : destroyPQExpBuffer(creaQry);
3769 168 : destroyPQExpBuffer(labelq);
3770 168 : }
3771 :
3772 : /*
3773 : * Collect any database-specific or role-and-database-specific SET options
3774 : * for this database, and append them to outbuf.
3775 : */
3776 : static void
3777 168 : dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
3778 : const char *dbname, Oid dboid)
3779 : {
3780 168 : PGconn *conn = GetConnection(AH);
3781 168 : PQExpBuffer buf = createPQExpBuffer();
3782 : PGresult *res;
3783 :
3784 : /* First collect database-specific options */
3785 168 : printfPQExpBuffer(buf, "SELECT unnest(setconfig) FROM pg_db_role_setting "
3786 : "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3787 : dboid);
3788 :
3789 168 : res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3790 :
3791 228 : for (int i = 0; i < PQntuples(res); i++)
3792 60 : makeAlterConfigCommand(conn, PQgetvalue(res, i, 0),
3793 : "DATABASE", dbname, NULL, NULL,
3794 : outbuf);
3795 :
3796 168 : PQclear(res);
3797 :
3798 : /* Now look for role-and-database-specific options */
3799 168 : printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3800 : "FROM pg_db_role_setting s, pg_roles r "
3801 : "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3802 : dboid);
3803 :
3804 168 : res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3805 :
3806 168 : for (int i = 0; i < PQntuples(res); i++)
3807 0 : makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
3808 0 : "ROLE", PQgetvalue(res, i, 0),
3809 : "DATABASE", dbname,
3810 : outbuf);
3811 :
3812 168 : PQclear(res);
3813 :
3814 168 : destroyPQExpBuffer(buf);
3815 168 : }
3816 :
3817 : /*
3818 : * dumpEncoding: put the correct encoding into the archive
3819 : */
3820 : static void
3821 370 : dumpEncoding(Archive *AH)
3822 : {
3823 370 : const char *encname = pg_encoding_to_char(AH->encoding);
3824 370 : PQExpBuffer qry = createPQExpBuffer();
3825 :
3826 370 : pg_log_info("saving encoding = %s", encname);
3827 :
3828 370 : appendPQExpBufferStr(qry, "SET client_encoding = ");
3829 370 : appendStringLiteralAH(qry, encname, AH);
3830 370 : appendPQExpBufferStr(qry, ";\n");
3831 :
3832 370 : ArchiveEntry(AH, nilCatalogId, createDumpId(),
3833 370 : ARCHIVE_OPTS(.tag = "ENCODING",
3834 : .description = "ENCODING",
3835 : .section = SECTION_PRE_DATA,
3836 : .createStmt = qry->data));
3837 :
3838 370 : destroyPQExpBuffer(qry);
3839 370 : }
3840 :
3841 :
3842 : /*
3843 : * dumpStdStrings: put the correct escape string behavior into the archive
3844 : */
3845 : static void
3846 370 : dumpStdStrings(Archive *AH)
3847 : {
3848 370 : const char *stdstrings = AH->std_strings ? "on" : "off";
3849 370 : PQExpBuffer qry = createPQExpBuffer();
3850 :
3851 370 : pg_log_info("saving \"standard_conforming_strings = %s\"",
3852 : stdstrings);
3853 :
3854 370 : appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3855 : stdstrings);
3856 :
3857 370 : ArchiveEntry(AH, nilCatalogId, createDumpId(),
3858 370 : ARCHIVE_OPTS(.tag = "STDSTRINGS",
3859 : .description = "STDSTRINGS",
3860 : .section = SECTION_PRE_DATA,
3861 : .createStmt = qry->data));
3862 :
3863 370 : destroyPQExpBuffer(qry);
3864 370 : }
3865 :
3866 : /*
3867 : * dumpSearchPath: record the active search_path in the archive
3868 : */
3869 : static void
3870 370 : dumpSearchPath(Archive *AH)
3871 : {
3872 370 : PQExpBuffer qry = createPQExpBuffer();
3873 370 : PQExpBuffer path = createPQExpBuffer();
3874 : PGresult *res;
3875 370 : char **schemanames = NULL;
3876 370 : int nschemanames = 0;
3877 : int i;
3878 :
3879 : /*
3880 : * We use the result of current_schemas(), not the search_path GUC,
3881 : * because that might contain wildcards such as "$user", which won't
3882 : * necessarily have the same value during restore. Also, this way avoids
3883 : * listing schemas that may appear in search_path but not actually exist,
3884 : * which seems like a prudent exclusion.
3885 : */
3886 370 : res = ExecuteSqlQueryForSingleRow(AH,
3887 : "SELECT pg_catalog.current_schemas(false)");
3888 :
3889 370 : if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3890 0 : pg_fatal("could not parse result of current_schemas()");
3891 :
3892 : /*
3893 : * We use set_config(), not a simple "SET search_path" command, because
3894 : * the latter has less-clean behavior if the search path is empty. While
3895 : * that's likely to get fixed at some point, it seems like a good idea to
3896 : * be as backwards-compatible as possible in what we put into archives.
3897 : */
3898 370 : for (i = 0; i < nschemanames; i++)
3899 : {
3900 0 : if (i > 0)
3901 0 : appendPQExpBufferStr(path, ", ");
3902 0 : appendPQExpBufferStr(path, fmtId(schemanames[i]));
3903 : }
3904 :
3905 370 : appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3906 370 : appendStringLiteralAH(qry, path->data, AH);
3907 370 : appendPQExpBufferStr(qry, ", false);\n");
3908 :
3909 370 : pg_log_info("saving \"search_path = %s\"", path->data);
3910 :
3911 370 : ArchiveEntry(AH, nilCatalogId, createDumpId(),
3912 370 : ARCHIVE_OPTS(.tag = "SEARCHPATH",
3913 : .description = "SEARCHPATH",
3914 : .section = SECTION_PRE_DATA,
3915 : .createStmt = qry->data));
3916 :
3917 : /* Also save it in AH->searchpath, in case we're doing plain text dump */
3918 370 : AH->searchpath = pg_strdup(qry->data);
3919 :
3920 370 : free(schemanames);
3921 370 : PQclear(res);
3922 370 : destroyPQExpBuffer(qry);
3923 370 : destroyPQExpBuffer(path);
3924 370 : }
3925 :
3926 :
3927 : /*
3928 : * getLOs:
3929 : * Collect schema-level data about large objects
3930 : */
3931 : static void
3932 314 : getLOs(Archive *fout)
3933 : {
3934 314 : DumpOptions *dopt = fout->dopt;
3935 314 : PQExpBuffer loQry = createPQExpBuffer();
3936 : PGresult *res;
3937 : int ntups;
3938 : int i;
3939 : int n;
3940 : int i_oid;
3941 : int i_lomowner;
3942 : int i_lomacl;
3943 : int i_acldefault;
3944 :
3945 314 : pg_log_info("reading large objects");
3946 :
3947 : /*
3948 : * Fetch LO OIDs and owner/ACL data. Order the data so that all the blobs
3949 : * with the same owner/ACL appear together.
3950 : */
3951 314 : appendPQExpBufferStr(loQry,
3952 : "SELECT oid, lomowner, lomacl, "
3953 : "acldefault('L', lomowner) AS acldefault "
3954 : "FROM pg_largeobject_metadata "
3955 : "ORDER BY lomowner, lomacl::pg_catalog.text, oid");
3956 :
3957 314 : res = ExecuteSqlQuery(fout, loQry->data, PGRES_TUPLES_OK);
3958 :
3959 314 : i_oid = PQfnumber(res, "oid");
3960 314 : i_lomowner = PQfnumber(res, "lomowner");
3961 314 : i_lomacl = PQfnumber(res, "lomacl");
3962 314 : i_acldefault = PQfnumber(res, "acldefault");
3963 :
3964 314 : ntups = PQntuples(res);
3965 :
3966 : /*
3967 : * Group the blobs into suitably-sized groups that have the same owner and
3968 : * ACL setting, and build a metadata and a data DumpableObject for each
3969 : * group. (If we supported initprivs for blobs, we'd have to insist that
3970 : * groups also share initprivs settings, since the DumpableObject only has
3971 : * room for one.) i is the index of the first tuple in the current group,
3972 : * and n is the number of tuples we include in the group.
3973 : */
3974 504 : for (i = 0; i < ntups; i += n)
3975 : {
3976 190 : Oid thisoid = atooid(PQgetvalue(res, i, i_oid));
3977 190 : char *thisowner = PQgetvalue(res, i, i_lomowner);
3978 190 : char *thisacl = PQgetvalue(res, i, i_lomacl);
3979 : LoInfo *loinfo;
3980 : DumpableObject *lodata;
3981 : char namebuf[64];
3982 :
3983 : /* Scan to find first tuple not to be included in group */
3984 190 : n = 1;
3985 220 : while (n < MAX_BLOBS_PER_ARCHIVE_ENTRY && i + n < ntups)
3986 : {
3987 120 : if (strcmp(thisowner, PQgetvalue(res, i + n, i_lomowner)) != 0 ||
3988 110 : strcmp(thisacl, PQgetvalue(res, i + n, i_lomacl)) != 0)
3989 : break;
3990 30 : n++;
3991 : }
3992 :
3993 : /* Build the metadata DumpableObject */
3994 190 : loinfo = (LoInfo *) pg_malloc(offsetof(LoInfo, looids) + n * sizeof(Oid));
3995 :
3996 190 : loinfo->dobj.objType = DO_LARGE_OBJECT;
3997 190 : loinfo->dobj.catId.tableoid = LargeObjectRelationId;
3998 190 : loinfo->dobj.catId.oid = thisoid;
3999 190 : AssignDumpId(&loinfo->dobj);
4000 :
4001 190 : if (n > 1)
4002 20 : snprintf(namebuf, sizeof(namebuf), "%u..%u", thisoid,
4003 20 : atooid(PQgetvalue(res, i + n - 1, i_oid)));
4004 : else
4005 170 : snprintf(namebuf, sizeof(namebuf), "%u", thisoid);
4006 190 : loinfo->dobj.name = pg_strdup(namebuf);
4007 190 : loinfo->dacl.acl = pg_strdup(thisacl);
4008 190 : loinfo->dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
4009 190 : loinfo->dacl.privtype = 0;
4010 190 : loinfo->dacl.initprivs = NULL;
4011 190 : loinfo->rolname = getRoleName(thisowner);
4012 190 : loinfo->numlos = n;
4013 190 : loinfo->looids[0] = thisoid;
4014 : /* Collect OIDs of the remaining blobs in this group */
4015 220 : for (int k = 1; k < n; k++)
4016 : {
4017 : CatalogId extraID;
4018 :
4019 30 : loinfo->looids[k] = atooid(PQgetvalue(res, i + k, i_oid));
4020 :
4021 : /* Make sure we can look up loinfo by any of the blobs' OIDs */
4022 30 : extraID.tableoid = LargeObjectRelationId;
4023 30 : extraID.oid = loinfo->looids[k];
4024 30 : recordAdditionalCatalogID(extraID, &loinfo->dobj);
4025 : }
4026 :
4027 : /* LOs have data */
4028 190 : loinfo->dobj.components |= DUMP_COMPONENT_DATA;
4029 :
4030 : /* Mark whether LO group has a non-empty ACL */
4031 190 : if (!PQgetisnull(res, i, i_lomacl))
4032 90 : loinfo->dobj.components |= DUMP_COMPONENT_ACL;
4033 :
4034 : /*
4035 : * In binary-upgrade mode for LOs, we do *not* dump out the LO data,
4036 : * as it will be copied by pg_upgrade, which simply copies the
4037 : * pg_largeobject table. We *do* however dump out anything but the
4038 : * data, as pg_upgrade copies just pg_largeobject, but not
4039 : * pg_largeobject_metadata, after the dump is restored. In versions
4040 : * before v12, this is done via proper large object commands. In
4041 : * newer versions, we dump the content of pg_largeobject_metadata and
4042 : * any associated pg_shdepend rows, which is faster to restore. (On
4043 : * <v12, pg_largeobject_metadata was created WITH OIDS, so the OID
4044 : * column is hidden and won't be dumped.)
4045 : */
4046 190 : if (dopt->binary_upgrade)
4047 : {
4048 26 : if (fout->remoteVersion >= 120000)
4049 : {
4050 : /*
4051 : * We should've saved pg_largeobject_metadata's dump ID before
4052 : * this point.
4053 : */
4054 : Assert(lo_metadata_dumpId);
4055 :
4056 26 : loinfo->dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL | DUMP_COMPONENT_DEFINITION);
4057 :
4058 : /*
4059 : * Mark the large object as dependent on
4060 : * pg_largeobject_metadata so that any large object
4061 : * comments/seclables are dumped after it.
4062 : */
4063 26 : loinfo->dobj.dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
4064 26 : loinfo->dobj.dependencies[0] = lo_metadata_dumpId;
4065 26 : loinfo->dobj.nDeps = loinfo->dobj.allocDeps = 1;
4066 : }
4067 : else
4068 0 : loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
4069 : }
4070 :
4071 : /*
4072 : * Create a "BLOBS" data item for the group, too. This is just a
4073 : * placeholder for sorting; it carries no data now.
4074 : */
4075 190 : lodata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
4076 190 : lodata->objType = DO_LARGE_OBJECT_DATA;
4077 190 : lodata->catId = nilCatalogId;
4078 190 : AssignDumpId(lodata);
4079 190 : lodata->name = pg_strdup(namebuf);
4080 190 : lodata->components |= DUMP_COMPONENT_DATA;
4081 : /* Set up explicit dependency from data to metadata */
4082 190 : lodata->dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
4083 190 : lodata->dependencies[0] = loinfo->dobj.dumpId;
4084 190 : lodata->nDeps = lodata->allocDeps = 1;
4085 : }
4086 :
4087 314 : PQclear(res);
4088 314 : destroyPQExpBuffer(loQry);
4089 314 : }
4090 :
4091 : /*
4092 : * dumpLO
4093 : *
4094 : * dump the definition (metadata) of the given large object group
4095 : */
4096 : static void
4097 178 : dumpLO(Archive *fout, const LoInfo *loinfo)
4098 : {
4099 178 : PQExpBuffer cquery = createPQExpBuffer();
4100 :
4101 : /*
4102 : * The "definition" is just a newline-separated list of OIDs. We need to
4103 : * put something into the dropStmt too, but it can just be a comment.
4104 : */
4105 386 : for (int i = 0; i < loinfo->numlos; i++)
4106 208 : appendPQExpBuffer(cquery, "%u\n", loinfo->looids[i]);
4107 :
4108 178 : if (loinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4109 164 : ArchiveEntry(fout, loinfo->dobj.catId, loinfo->dobj.dumpId,
4110 164 : ARCHIVE_OPTS(.tag = loinfo->dobj.name,
4111 : .owner = loinfo->rolname,
4112 : .description = "BLOB METADATA",
4113 : .section = SECTION_DATA,
4114 : .createStmt = cquery->data,
4115 : .dropStmt = "-- dummy"));
4116 :
4117 : /*
4118 : * Dump per-blob comments and seclabels if any. We assume these are rare
4119 : * enough that it's okay to generate retail TOC entries for them.
4120 : */
4121 178 : if (loinfo->dobj.dump & (DUMP_COMPONENT_COMMENT |
4122 : DUMP_COMPONENT_SECLABEL))
4123 : {
4124 230 : for (int i = 0; i < loinfo->numlos; i++)
4125 : {
4126 : CatalogId catId;
4127 : char namebuf[32];
4128 :
4129 : /* Build identifying info for this blob */
4130 130 : catId.tableoid = loinfo->dobj.catId.tableoid;
4131 130 : catId.oid = loinfo->looids[i];
4132 130 : snprintf(namebuf, sizeof(namebuf), "%u", loinfo->looids[i]);
4133 :
4134 130 : if (loinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4135 130 : dumpComment(fout, "LARGE OBJECT", namebuf,
4136 130 : NULL, loinfo->rolname,
4137 130 : catId, 0, loinfo->dobj.dumpId);
4138 :
4139 130 : if (loinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4140 20 : dumpSecLabel(fout, "LARGE OBJECT", namebuf,
4141 20 : NULL, loinfo->rolname,
4142 20 : catId, 0, loinfo->dobj.dumpId);
4143 : }
4144 : }
4145 :
4146 : /*
4147 : * Dump the ACLs if any (remember that all blobs in the group will have
4148 : * the same ACL). If there's just one blob, dump a simple ACL entry; if
4149 : * there's more, make a "LARGE OBJECTS" entry that really contains only
4150 : * the ACL for the first blob. _printTocEntry() will be cued by the tag
4151 : * string to emit a mutated version for each blob.
4152 : */
4153 178 : if (loinfo->dobj.dump & DUMP_COMPONENT_ACL)
4154 : {
4155 : char namebuf[32];
4156 :
4157 : /* Build identifying info for the first blob */
4158 78 : snprintf(namebuf, sizeof(namebuf), "%u", loinfo->looids[0]);
4159 :
4160 78 : if (loinfo->numlos > 1)
4161 : {
4162 : char tagbuf[64];
4163 :
4164 0 : snprintf(tagbuf, sizeof(tagbuf), "LARGE OBJECTS %u..%u",
4165 0 : loinfo->looids[0], loinfo->looids[loinfo->numlos - 1]);
4166 :
4167 0 : dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId,
4168 : "LARGE OBJECT", namebuf, NULL, NULL,
4169 0 : tagbuf, loinfo->rolname, &loinfo->dacl);
4170 : }
4171 : else
4172 : {
4173 78 : dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId,
4174 : "LARGE OBJECT", namebuf, NULL, NULL,
4175 78 : NULL, loinfo->rolname, &loinfo->dacl);
4176 : }
4177 : }
4178 :
4179 178 : destroyPQExpBuffer(cquery);
4180 178 : }
4181 :
4182 : /*
4183 : * dumpLOs:
4184 : * dump the data contents of the large objects in the given group
4185 : */
4186 : static int
4187 156 : dumpLOs(Archive *fout, const void *arg)
4188 : {
4189 156 : const LoInfo *loinfo = (const LoInfo *) arg;
4190 156 : PGconn *conn = GetConnection(fout);
4191 : char buf[LOBBUFSIZE];
4192 :
4193 156 : pg_log_info("saving large objects \"%s\"", loinfo->dobj.name);
4194 :
4195 328 : for (int i = 0; i < loinfo->numlos; i++)
4196 : {
4197 172 : Oid loOid = loinfo->looids[i];
4198 : int loFd;
4199 : int cnt;
4200 :
4201 : /* Open the LO */
4202 172 : loFd = lo_open(conn, loOid, INV_READ);
4203 172 : if (loFd == -1)
4204 0 : pg_fatal("could not open large object %u: %s",
4205 : loOid, PQerrorMessage(conn));
4206 :
4207 172 : StartLO(fout, loOid);
4208 :
4209 : /* Now read it in chunks, sending data to archive */
4210 : do
4211 : {
4212 262 : cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
4213 262 : if (cnt < 0)
4214 0 : pg_fatal("error reading large object %u: %s",
4215 : loOid, PQerrorMessage(conn));
4216 :
4217 262 : WriteData(fout, buf, cnt);
4218 262 : } while (cnt > 0);
4219 :
4220 172 : lo_close(conn, loFd);
4221 :
4222 172 : EndLO(fout, loOid);
4223 : }
4224 :
4225 156 : return 1;
4226 : }
4227 :
4228 : /*
4229 : * getPolicies
4230 : * get information about all RLS policies on dumpable tables.
4231 : */
4232 : void
4233 370 : getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
4234 : {
4235 370 : DumpOptions *dopt = fout->dopt;
4236 : PQExpBuffer query;
4237 : PQExpBuffer tbloids;
4238 : PGresult *res;
4239 : PolicyInfo *polinfo;
4240 : int i_oid;
4241 : int i_tableoid;
4242 : int i_polrelid;
4243 : int i_polname;
4244 : int i_polcmd;
4245 : int i_polpermissive;
4246 : int i_polroles;
4247 : int i_polqual;
4248 : int i_polwithcheck;
4249 : int i,
4250 : j,
4251 : ntups;
4252 :
4253 : /* No policies before 9.5 */
4254 370 : if (fout->remoteVersion < 90500)
4255 0 : return;
4256 :
4257 : /* Skip if --no-policies was specified */
4258 370 : if (dopt->no_policies)
4259 2 : return;
4260 :
4261 368 : query = createPQExpBuffer();
4262 368 : tbloids = createPQExpBuffer();
4263 :
4264 : /*
4265 : * Identify tables of interest, and check which ones have RLS enabled.
4266 : */
4267 368 : appendPQExpBufferChar(tbloids, '{');
4268 98278 : for (i = 0; i < numTables; i++)
4269 : {
4270 97910 : TableInfo *tbinfo = &tblinfo[i];
4271 :
4272 : /* Ignore row security on tables not to be dumped */
4273 97910 : if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
4274 83342 : continue;
4275 :
4276 : /* It can't have RLS or policies if it's not a table */
4277 14568 : if (tbinfo->relkind != RELKIND_RELATION &&
4278 4160 : tbinfo->relkind != RELKIND_PARTITIONED_TABLE)
4279 2940 : continue;
4280 :
4281 : /* Add it to the list of table OIDs to be probed below */
4282 11628 : if (tbloids->len > 1) /* do we have more than the '{'? */
4283 11388 : appendPQExpBufferChar(tbloids, ',');
4284 11628 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
4285 :
4286 : /* Is RLS enabled? (That's separate from whether it has policies) */
4287 11628 : if (tbinfo->rowsec)
4288 : {
4289 118 : tbinfo->dobj.components |= DUMP_COMPONENT_POLICY;
4290 :
4291 : /*
4292 : * We represent RLS being enabled on a table by creating a
4293 : * PolicyInfo object with null polname.
4294 : *
4295 : * Note: use tableoid 0 so that this object won't be mistaken for
4296 : * something that pg_depend entries apply to.
4297 : */
4298 118 : polinfo = pg_malloc(sizeof(PolicyInfo));
4299 118 : polinfo->dobj.objType = DO_POLICY;
4300 118 : polinfo->dobj.catId.tableoid = 0;
4301 118 : polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
4302 118 : AssignDumpId(&polinfo->dobj);
4303 118 : polinfo->dobj.namespace = tbinfo->dobj.namespace;
4304 118 : polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
4305 118 : polinfo->poltable = tbinfo;
4306 118 : polinfo->polname = NULL;
4307 118 : polinfo->polcmd = '\0';
4308 118 : polinfo->polpermissive = 0;
4309 118 : polinfo->polroles = NULL;
4310 118 : polinfo->polqual = NULL;
4311 118 : polinfo->polwithcheck = NULL;
4312 : }
4313 : }
4314 368 : appendPQExpBufferChar(tbloids, '}');
4315 :
4316 : /*
4317 : * Now, read all RLS policies belonging to the tables of interest, and
4318 : * create PolicyInfo objects for them. (Note that we must filter the
4319 : * results server-side not locally, because we dare not apply pg_get_expr
4320 : * to tables we don't have lock on.)
4321 : */
4322 368 : pg_log_info("reading row-level security policies");
4323 :
4324 368 : printfPQExpBuffer(query,
4325 : "SELECT pol.oid, pol.tableoid, pol.polrelid, pol.polname, pol.polcmd, ");
4326 368 : if (fout->remoteVersion >= 100000)
4327 368 : appendPQExpBufferStr(query, "pol.polpermissive, ");
4328 : else
4329 0 : appendPQExpBufferStr(query, "'t' as polpermissive, ");
4330 368 : appendPQExpBuffer(query,
4331 : "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
4332 : " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
4333 : "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
4334 : "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
4335 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
4336 : "JOIN pg_catalog.pg_policy pol ON (src.tbloid = pol.polrelid)",
4337 : tbloids->data);
4338 :
4339 368 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4340 :
4341 368 : ntups = PQntuples(res);
4342 368 : if (ntups > 0)
4343 : {
4344 98 : i_oid = PQfnumber(res, "oid");
4345 98 : i_tableoid = PQfnumber(res, "tableoid");
4346 98 : i_polrelid = PQfnumber(res, "polrelid");
4347 98 : i_polname = PQfnumber(res, "polname");
4348 98 : i_polcmd = PQfnumber(res, "polcmd");
4349 98 : i_polpermissive = PQfnumber(res, "polpermissive");
4350 98 : i_polroles = PQfnumber(res, "polroles");
4351 98 : i_polqual = PQfnumber(res, "polqual");
4352 98 : i_polwithcheck = PQfnumber(res, "polwithcheck");
4353 :
4354 98 : polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
4355 :
4356 716 : for (j = 0; j < ntups; j++)
4357 : {
4358 618 : Oid polrelid = atooid(PQgetvalue(res, j, i_polrelid));
4359 618 : TableInfo *tbinfo = findTableByOid(polrelid);
4360 :
4361 618 : tbinfo->dobj.components |= DUMP_COMPONENT_POLICY;
4362 :
4363 618 : polinfo[j].dobj.objType = DO_POLICY;
4364 618 : polinfo[j].dobj.catId.tableoid =
4365 618 : atooid(PQgetvalue(res, j, i_tableoid));
4366 618 : polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
4367 618 : AssignDumpId(&polinfo[j].dobj);
4368 618 : polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4369 618 : polinfo[j].poltable = tbinfo;
4370 618 : polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
4371 618 : polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
4372 :
4373 618 : polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
4374 618 : polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
4375 :
4376 618 : if (PQgetisnull(res, j, i_polroles))
4377 266 : polinfo[j].polroles = NULL;
4378 : else
4379 352 : polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
4380 :
4381 618 : if (PQgetisnull(res, j, i_polqual))
4382 88 : polinfo[j].polqual = NULL;
4383 : else
4384 530 : polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
4385 :
4386 618 : if (PQgetisnull(res, j, i_polwithcheck))
4387 324 : polinfo[j].polwithcheck = NULL;
4388 : else
4389 294 : polinfo[j].polwithcheck
4390 294 : = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
4391 : }
4392 : }
4393 :
4394 368 : PQclear(res);
4395 :
4396 368 : destroyPQExpBuffer(query);
4397 368 : destroyPQExpBuffer(tbloids);
4398 : }
4399 :
4400 : /*
4401 : * dumpPolicy
4402 : * dump the definition of the given policy
4403 : */
4404 : static void
4405 736 : dumpPolicy(Archive *fout, const PolicyInfo *polinfo)
4406 : {
4407 736 : DumpOptions *dopt = fout->dopt;
4408 736 : TableInfo *tbinfo = polinfo->poltable;
4409 : PQExpBuffer query;
4410 : PQExpBuffer delqry;
4411 : PQExpBuffer polprefix;
4412 : char *qtabname;
4413 : const char *cmd;
4414 : char *tag;
4415 :
4416 : /* Do nothing if not dumping schema */
4417 736 : if (!dopt->dumpSchema)
4418 98 : return;
4419 :
4420 : /*
4421 : * If polname is NULL, then this record is just indicating that ROW LEVEL
4422 : * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
4423 : * ROW LEVEL SECURITY.
4424 : */
4425 638 : if (polinfo->polname == NULL)
4426 : {
4427 104 : query = createPQExpBuffer();
4428 :
4429 104 : appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
4430 104 : fmtQualifiedDumpable(tbinfo));
4431 :
4432 : /*
4433 : * We must emit the ROW SECURITY object's dependency on its table
4434 : * explicitly, because it will not match anything in pg_depend (unlike
4435 : * the case for other PolicyInfo objects).
4436 : */
4437 104 : if (polinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4438 104 : ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
4439 104 : ARCHIVE_OPTS(.tag = polinfo->dobj.name,
4440 : .namespace = polinfo->dobj.namespace->dobj.name,
4441 : .owner = tbinfo->rolname,
4442 : .description = "ROW SECURITY",
4443 : .section = SECTION_POST_DATA,
4444 : .createStmt = query->data,
4445 : .deps = &(tbinfo->dobj.dumpId),
4446 : .nDeps = 1));
4447 :
4448 104 : destroyPQExpBuffer(query);
4449 104 : return;
4450 : }
4451 :
4452 534 : if (polinfo->polcmd == '*')
4453 178 : cmd = "";
4454 356 : else if (polinfo->polcmd == 'r')
4455 94 : cmd = " FOR SELECT";
4456 262 : else if (polinfo->polcmd == 'a')
4457 74 : cmd = " FOR INSERT";
4458 188 : else if (polinfo->polcmd == 'w')
4459 94 : cmd = " FOR UPDATE";
4460 94 : else if (polinfo->polcmd == 'd')
4461 94 : cmd = " FOR DELETE";
4462 : else
4463 0 : pg_fatal("unexpected policy command type: %c",
4464 : polinfo->polcmd);
4465 :
4466 534 : query = createPQExpBuffer();
4467 534 : delqry = createPQExpBuffer();
4468 534 : polprefix = createPQExpBuffer();
4469 :
4470 534 : qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
4471 :
4472 534 : appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
4473 :
4474 534 : appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
4475 534 : !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
4476 :
4477 534 : if (polinfo->polroles != NULL)
4478 296 : appendPQExpBuffer(query, " TO %s", polinfo->polroles);
4479 :
4480 534 : if (polinfo->polqual != NULL)
4481 460 : appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
4482 :
4483 534 : if (polinfo->polwithcheck != NULL)
4484 252 : appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
4485 :
4486 534 : appendPQExpBufferStr(query, ";\n");
4487 :
4488 534 : appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
4489 534 : appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
4490 :
4491 534 : appendPQExpBuffer(polprefix, "POLICY %s ON",
4492 534 : fmtId(polinfo->polname));
4493 :
4494 534 : tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
4495 :
4496 534 : if (polinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4497 534 : ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
4498 534 : ARCHIVE_OPTS(.tag = tag,
4499 : .namespace = polinfo->dobj.namespace->dobj.name,
4500 : .owner = tbinfo->rolname,
4501 : .description = "POLICY",
4502 : .section = SECTION_POST_DATA,
4503 : .createStmt = query->data,
4504 : .dropStmt = delqry->data));
4505 :
4506 534 : if (polinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4507 74 : dumpComment(fout, polprefix->data, qtabname,
4508 74 : tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
4509 74 : polinfo->dobj.catId, 0, polinfo->dobj.dumpId);
4510 :
4511 534 : free(tag);
4512 534 : destroyPQExpBuffer(query);
4513 534 : destroyPQExpBuffer(delqry);
4514 534 : destroyPQExpBuffer(polprefix);
4515 534 : free(qtabname);
4516 : }
4517 :
4518 : /*
4519 : * getPublications
4520 : * get information about publications
4521 : */
4522 : void
4523 370 : getPublications(Archive *fout)
4524 : {
4525 370 : DumpOptions *dopt = fout->dopt;
4526 : PQExpBuffer query;
4527 : PGresult *res;
4528 : PublicationInfo *pubinfo;
4529 : int i_tableoid;
4530 : int i_oid;
4531 : int i_pubname;
4532 : int i_pubowner;
4533 : int i_puballtables;
4534 : int i_puballsequences;
4535 : int i_pubinsert;
4536 : int i_pubupdate;
4537 : int i_pubdelete;
4538 : int i_pubtruncate;
4539 : int i_pubviaroot;
4540 : int i_pubgencols;
4541 : int i,
4542 : ntups;
4543 :
4544 370 : if (dopt->no_publications || fout->remoteVersion < 100000)
4545 0 : return;
4546 :
4547 370 : query = createPQExpBuffer();
4548 :
4549 : /* Get the publications. */
4550 370 : appendPQExpBufferStr(query, "SELECT p.tableoid, p.oid, p.pubname, "
4551 : "p.pubowner, p.puballtables, p.pubinsert, "
4552 : "p.pubupdate, p.pubdelete, ");
4553 :
4554 370 : if (fout->remoteVersion >= 110000)
4555 370 : appendPQExpBufferStr(query, "p.pubtruncate, ");
4556 : else
4557 0 : appendPQExpBufferStr(query, "false AS pubtruncate, ");
4558 :
4559 370 : if (fout->remoteVersion >= 130000)
4560 370 : appendPQExpBufferStr(query, "p.pubviaroot, ");
4561 : else
4562 0 : appendPQExpBufferStr(query, "false AS pubviaroot, ");
4563 :
4564 370 : if (fout->remoteVersion >= 180000)
4565 370 : appendPQExpBufferStr(query, "p.pubgencols, ");
4566 : else
4567 0 : appendPQExpBuffer(query, "'%c' AS pubgencols, ", PUBLISH_GENCOLS_NONE);
4568 :
4569 370 : if (fout->remoteVersion >= 190000)
4570 370 : appendPQExpBufferStr(query, "p.puballsequences ");
4571 : else
4572 0 : appendPQExpBufferStr(query, "false AS puballsequences ");
4573 :
4574 370 : appendPQExpBufferStr(query, "FROM pg_publication p");
4575 :
4576 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4577 :
4578 370 : ntups = PQntuples(res);
4579 :
4580 370 : if (ntups == 0)
4581 252 : goto cleanup;
4582 :
4583 118 : i_tableoid = PQfnumber(res, "tableoid");
4584 118 : i_oid = PQfnumber(res, "oid");
4585 118 : i_pubname = PQfnumber(res, "pubname");
4586 118 : i_pubowner = PQfnumber(res, "pubowner");
4587 118 : i_puballtables = PQfnumber(res, "puballtables");
4588 118 : i_puballsequences = PQfnumber(res, "puballsequences");
4589 118 : i_pubinsert = PQfnumber(res, "pubinsert");
4590 118 : i_pubupdate = PQfnumber(res, "pubupdate");
4591 118 : i_pubdelete = PQfnumber(res, "pubdelete");
4592 118 : i_pubtruncate = PQfnumber(res, "pubtruncate");
4593 118 : i_pubviaroot = PQfnumber(res, "pubviaroot");
4594 118 : i_pubgencols = PQfnumber(res, "pubgencols");
4595 :
4596 118 : pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
4597 :
4598 904 : for (i = 0; i < ntups; i++)
4599 : {
4600 786 : pubinfo[i].dobj.objType = DO_PUBLICATION;
4601 786 : pubinfo[i].dobj.catId.tableoid =
4602 786 : atooid(PQgetvalue(res, i, i_tableoid));
4603 786 : pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4604 786 : AssignDumpId(&pubinfo[i].dobj);
4605 786 : pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
4606 786 : pubinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_pubowner));
4607 786 : pubinfo[i].puballtables =
4608 786 : (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
4609 786 : pubinfo[i].puballsequences =
4610 786 : (strcmp(PQgetvalue(res, i, i_puballsequences), "t") == 0);
4611 786 : pubinfo[i].pubinsert =
4612 786 : (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
4613 786 : pubinfo[i].pubupdate =
4614 786 : (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
4615 786 : pubinfo[i].pubdelete =
4616 786 : (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
4617 786 : pubinfo[i].pubtruncate =
4618 786 : (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
4619 786 : pubinfo[i].pubviaroot =
4620 786 : (strcmp(PQgetvalue(res, i, i_pubviaroot), "t") == 0);
4621 786 : pubinfo[i].pubgencols_type =
4622 786 : *(PQgetvalue(res, i, i_pubgencols));
4623 :
4624 : /* Decide whether we want to dump it */
4625 786 : selectDumpableObject(&(pubinfo[i].dobj), fout);
4626 : }
4627 :
4628 118 : cleanup:
4629 370 : PQclear(res);
4630 :
4631 370 : destroyPQExpBuffer(query);
4632 : }
4633 :
4634 : /*
4635 : * dumpPublication
4636 : * dump the definition of the given publication
4637 : */
4638 : static void
4639 654 : dumpPublication(Archive *fout, const PublicationInfo *pubinfo)
4640 : {
4641 654 : DumpOptions *dopt = fout->dopt;
4642 : PQExpBuffer delq;
4643 : PQExpBuffer query;
4644 : char *qpubname;
4645 654 : bool first = true;
4646 :
4647 : /* Do nothing if not dumping schema */
4648 654 : if (!dopt->dumpSchema)
4649 84 : return;
4650 :
4651 570 : delq = createPQExpBuffer();
4652 570 : query = createPQExpBuffer();
4653 :
4654 570 : qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
4655 :
4656 570 : appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
4657 : qpubname);
4658 :
4659 570 : appendPQExpBuffer(query, "CREATE PUBLICATION %s",
4660 : qpubname);
4661 :
4662 570 : if (pubinfo->puballtables && pubinfo->puballsequences)
4663 74 : appendPQExpBufferStr(query, " FOR ALL TABLES, ALL SEQUENCES");
4664 496 : else if (pubinfo->puballtables)
4665 76 : appendPQExpBufferStr(query, " FOR ALL TABLES");
4666 420 : else if (pubinfo->puballsequences)
4667 74 : appendPQExpBufferStr(query, " FOR ALL SEQUENCES");
4668 :
4669 570 : appendPQExpBufferStr(query, " WITH (publish = '");
4670 570 : if (pubinfo->pubinsert)
4671 : {
4672 422 : appendPQExpBufferStr(query, "insert");
4673 422 : first = false;
4674 : }
4675 :
4676 570 : if (pubinfo->pubupdate)
4677 : {
4678 422 : if (!first)
4679 422 : appendPQExpBufferStr(query, ", ");
4680 :
4681 422 : appendPQExpBufferStr(query, "update");
4682 422 : first = false;
4683 : }
4684 :
4685 570 : if (pubinfo->pubdelete)
4686 : {
4687 422 : if (!first)
4688 422 : appendPQExpBufferStr(query, ", ");
4689 :
4690 422 : appendPQExpBufferStr(query, "delete");
4691 422 : first = false;
4692 : }
4693 :
4694 570 : if (pubinfo->pubtruncate)
4695 : {
4696 422 : if (!first)
4697 422 : appendPQExpBufferStr(query, ", ");
4698 :
4699 422 : appendPQExpBufferStr(query, "truncate");
4700 422 : first = false;
4701 : }
4702 :
4703 570 : appendPQExpBufferChar(query, '\'');
4704 :
4705 570 : if (pubinfo->pubviaroot)
4706 10 : appendPQExpBufferStr(query, ", publish_via_partition_root = true");
4707 :
4708 570 : if (pubinfo->pubgencols_type == PUBLISH_GENCOLS_STORED)
4709 74 : appendPQExpBufferStr(query, ", publish_generated_columns = stored");
4710 :
4711 570 : appendPQExpBufferStr(query, ");\n");
4712 :
4713 570 : if (pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4714 570 : ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
4715 570 : ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
4716 : .owner = pubinfo->rolname,
4717 : .description = "PUBLICATION",
4718 : .section = SECTION_POST_DATA,
4719 : .createStmt = query->data,
4720 : .dropStmt = delq->data));
4721 :
4722 570 : if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4723 74 : dumpComment(fout, "PUBLICATION", qpubname,
4724 74 : NULL, pubinfo->rolname,
4725 74 : pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4726 :
4727 570 : if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4728 0 : dumpSecLabel(fout, "PUBLICATION", qpubname,
4729 0 : NULL, pubinfo->rolname,
4730 0 : pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4731 :
4732 570 : destroyPQExpBuffer(delq);
4733 570 : destroyPQExpBuffer(query);
4734 570 : free(qpubname);
4735 : }
4736 :
4737 : /*
4738 : * getPublicationNamespaces
4739 : * get information about publication membership for dumpable schemas.
4740 : */
4741 : void
4742 370 : getPublicationNamespaces(Archive *fout)
4743 : {
4744 : PQExpBuffer query;
4745 : PGresult *res;
4746 : PublicationSchemaInfo *pubsinfo;
4747 370 : DumpOptions *dopt = fout->dopt;
4748 : int i_tableoid;
4749 : int i_oid;
4750 : int i_pnpubid;
4751 : int i_pnnspid;
4752 : int i,
4753 : j,
4754 : ntups;
4755 :
4756 370 : if (dopt->no_publications || fout->remoteVersion < 150000)
4757 0 : return;
4758 :
4759 370 : query = createPQExpBuffer();
4760 :
4761 : /* Collect all publication membership info. */
4762 370 : appendPQExpBufferStr(query,
4763 : "SELECT tableoid, oid, pnpubid, pnnspid "
4764 : "FROM pg_catalog.pg_publication_namespace");
4765 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4766 :
4767 370 : ntups = PQntuples(res);
4768 :
4769 370 : i_tableoid = PQfnumber(res, "tableoid");
4770 370 : i_oid = PQfnumber(res, "oid");
4771 370 : i_pnpubid = PQfnumber(res, "pnpubid");
4772 370 : i_pnnspid = PQfnumber(res, "pnnspid");
4773 :
4774 : /* this allocation may be more than we need */
4775 370 : pubsinfo = pg_malloc(ntups * sizeof(PublicationSchemaInfo));
4776 370 : j = 0;
4777 :
4778 644 : for (i = 0; i < ntups; i++)
4779 : {
4780 274 : Oid pnpubid = atooid(PQgetvalue(res, i, i_pnpubid));
4781 274 : Oid pnnspid = atooid(PQgetvalue(res, i, i_pnnspid));
4782 : PublicationInfo *pubinfo;
4783 : NamespaceInfo *nspinfo;
4784 :
4785 : /*
4786 : * Ignore any entries for which we aren't interested in either the
4787 : * publication or the rel.
4788 : */
4789 274 : pubinfo = findPublicationByOid(pnpubid);
4790 274 : if (pubinfo == NULL)
4791 0 : continue;
4792 274 : nspinfo = findNamespaceByOid(pnnspid);
4793 274 : if (nspinfo == NULL)
4794 0 : continue;
4795 :
4796 : /* OK, make a DumpableObject for this relationship */
4797 274 : pubsinfo[j].dobj.objType = DO_PUBLICATION_TABLE_IN_SCHEMA;
4798 274 : pubsinfo[j].dobj.catId.tableoid =
4799 274 : atooid(PQgetvalue(res, i, i_tableoid));
4800 274 : pubsinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4801 274 : AssignDumpId(&pubsinfo[j].dobj);
4802 274 : pubsinfo[j].dobj.namespace = nspinfo->dobj.namespace;
4803 274 : pubsinfo[j].dobj.name = nspinfo->dobj.name;
4804 274 : pubsinfo[j].publication = pubinfo;
4805 274 : pubsinfo[j].pubschema = nspinfo;
4806 :
4807 : /* Decide whether we want to dump it */
4808 274 : selectDumpablePublicationObject(&(pubsinfo[j].dobj), fout);
4809 :
4810 274 : j++;
4811 : }
4812 :
4813 370 : PQclear(res);
4814 370 : destroyPQExpBuffer(query);
4815 : }
4816 :
4817 : /*
4818 : * getPublicationTables
4819 : * get information about publication membership for dumpable tables.
4820 : */
4821 : void
4822 370 : getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
4823 : {
4824 : PQExpBuffer query;
4825 : PGresult *res;
4826 : PublicationRelInfo *pubrinfo;
4827 370 : DumpOptions *dopt = fout->dopt;
4828 : int i_tableoid;
4829 : int i_oid;
4830 : int i_prpubid;
4831 : int i_prrelid;
4832 : int i_prrelqual;
4833 : int i_prattrs;
4834 : int i,
4835 : j,
4836 : ntups;
4837 :
4838 370 : if (dopt->no_publications || fout->remoteVersion < 100000)
4839 0 : return;
4840 :
4841 370 : query = createPQExpBuffer();
4842 :
4843 : /* Collect all publication membership info. */
4844 370 : if (fout->remoteVersion >= 150000)
4845 370 : appendPQExpBufferStr(query,
4846 : "SELECT tableoid, oid, prpubid, prrelid, "
4847 : "pg_catalog.pg_get_expr(prqual, prrelid) AS prrelqual, "
4848 : "(CASE\n"
4849 : " WHEN pr.prattrs IS NOT NULL THEN\n"
4850 : " (SELECT array_agg(attname)\n"
4851 : " FROM\n"
4852 : " pg_catalog.generate_series(0, pg_catalog.array_upper(pr.prattrs::pg_catalog.int2[], 1)) s,\n"
4853 : " pg_catalog.pg_attribute\n"
4854 : " WHERE attrelid = pr.prrelid AND attnum = prattrs[s])\n"
4855 : " ELSE NULL END) prattrs "
4856 : "FROM pg_catalog.pg_publication_rel pr");
4857 : else
4858 0 : appendPQExpBufferStr(query,
4859 : "SELECT tableoid, oid, prpubid, prrelid, "
4860 : "NULL AS prrelqual, NULL AS prattrs "
4861 : "FROM pg_catalog.pg_publication_rel");
4862 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4863 :
4864 370 : ntups = PQntuples(res);
4865 :
4866 370 : i_tableoid = PQfnumber(res, "tableoid");
4867 370 : i_oid = PQfnumber(res, "oid");
4868 370 : i_prpubid = PQfnumber(res, "prpubid");
4869 370 : i_prrelid = PQfnumber(res, "prrelid");
4870 370 : i_prrelqual = PQfnumber(res, "prrelqual");
4871 370 : i_prattrs = PQfnumber(res, "prattrs");
4872 :
4873 : /* this allocation may be more than we need */
4874 370 : pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
4875 370 : j = 0;
4876 :
4877 1154 : for (i = 0; i < ntups; i++)
4878 : {
4879 784 : Oid prpubid = atooid(PQgetvalue(res, i, i_prpubid));
4880 784 : Oid prrelid = atooid(PQgetvalue(res, i, i_prrelid));
4881 : PublicationInfo *pubinfo;
4882 : TableInfo *tbinfo;
4883 :
4884 : /*
4885 : * Ignore any entries for which we aren't interested in either the
4886 : * publication or the rel.
4887 : */
4888 784 : pubinfo = findPublicationByOid(prpubid);
4889 784 : if (pubinfo == NULL)
4890 0 : continue;
4891 784 : tbinfo = findTableByOid(prrelid);
4892 784 : if (tbinfo == NULL)
4893 0 : continue;
4894 :
4895 : /* OK, make a DumpableObject for this relationship */
4896 784 : pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
4897 784 : pubrinfo[j].dobj.catId.tableoid =
4898 784 : atooid(PQgetvalue(res, i, i_tableoid));
4899 784 : pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4900 784 : AssignDumpId(&pubrinfo[j].dobj);
4901 784 : pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4902 784 : pubrinfo[j].dobj.name = tbinfo->dobj.name;
4903 784 : pubrinfo[j].publication = pubinfo;
4904 784 : pubrinfo[j].pubtable = tbinfo;
4905 784 : if (PQgetisnull(res, i, i_prrelqual))
4906 436 : pubrinfo[j].pubrelqual = NULL;
4907 : else
4908 348 : pubrinfo[j].pubrelqual = pg_strdup(PQgetvalue(res, i, i_prrelqual));
4909 :
4910 784 : if (!PQgetisnull(res, i, i_prattrs))
4911 : {
4912 : char **attnames;
4913 : int nattnames;
4914 : PQExpBuffer attribs;
4915 :
4916 246 : if (!parsePGArray(PQgetvalue(res, i, i_prattrs),
4917 : &attnames, &nattnames))
4918 0 : pg_fatal("could not parse %s array", "prattrs");
4919 246 : attribs = createPQExpBuffer();
4920 710 : for (int k = 0; k < nattnames; k++)
4921 : {
4922 464 : if (k > 0)
4923 218 : appendPQExpBufferStr(attribs, ", ");
4924 :
4925 464 : appendPQExpBufferStr(attribs, fmtId(attnames[k]));
4926 : }
4927 246 : pubrinfo[j].pubrattrs = attribs->data;
4928 246 : free(attribs); /* but not attribs->data */
4929 246 : free(attnames);
4930 : }
4931 : else
4932 538 : pubrinfo[j].pubrattrs = NULL;
4933 :
4934 : /* Decide whether we want to dump it */
4935 784 : selectDumpablePublicationObject(&(pubrinfo[j].dobj), fout);
4936 :
4937 784 : j++;
4938 : }
4939 :
4940 370 : PQclear(res);
4941 370 : destroyPQExpBuffer(query);
4942 : }
4943 :
4944 : /*
4945 : * dumpPublicationNamespace
4946 : * dump the definition of the given publication schema mapping.
4947 : */
4948 : static void
4949 222 : dumpPublicationNamespace(Archive *fout, const PublicationSchemaInfo *pubsinfo)
4950 : {
4951 222 : DumpOptions *dopt = fout->dopt;
4952 222 : NamespaceInfo *schemainfo = pubsinfo->pubschema;
4953 222 : PublicationInfo *pubinfo = pubsinfo->publication;
4954 : PQExpBuffer query;
4955 : char *tag;
4956 :
4957 : /* Do nothing if not dumping schema */
4958 222 : if (!dopt->dumpSchema)
4959 24 : return;
4960 :
4961 198 : tag = psprintf("%s %s", pubinfo->dobj.name, schemainfo->dobj.name);
4962 :
4963 198 : query = createPQExpBuffer();
4964 :
4965 198 : appendPQExpBuffer(query, "ALTER PUBLICATION %s ", fmtId(pubinfo->dobj.name));
4966 198 : appendPQExpBuffer(query, "ADD TABLES IN SCHEMA %s;\n", fmtId(schemainfo->dobj.name));
4967 :
4968 : /*
4969 : * There is no point in creating drop query as the drop is done by schema
4970 : * drop.
4971 : */
4972 198 : if (pubsinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4973 198 : ArchiveEntry(fout, pubsinfo->dobj.catId, pubsinfo->dobj.dumpId,
4974 198 : ARCHIVE_OPTS(.tag = tag,
4975 : .namespace = schemainfo->dobj.name,
4976 : .owner = pubinfo->rolname,
4977 : .description = "PUBLICATION TABLES IN SCHEMA",
4978 : .section = SECTION_POST_DATA,
4979 : .createStmt = query->data));
4980 :
4981 : /* These objects can't currently have comments or seclabels */
4982 :
4983 198 : free(tag);
4984 198 : destroyPQExpBuffer(query);
4985 : }
4986 :
4987 : /*
4988 : * dumpPublicationTable
4989 : * dump the definition of the given publication table mapping
4990 : */
4991 : static void
4992 652 : dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo)
4993 : {
4994 652 : DumpOptions *dopt = fout->dopt;
4995 652 : PublicationInfo *pubinfo = pubrinfo->publication;
4996 652 : TableInfo *tbinfo = pubrinfo->pubtable;
4997 : PQExpBuffer query;
4998 : char *tag;
4999 :
5000 : /* Do nothing if not dumping schema */
5001 652 : if (!dopt->dumpSchema)
5002 84 : return;
5003 :
5004 568 : tag = psprintf("%s %s", pubinfo->dobj.name, tbinfo->dobj.name);
5005 :
5006 568 : query = createPQExpBuffer();
5007 :
5008 568 : appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
5009 568 : fmtId(pubinfo->dobj.name));
5010 568 : appendPQExpBuffer(query, " %s",
5011 568 : fmtQualifiedDumpable(tbinfo));
5012 :
5013 568 : if (pubrinfo->pubrattrs)
5014 178 : appendPQExpBuffer(query, " (%s)", pubrinfo->pubrattrs);
5015 :
5016 568 : if (pubrinfo->pubrelqual)
5017 : {
5018 : /*
5019 : * It's necessary to add parentheses around the expression because
5020 : * pg_get_expr won't supply the parentheses for things like WHERE
5021 : * TRUE.
5022 : */
5023 252 : appendPQExpBuffer(query, " WHERE (%s)", pubrinfo->pubrelqual);
5024 : }
5025 568 : appendPQExpBufferStr(query, ";\n");
5026 :
5027 : /*
5028 : * There is no point in creating a drop query as the drop is done by table
5029 : * drop. (If you think to change this, see also _printTocEntry().)
5030 : * Although this object doesn't really have ownership as such, set the
5031 : * owner field anyway to ensure that the command is run by the correct
5032 : * role at restore time.
5033 : */
5034 568 : if (pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5035 568 : ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
5036 568 : ARCHIVE_OPTS(.tag = tag,
5037 : .namespace = tbinfo->dobj.namespace->dobj.name,
5038 : .owner = pubinfo->rolname,
5039 : .description = "PUBLICATION TABLE",
5040 : .section = SECTION_POST_DATA,
5041 : .createStmt = query->data));
5042 :
5043 : /* These objects can't currently have comments or seclabels */
5044 :
5045 568 : free(tag);
5046 568 : destroyPQExpBuffer(query);
5047 : }
5048 :
5049 : /*
5050 : * Is the currently connected user a superuser?
5051 : */
5052 : static bool
5053 368 : is_superuser(Archive *fout)
5054 : {
5055 368 : ArchiveHandle *AH = (ArchiveHandle *) fout;
5056 : const char *val;
5057 :
5058 368 : val = PQparameterStatus(AH->connection, "is_superuser");
5059 :
5060 368 : if (val && strcmp(val, "on") == 0)
5061 362 : return true;
5062 :
5063 6 : return false;
5064 : }
5065 :
5066 : /*
5067 : * Set the given value to restrict_nonsystem_relation_kind value. Since
5068 : * restrict_nonsystem_relation_kind is introduced in minor version releases,
5069 : * the setting query is effective only where available.
5070 : */
5071 : static void
5072 438 : set_restrict_relation_kind(Archive *AH, const char *value)
5073 : {
5074 438 : PQExpBuffer query = createPQExpBuffer();
5075 : PGresult *res;
5076 :
5077 438 : appendPQExpBuffer(query,
5078 : "SELECT set_config(name, '%s', false) "
5079 : "FROM pg_settings "
5080 : "WHERE name = 'restrict_nonsystem_relation_kind'",
5081 : value);
5082 438 : res = ExecuteSqlQuery(AH, query->data, PGRES_TUPLES_OK);
5083 :
5084 438 : PQclear(res);
5085 438 : destroyPQExpBuffer(query);
5086 438 : }
5087 :
5088 : /*
5089 : * getSubscriptions
5090 : * get information about subscriptions
5091 : */
5092 : void
5093 370 : getSubscriptions(Archive *fout)
5094 : {
5095 370 : DumpOptions *dopt = fout->dopt;
5096 : PQExpBuffer query;
5097 : PGresult *res;
5098 : SubscriptionInfo *subinfo;
5099 : int i_tableoid;
5100 : int i_oid;
5101 : int i_subname;
5102 : int i_subowner;
5103 : int i_subbinary;
5104 : int i_substream;
5105 : int i_subtwophasestate;
5106 : int i_subdisableonerr;
5107 : int i_subpasswordrequired;
5108 : int i_subrunasowner;
5109 : int i_subconninfo;
5110 : int i_subslotname;
5111 : int i_subsynccommit;
5112 : int i_subpublications;
5113 : int i_suborigin;
5114 : int i_suboriginremotelsn;
5115 : int i_subenabled;
5116 : int i_subfailover;
5117 : int i_subretaindeadtuples;
5118 : int i_submaxretention;
5119 : int i,
5120 : ntups;
5121 :
5122 370 : if (dopt->no_subscriptions || fout->remoteVersion < 100000)
5123 2 : return;
5124 :
5125 368 : if (!is_superuser(fout))
5126 : {
5127 : int n;
5128 :
5129 6 : res = ExecuteSqlQuery(fout,
5130 : "SELECT count(*) FROM pg_subscription "
5131 : "WHERE subdbid = (SELECT oid FROM pg_database"
5132 : " WHERE datname = current_database())",
5133 : PGRES_TUPLES_OK);
5134 6 : n = atoi(PQgetvalue(res, 0, 0));
5135 6 : if (n > 0)
5136 4 : pg_log_warning("subscriptions not dumped because current user is not a superuser");
5137 6 : PQclear(res);
5138 6 : return;
5139 : }
5140 :
5141 362 : query = createPQExpBuffer();
5142 :
5143 : /* Get the subscriptions in current database. */
5144 362 : appendPQExpBufferStr(query,
5145 : "SELECT s.tableoid, s.oid, s.subname,\n"
5146 : " s.subowner,\n"
5147 : " s.subconninfo, s.subslotname, s.subsynccommit,\n"
5148 : " s.subpublications,\n");
5149 :
5150 362 : if (fout->remoteVersion >= 140000)
5151 362 : appendPQExpBufferStr(query, " s.subbinary,\n");
5152 : else
5153 0 : appendPQExpBufferStr(query, " false AS subbinary,\n");
5154 :
5155 362 : if (fout->remoteVersion >= 140000)
5156 362 : appendPQExpBufferStr(query, " s.substream,\n");
5157 : else
5158 0 : appendPQExpBufferStr(query, " 'f' AS substream,\n");
5159 :
5160 362 : if (fout->remoteVersion >= 150000)
5161 362 : appendPQExpBufferStr(query,
5162 : " s.subtwophasestate,\n"
5163 : " s.subdisableonerr,\n");
5164 : else
5165 0 : appendPQExpBuffer(query,
5166 : " '%c' AS subtwophasestate,\n"
5167 : " false AS subdisableonerr,\n",
5168 : LOGICALREP_TWOPHASE_STATE_DISABLED);
5169 :
5170 362 : if (fout->remoteVersion >= 160000)
5171 362 : appendPQExpBufferStr(query,
5172 : " s.subpasswordrequired,\n"
5173 : " s.subrunasowner,\n"
5174 : " s.suborigin,\n");
5175 : else
5176 0 : appendPQExpBuffer(query,
5177 : " 't' AS subpasswordrequired,\n"
5178 : " 't' AS subrunasowner,\n"
5179 : " '%s' AS suborigin,\n",
5180 : LOGICALREP_ORIGIN_ANY);
5181 :
5182 362 : if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5183 72 : appendPQExpBufferStr(query, " o.remote_lsn AS suboriginremotelsn,\n"
5184 : " s.subenabled,\n");
5185 : else
5186 290 : appendPQExpBufferStr(query, " NULL AS suboriginremotelsn,\n"
5187 : " false AS subenabled,\n");
5188 :
5189 362 : if (fout->remoteVersion >= 170000)
5190 362 : appendPQExpBufferStr(query,
5191 : " s.subfailover,\n");
5192 : else
5193 0 : appendPQExpBufferStr(query,
5194 : " false AS subfailover,\n");
5195 :
5196 362 : if (fout->remoteVersion >= 190000)
5197 362 : appendPQExpBufferStr(query,
5198 : " s.subretaindeadtuples,\n");
5199 : else
5200 0 : appendPQExpBufferStr(query,
5201 : " false AS subretaindeadtuples,\n");
5202 :
5203 362 : if (fout->remoteVersion >= 190000)
5204 362 : appendPQExpBufferStr(query,
5205 : " s.submaxretention\n");
5206 : else
5207 0 : appendPQExpBuffer(query,
5208 : " 0 AS submaxretention\n");
5209 :
5210 362 : appendPQExpBufferStr(query,
5211 : "FROM pg_subscription s\n");
5212 :
5213 362 : if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5214 72 : appendPQExpBufferStr(query,
5215 : "LEFT JOIN pg_catalog.pg_replication_origin_status o \n"
5216 : " ON o.external_id = 'pg_' || s.oid::text \n");
5217 :
5218 362 : appendPQExpBufferStr(query,
5219 : "WHERE s.subdbid = (SELECT oid FROM pg_database\n"
5220 : " WHERE datname = current_database())");
5221 :
5222 362 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5223 :
5224 362 : ntups = PQntuples(res);
5225 :
5226 : /*
5227 : * Get subscription fields. We don't include subskiplsn in the dump as
5228 : * after restoring the dump this value may no longer be relevant.
5229 : */
5230 362 : i_tableoid = PQfnumber(res, "tableoid");
5231 362 : i_oid = PQfnumber(res, "oid");
5232 362 : i_subname = PQfnumber(res, "subname");
5233 362 : i_subowner = PQfnumber(res, "subowner");
5234 362 : i_subenabled = PQfnumber(res, "subenabled");
5235 362 : i_subbinary = PQfnumber(res, "subbinary");
5236 362 : i_substream = PQfnumber(res, "substream");
5237 362 : i_subtwophasestate = PQfnumber(res, "subtwophasestate");
5238 362 : i_subdisableonerr = PQfnumber(res, "subdisableonerr");
5239 362 : i_subpasswordrequired = PQfnumber(res, "subpasswordrequired");
5240 362 : i_subrunasowner = PQfnumber(res, "subrunasowner");
5241 362 : i_subfailover = PQfnumber(res, "subfailover");
5242 362 : i_subretaindeadtuples = PQfnumber(res, "subretaindeadtuples");
5243 362 : i_submaxretention = PQfnumber(res, "submaxretention");
5244 362 : i_subconninfo = PQfnumber(res, "subconninfo");
5245 362 : i_subslotname = PQfnumber(res, "subslotname");
5246 362 : i_subsynccommit = PQfnumber(res, "subsynccommit");
5247 362 : i_subpublications = PQfnumber(res, "subpublications");
5248 362 : i_suborigin = PQfnumber(res, "suborigin");
5249 362 : i_suboriginremotelsn = PQfnumber(res, "suboriginremotelsn");
5250 :
5251 362 : subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
5252 :
5253 654 : for (i = 0; i < ntups; i++)
5254 : {
5255 292 : subinfo[i].dobj.objType = DO_SUBSCRIPTION;
5256 292 : subinfo[i].dobj.catId.tableoid =
5257 292 : atooid(PQgetvalue(res, i, i_tableoid));
5258 292 : subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5259 292 : AssignDumpId(&subinfo[i].dobj);
5260 292 : subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
5261 292 : subinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_subowner));
5262 :
5263 292 : subinfo[i].subenabled =
5264 292 : (strcmp(PQgetvalue(res, i, i_subenabled), "t") == 0);
5265 292 : subinfo[i].subbinary =
5266 292 : (strcmp(PQgetvalue(res, i, i_subbinary), "t") == 0);
5267 292 : subinfo[i].substream = *(PQgetvalue(res, i, i_substream));
5268 292 : subinfo[i].subtwophasestate = *(PQgetvalue(res, i, i_subtwophasestate));
5269 292 : subinfo[i].subdisableonerr =
5270 292 : (strcmp(PQgetvalue(res, i, i_subdisableonerr), "t") == 0);
5271 292 : subinfo[i].subpasswordrequired =
5272 292 : (strcmp(PQgetvalue(res, i, i_subpasswordrequired), "t") == 0);
5273 292 : subinfo[i].subrunasowner =
5274 292 : (strcmp(PQgetvalue(res, i, i_subrunasowner), "t") == 0);
5275 292 : subinfo[i].subfailover =
5276 292 : (strcmp(PQgetvalue(res, i, i_subfailover), "t") == 0);
5277 292 : subinfo[i].subretaindeadtuples =
5278 292 : (strcmp(PQgetvalue(res, i, i_subretaindeadtuples), "t") == 0);
5279 292 : subinfo[i].submaxretention =
5280 292 : atoi(PQgetvalue(res, i, i_submaxretention));
5281 584 : subinfo[i].subconninfo =
5282 292 : pg_strdup(PQgetvalue(res, i, i_subconninfo));
5283 292 : if (PQgetisnull(res, i, i_subslotname))
5284 0 : subinfo[i].subslotname = NULL;
5285 : else
5286 292 : subinfo[i].subslotname =
5287 292 : pg_strdup(PQgetvalue(res, i, i_subslotname));
5288 584 : subinfo[i].subsynccommit =
5289 292 : pg_strdup(PQgetvalue(res, i, i_subsynccommit));
5290 584 : subinfo[i].subpublications =
5291 292 : pg_strdup(PQgetvalue(res, i, i_subpublications));
5292 292 : subinfo[i].suborigin = pg_strdup(PQgetvalue(res, i, i_suborigin));
5293 292 : if (PQgetisnull(res, i, i_suboriginremotelsn))
5294 290 : subinfo[i].suboriginremotelsn = NULL;
5295 : else
5296 2 : subinfo[i].suboriginremotelsn =
5297 2 : pg_strdup(PQgetvalue(res, i, i_suboriginremotelsn));
5298 :
5299 : /* Decide whether we want to dump it */
5300 292 : selectDumpableObject(&(subinfo[i].dobj), fout);
5301 : }
5302 362 : PQclear(res);
5303 :
5304 362 : destroyPQExpBuffer(query);
5305 : }
5306 :
5307 : /*
5308 : * getSubscriptionTables
5309 : * Get information about subscription membership for dumpable tables. This
5310 : * will be used only in binary-upgrade mode for PG17 or later versions.
5311 : */
5312 : void
5313 370 : getSubscriptionTables(Archive *fout)
5314 : {
5315 370 : DumpOptions *dopt = fout->dopt;
5316 370 : SubscriptionInfo *subinfo = NULL;
5317 : SubRelInfo *subrinfo;
5318 : PGresult *res;
5319 : int i_srsubid;
5320 : int i_srrelid;
5321 : int i_srsubstate;
5322 : int i_srsublsn;
5323 : int ntups;
5324 370 : Oid last_srsubid = InvalidOid;
5325 :
5326 370 : if (dopt->no_subscriptions || !dopt->binary_upgrade ||
5327 72 : fout->remoteVersion < 170000)
5328 298 : return;
5329 :
5330 72 : res = ExecuteSqlQuery(fout,
5331 : "SELECT srsubid, srrelid, srsubstate, srsublsn "
5332 : "FROM pg_catalog.pg_subscription_rel "
5333 : "ORDER BY srsubid",
5334 : PGRES_TUPLES_OK);
5335 72 : ntups = PQntuples(res);
5336 72 : if (ntups == 0)
5337 70 : goto cleanup;
5338 :
5339 : /* Get pg_subscription_rel attributes */
5340 2 : i_srsubid = PQfnumber(res, "srsubid");
5341 2 : i_srrelid = PQfnumber(res, "srrelid");
5342 2 : i_srsubstate = PQfnumber(res, "srsubstate");
5343 2 : i_srsublsn = PQfnumber(res, "srsublsn");
5344 :
5345 2 : subrinfo = pg_malloc(ntups * sizeof(SubRelInfo));
5346 6 : for (int i = 0; i < ntups; i++)
5347 : {
5348 4 : Oid cur_srsubid = atooid(PQgetvalue(res, i, i_srsubid));
5349 4 : Oid relid = atooid(PQgetvalue(res, i, i_srrelid));
5350 : TableInfo *tblinfo;
5351 :
5352 : /*
5353 : * If we switched to a new subscription, check if the subscription
5354 : * exists.
5355 : */
5356 4 : if (cur_srsubid != last_srsubid)
5357 : {
5358 4 : subinfo = findSubscriptionByOid(cur_srsubid);
5359 4 : if (subinfo == NULL)
5360 0 : pg_fatal("subscription with OID %u does not exist", cur_srsubid);
5361 :
5362 4 : last_srsubid = cur_srsubid;
5363 : }
5364 :
5365 4 : tblinfo = findTableByOid(relid);
5366 4 : if (tblinfo == NULL)
5367 0 : pg_fatal("failed sanity check, table with OID %u not found",
5368 : relid);
5369 :
5370 : /* OK, make a DumpableObject for this relationship */
5371 4 : subrinfo[i].dobj.objType = DO_SUBSCRIPTION_REL;
5372 4 : subrinfo[i].dobj.catId.tableoid = relid;
5373 4 : subrinfo[i].dobj.catId.oid = cur_srsubid;
5374 4 : AssignDumpId(&subrinfo[i].dobj);
5375 4 : subrinfo[i].dobj.name = pg_strdup(subinfo->dobj.name);
5376 4 : subrinfo[i].tblinfo = tblinfo;
5377 4 : subrinfo[i].srsubstate = PQgetvalue(res, i, i_srsubstate)[0];
5378 4 : if (PQgetisnull(res, i, i_srsublsn))
5379 2 : subrinfo[i].srsublsn = NULL;
5380 : else
5381 2 : subrinfo[i].srsublsn = pg_strdup(PQgetvalue(res, i, i_srsublsn));
5382 :
5383 4 : subrinfo[i].subinfo = subinfo;
5384 :
5385 : /* Decide whether we want to dump it */
5386 4 : selectDumpableObject(&(subrinfo[i].dobj), fout);
5387 : }
5388 :
5389 2 : cleanup:
5390 72 : PQclear(res);
5391 : }
5392 :
5393 : /*
5394 : * dumpSubscriptionTable
5395 : * Dump the definition of the given subscription table mapping. This will be
5396 : * used only in binary-upgrade mode for PG17 or later versions.
5397 : */
5398 : static void
5399 4 : dumpSubscriptionTable(Archive *fout, const SubRelInfo *subrinfo)
5400 : {
5401 4 : DumpOptions *dopt = fout->dopt;
5402 4 : SubscriptionInfo *subinfo = subrinfo->subinfo;
5403 : PQExpBuffer query;
5404 : char *tag;
5405 :
5406 : /* Do nothing if not dumping schema */
5407 4 : if (!dopt->dumpSchema)
5408 0 : return;
5409 :
5410 : Assert(fout->dopt->binary_upgrade && fout->remoteVersion >= 170000);
5411 :
5412 4 : tag = psprintf("%s %s", subinfo->dobj.name, subrinfo->dobj.name);
5413 :
5414 4 : query = createPQExpBuffer();
5415 :
5416 4 : if (subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5417 : {
5418 : /*
5419 : * binary_upgrade_add_sub_rel_state will add the subscription relation
5420 : * to pg_subscription_rel table. This will be used only in
5421 : * binary-upgrade mode.
5422 : */
5423 4 : appendPQExpBufferStr(query,
5424 : "\n-- For binary upgrade, must preserve the subscriber table.\n");
5425 4 : appendPQExpBufferStr(query,
5426 : "SELECT pg_catalog.binary_upgrade_add_sub_rel_state(");
5427 4 : appendStringLiteralAH(query, subrinfo->dobj.name, fout);
5428 4 : appendPQExpBuffer(query,
5429 : ", %u, '%c'",
5430 4 : subrinfo->tblinfo->dobj.catId.oid,
5431 4 : subrinfo->srsubstate);
5432 :
5433 4 : if (subrinfo->srsublsn && subrinfo->srsublsn[0] != '\0')
5434 2 : appendPQExpBuffer(query, ", '%s'", subrinfo->srsublsn);
5435 : else
5436 2 : appendPQExpBufferStr(query, ", NULL");
5437 :
5438 4 : appendPQExpBufferStr(query, ");\n");
5439 : }
5440 :
5441 : /*
5442 : * There is no point in creating a drop query as the drop is done by table
5443 : * drop. (If you think to change this, see also _printTocEntry().)
5444 : * Although this object doesn't really have ownership as such, set the
5445 : * owner field anyway to ensure that the command is run by the correct
5446 : * role at restore time.
5447 : */
5448 4 : if (subrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5449 4 : ArchiveEntry(fout, subrinfo->dobj.catId, subrinfo->dobj.dumpId,
5450 4 : ARCHIVE_OPTS(.tag = tag,
5451 : .namespace = subrinfo->tblinfo->dobj.namespace->dobj.name,
5452 : .owner = subinfo->rolname,
5453 : .description = "SUBSCRIPTION TABLE",
5454 : .section = SECTION_POST_DATA,
5455 : .createStmt = query->data));
5456 :
5457 : /* These objects can't currently have comments or seclabels */
5458 :
5459 4 : free(tag);
5460 4 : destroyPQExpBuffer(query);
5461 : }
5462 :
5463 : /*
5464 : * dumpSubscription
5465 : * dump the definition of the given subscription
5466 : */
5467 : static void
5468 256 : dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo)
5469 : {
5470 256 : DumpOptions *dopt = fout->dopt;
5471 : PQExpBuffer delq;
5472 : PQExpBuffer query;
5473 : PQExpBuffer publications;
5474 : char *qsubname;
5475 256 : char **pubnames = NULL;
5476 256 : int npubnames = 0;
5477 : int i;
5478 :
5479 : /* Do nothing if not dumping schema */
5480 256 : if (!dopt->dumpSchema)
5481 36 : return;
5482 :
5483 220 : delq = createPQExpBuffer();
5484 220 : query = createPQExpBuffer();
5485 :
5486 220 : qsubname = pg_strdup(fmtId(subinfo->dobj.name));
5487 :
5488 220 : appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
5489 : qsubname);
5490 :
5491 220 : appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
5492 : qsubname);
5493 220 : appendStringLiteralAH(query, subinfo->subconninfo, fout);
5494 :
5495 : /* Build list of quoted publications and append them to query. */
5496 220 : if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
5497 0 : pg_fatal("could not parse %s array", "subpublications");
5498 :
5499 220 : publications = createPQExpBuffer();
5500 440 : for (i = 0; i < npubnames; i++)
5501 : {
5502 220 : if (i > 0)
5503 0 : appendPQExpBufferStr(publications, ", ");
5504 :
5505 220 : appendPQExpBufferStr(publications, fmtId(pubnames[i]));
5506 : }
5507 :
5508 220 : appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
5509 220 : if (subinfo->subslotname)
5510 220 : appendStringLiteralAH(query, subinfo->subslotname, fout);
5511 : else
5512 0 : appendPQExpBufferStr(query, "NONE");
5513 :
5514 220 : if (subinfo->subbinary)
5515 0 : appendPQExpBufferStr(query, ", binary = true");
5516 :
5517 220 : if (subinfo->substream == LOGICALREP_STREAM_ON)
5518 72 : appendPQExpBufferStr(query, ", streaming = on");
5519 148 : else if (subinfo->substream == LOGICALREP_STREAM_PARALLEL)
5520 76 : appendPQExpBufferStr(query, ", streaming = parallel");
5521 : else
5522 72 : appendPQExpBufferStr(query, ", streaming = off");
5523 :
5524 220 : if (subinfo->subtwophasestate != LOGICALREP_TWOPHASE_STATE_DISABLED)
5525 0 : appendPQExpBufferStr(query, ", two_phase = on");
5526 :
5527 220 : if (subinfo->subdisableonerr)
5528 0 : appendPQExpBufferStr(query, ", disable_on_error = true");
5529 :
5530 220 : if (!subinfo->subpasswordrequired)
5531 0 : appendPQExpBufferStr(query, ", password_required = false");
5532 :
5533 220 : if (subinfo->subrunasowner)
5534 0 : appendPQExpBufferStr(query, ", run_as_owner = true");
5535 :
5536 220 : if (subinfo->subfailover)
5537 2 : appendPQExpBufferStr(query, ", failover = true");
5538 :
5539 220 : if (subinfo->subretaindeadtuples)
5540 2 : appendPQExpBufferStr(query, ", retain_dead_tuples = true");
5541 :
5542 220 : if (subinfo->submaxretention)
5543 0 : appendPQExpBuffer(query, ", max_retention_duration = %d", subinfo->submaxretention);
5544 :
5545 220 : if (strcmp(subinfo->subsynccommit, "off") != 0)
5546 0 : appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
5547 :
5548 220 : if (pg_strcasecmp(subinfo->suborigin, LOGICALREP_ORIGIN_ANY) != 0)
5549 72 : appendPQExpBuffer(query, ", origin = %s", subinfo->suborigin);
5550 :
5551 220 : appendPQExpBufferStr(query, ");\n");
5552 :
5553 : /*
5554 : * In binary-upgrade mode, we allow the replication to continue after the
5555 : * upgrade.
5556 : */
5557 220 : if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5558 : {
5559 10 : if (subinfo->suboriginremotelsn)
5560 : {
5561 : /*
5562 : * Preserve the remote_lsn for the subscriber's replication
5563 : * origin. This value is required to start the replication from
5564 : * the position before the upgrade. This value will be stale if
5565 : * the publisher gets upgraded before the subscriber node.
5566 : * However, this shouldn't be a problem as the upgrade of the
5567 : * publisher ensures that all the transactions were replicated
5568 : * before upgrading it.
5569 : */
5570 2 : appendPQExpBufferStr(query,
5571 : "\n-- For binary upgrade, must preserve the remote_lsn for the subscriber's replication origin.\n");
5572 2 : appendPQExpBufferStr(query,
5573 : "SELECT pg_catalog.binary_upgrade_replorigin_advance(");
5574 2 : appendStringLiteralAH(query, subinfo->dobj.name, fout);
5575 2 : appendPQExpBuffer(query, ", '%s');\n", subinfo->suboriginremotelsn);
5576 : }
5577 :
5578 10 : if (subinfo->subenabled)
5579 : {
5580 : /*
5581 : * Enable the subscription to allow the replication to continue
5582 : * after the upgrade.
5583 : */
5584 2 : appendPQExpBufferStr(query,
5585 : "\n-- For binary upgrade, must preserve the subscriber's running state.\n");
5586 2 : appendPQExpBuffer(query, "ALTER SUBSCRIPTION %s ENABLE;\n", qsubname);
5587 : }
5588 : }
5589 :
5590 220 : if (subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5591 220 : ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
5592 220 : ARCHIVE_OPTS(.tag = subinfo->dobj.name,
5593 : .owner = subinfo->rolname,
5594 : .description = "SUBSCRIPTION",
5595 : .section = SECTION_POST_DATA,
5596 : .createStmt = query->data,
5597 : .dropStmt = delq->data));
5598 :
5599 220 : if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
5600 72 : dumpComment(fout, "SUBSCRIPTION", qsubname,
5601 72 : NULL, subinfo->rolname,
5602 72 : subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
5603 :
5604 220 : if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
5605 0 : dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
5606 0 : NULL, subinfo->rolname,
5607 0 : subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
5608 :
5609 220 : destroyPQExpBuffer(publications);
5610 220 : free(pubnames);
5611 :
5612 220 : destroyPQExpBuffer(delq);
5613 220 : destroyPQExpBuffer(query);
5614 220 : free(qsubname);
5615 : }
5616 :
5617 : /*
5618 : * Given a "create query", append as many ALTER ... DEPENDS ON EXTENSION as
5619 : * the object needs.
5620 : */
5621 : static void
5622 10658 : append_depends_on_extension(Archive *fout,
5623 : PQExpBuffer create,
5624 : const DumpableObject *dobj,
5625 : const char *catalog,
5626 : const char *keyword,
5627 : const char *objname)
5628 : {
5629 10658 : if (dobj->depends_on_ext)
5630 : {
5631 : char *nm;
5632 : PGresult *res;
5633 : PQExpBuffer query;
5634 : int ntups;
5635 : int i_extname;
5636 : int i;
5637 :
5638 : /* dodge fmtId() non-reentrancy */
5639 84 : nm = pg_strdup(objname);
5640 :
5641 84 : query = createPQExpBuffer();
5642 84 : appendPQExpBuffer(query,
5643 : "SELECT e.extname "
5644 : "FROM pg_catalog.pg_depend d, pg_catalog.pg_extension e "
5645 : "WHERE d.refobjid = e.oid AND classid = '%s'::pg_catalog.regclass "
5646 : "AND objid = '%u'::pg_catalog.oid AND deptype = 'x' "
5647 : "AND refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass",
5648 : catalog,
5649 84 : dobj->catId.oid);
5650 84 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5651 84 : ntups = PQntuples(res);
5652 84 : i_extname = PQfnumber(res, "extname");
5653 168 : for (i = 0; i < ntups; i++)
5654 : {
5655 84 : appendPQExpBuffer(create, "\nALTER %s %s DEPENDS ON EXTENSION %s;",
5656 : keyword, nm,
5657 84 : fmtId(PQgetvalue(res, i, i_extname)));
5658 : }
5659 :
5660 84 : PQclear(res);
5661 84 : destroyPQExpBuffer(query);
5662 84 : pg_free(nm);
5663 : }
5664 10658 : }
5665 :
5666 : static Oid
5667 0 : get_next_possible_free_pg_type_oid(Archive *fout, PQExpBuffer upgrade_query)
5668 : {
5669 : /*
5670 : * If the old version didn't assign an array type, but the new version
5671 : * does, we must select an unused type OID to assign. This currently only
5672 : * happens for domains, when upgrading pre-v11 to v11 and up.
5673 : *
5674 : * Note: local state here is kind of ugly, but we must have some, since we
5675 : * mustn't choose the same unused OID more than once.
5676 : */
5677 : static Oid next_possible_free_oid = FirstNormalObjectId;
5678 : PGresult *res;
5679 : bool is_dup;
5680 :
5681 : do
5682 : {
5683 0 : ++next_possible_free_oid;
5684 0 : printfPQExpBuffer(upgrade_query,
5685 : "SELECT EXISTS(SELECT 1 "
5686 : "FROM pg_catalog.pg_type "
5687 : "WHERE oid = '%u'::pg_catalog.oid);",
5688 : next_possible_free_oid);
5689 0 : res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
5690 0 : is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
5691 0 : PQclear(res);
5692 0 : } while (is_dup);
5693 :
5694 0 : return next_possible_free_oid;
5695 : }
5696 :
5697 : static void
5698 1886 : binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
5699 : PQExpBuffer upgrade_buffer,
5700 : Oid pg_type_oid,
5701 : bool force_array_type,
5702 : bool include_multirange_type)
5703 : {
5704 1886 : PQExpBuffer upgrade_query = createPQExpBuffer();
5705 : PGresult *res;
5706 : Oid pg_type_array_oid;
5707 : Oid pg_type_multirange_oid;
5708 : Oid pg_type_multirange_array_oid;
5709 : TypeInfo *tinfo;
5710 :
5711 1886 : appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
5712 1886 : appendPQExpBuffer(upgrade_buffer,
5713 : "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5714 : pg_type_oid);
5715 :
5716 1886 : tinfo = findTypeByOid(pg_type_oid);
5717 1886 : if (tinfo)
5718 1886 : pg_type_array_oid = tinfo->typarray;
5719 : else
5720 0 : pg_type_array_oid = InvalidOid;
5721 :
5722 1886 : if (!OidIsValid(pg_type_array_oid) && force_array_type)
5723 0 : pg_type_array_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
5724 :
5725 1886 : if (OidIsValid(pg_type_array_oid))
5726 : {
5727 1882 : appendPQExpBufferStr(upgrade_buffer,
5728 : "\n-- For binary upgrade, must preserve pg_type array oid\n");
5729 1882 : appendPQExpBuffer(upgrade_buffer,
5730 : "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5731 : pg_type_array_oid);
5732 : }
5733 :
5734 : /*
5735 : * Pre-set the multirange type oid and its own array type oid.
5736 : */
5737 1886 : if (include_multirange_type)
5738 : {
5739 16 : if (fout->remoteVersion >= 140000)
5740 : {
5741 16 : printfPQExpBuffer(upgrade_query,
5742 : "SELECT t.oid, t.typarray "
5743 : "FROM pg_catalog.pg_type t "
5744 : "JOIN pg_catalog.pg_range r "
5745 : "ON t.oid = r.rngmultitypid "
5746 : "WHERE r.rngtypid = '%u'::pg_catalog.oid;",
5747 : pg_type_oid);
5748 :
5749 16 : res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
5750 :
5751 16 : pg_type_multirange_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "oid")));
5752 16 : pg_type_multirange_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
5753 :
5754 16 : PQclear(res);
5755 : }
5756 : else
5757 : {
5758 0 : pg_type_multirange_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
5759 0 : pg_type_multirange_array_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
5760 : }
5761 :
5762 16 : appendPQExpBufferStr(upgrade_buffer,
5763 : "\n-- For binary upgrade, must preserve multirange pg_type oid\n");
5764 16 : appendPQExpBuffer(upgrade_buffer,
5765 : "SELECT pg_catalog.binary_upgrade_set_next_multirange_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5766 : pg_type_multirange_oid);
5767 16 : appendPQExpBufferStr(upgrade_buffer,
5768 : "\n-- For binary upgrade, must preserve multirange pg_type array oid\n");
5769 16 : appendPQExpBuffer(upgrade_buffer,
5770 : "SELECT pg_catalog.binary_upgrade_set_next_multirange_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5771 : pg_type_multirange_array_oid);
5772 : }
5773 :
5774 1886 : destroyPQExpBuffer(upgrade_query);
5775 1886 : }
5776 :
5777 : static void
5778 1736 : binary_upgrade_set_type_oids_by_rel(Archive *fout,
5779 : PQExpBuffer upgrade_buffer,
5780 : const TableInfo *tbinfo)
5781 : {
5782 1736 : Oid pg_type_oid = tbinfo->reltype;
5783 :
5784 1736 : if (OidIsValid(pg_type_oid))
5785 1736 : binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
5786 : pg_type_oid, false, false);
5787 1736 : }
5788 :
5789 : /*
5790 : * bsearch() comparator for BinaryUpgradeClassOidItem
5791 : */
5792 : static int
5793 24702 : BinaryUpgradeClassOidItemCmp(const void *p1, const void *p2)
5794 : {
5795 24702 : BinaryUpgradeClassOidItem v1 = *((const BinaryUpgradeClassOidItem *) p1);
5796 24702 : BinaryUpgradeClassOidItem v2 = *((const BinaryUpgradeClassOidItem *) p2);
5797 :
5798 24702 : return pg_cmp_u32(v1.oid, v2.oid);
5799 : }
5800 :
5801 : /*
5802 : * collectBinaryUpgradeClassOids
5803 : *
5804 : * Construct a table of pg_class information required for
5805 : * binary_upgrade_set_pg_class_oids(). The table is sorted by OID for speed in
5806 : * lookup.
5807 : */
5808 : static void
5809 72 : collectBinaryUpgradeClassOids(Archive *fout)
5810 : {
5811 : PGresult *res;
5812 : const char *query;
5813 :
5814 72 : query = "SELECT c.oid, c.relkind, c.relfilenode, c.reltoastrelid, "
5815 : "ct.relfilenode, i.indexrelid, cti.relfilenode "
5816 : "FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_index i "
5817 : "ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
5818 : "LEFT JOIN pg_catalog.pg_class ct ON (c.reltoastrelid = ct.oid) "
5819 : "LEFT JOIN pg_catalog.pg_class AS cti ON (i.indexrelid = cti.oid) "
5820 : "ORDER BY c.oid;";
5821 :
5822 72 : res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
5823 :
5824 72 : nbinaryUpgradeClassOids = PQntuples(res);
5825 72 : binaryUpgradeClassOids = (BinaryUpgradeClassOidItem *)
5826 72 : pg_malloc(nbinaryUpgradeClassOids * sizeof(BinaryUpgradeClassOidItem));
5827 :
5828 33724 : for (int i = 0; i < nbinaryUpgradeClassOids; i++)
5829 : {
5830 33652 : binaryUpgradeClassOids[i].oid = atooid(PQgetvalue(res, i, 0));
5831 33652 : binaryUpgradeClassOids[i].relkind = *PQgetvalue(res, i, 1);
5832 33652 : binaryUpgradeClassOids[i].relfilenumber = atooid(PQgetvalue(res, i, 2));
5833 33652 : binaryUpgradeClassOids[i].toast_oid = atooid(PQgetvalue(res, i, 3));
5834 33652 : binaryUpgradeClassOids[i].toast_relfilenumber = atooid(PQgetvalue(res, i, 4));
5835 33652 : binaryUpgradeClassOids[i].toast_index_oid = atooid(PQgetvalue(res, i, 5));
5836 33652 : binaryUpgradeClassOids[i].toast_index_relfilenumber = atooid(PQgetvalue(res, i, 6));
5837 : }
5838 :
5839 72 : PQclear(res);
5840 72 : }
5841 :
5842 : static void
5843 2508 : binary_upgrade_set_pg_class_oids(Archive *fout,
5844 : PQExpBuffer upgrade_buffer, Oid pg_class_oid)
5845 : {
5846 2508 : BinaryUpgradeClassOidItem key = {0};
5847 : BinaryUpgradeClassOidItem *entry;
5848 :
5849 : Assert(binaryUpgradeClassOids);
5850 :
5851 : /*
5852 : * Preserve the OID and relfilenumber of the table, table's index, table's
5853 : * toast table and toast table's index if any.
5854 : *
5855 : * One complexity is that the current table definition might not require
5856 : * the creation of a TOAST table, but the old database might have a TOAST
5857 : * table that was created earlier, before some wide columns were dropped.
5858 : * By setting the TOAST oid we force creation of the TOAST heap and index
5859 : * by the new backend, so we can copy the files during binary upgrade
5860 : * without worrying about this case.
5861 : */
5862 2508 : key.oid = pg_class_oid;
5863 2508 : entry = bsearch(&key, binaryUpgradeClassOids, nbinaryUpgradeClassOids,
5864 : sizeof(BinaryUpgradeClassOidItem),
5865 : BinaryUpgradeClassOidItemCmp);
5866 :
5867 2508 : appendPQExpBufferStr(upgrade_buffer,
5868 : "\n-- For binary upgrade, must preserve pg_class oids and relfilenodes\n");
5869 :
5870 2508 : if (entry->relkind != RELKIND_INDEX &&
5871 1954 : entry->relkind != RELKIND_PARTITIONED_INDEX)
5872 : {
5873 1904 : appendPQExpBuffer(upgrade_buffer,
5874 : "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
5875 : pg_class_oid);
5876 :
5877 : /*
5878 : * Not every relation has storage. Also, in a pre-v12 database,
5879 : * partitioned tables have a relfilenumber, which should not be
5880 : * preserved when upgrading.
5881 : */
5882 1904 : if (RelFileNumberIsValid(entry->relfilenumber) &&
5883 1580 : entry->relkind != RELKIND_PARTITIONED_TABLE)
5884 1580 : appendPQExpBuffer(upgrade_buffer,
5885 : "SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
5886 : entry->relfilenumber);
5887 :
5888 : /*
5889 : * In a pre-v12 database, partitioned tables might be marked as having
5890 : * toast tables, but we should ignore them if so.
5891 : */
5892 1904 : if (OidIsValid(entry->toast_oid) &&
5893 560 : entry->relkind != RELKIND_PARTITIONED_TABLE)
5894 : {
5895 560 : appendPQExpBuffer(upgrade_buffer,
5896 : "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
5897 : entry->toast_oid);
5898 560 : appendPQExpBuffer(upgrade_buffer,
5899 : "SELECT pg_catalog.binary_upgrade_set_next_toast_relfilenode('%u'::pg_catalog.oid);\n",
5900 : entry->toast_relfilenumber);
5901 :
5902 : /* every toast table has an index */
5903 560 : appendPQExpBuffer(upgrade_buffer,
5904 : "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
5905 : entry->toast_index_oid);
5906 560 : appendPQExpBuffer(upgrade_buffer,
5907 : "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
5908 : entry->toast_index_relfilenumber);
5909 : }
5910 : }
5911 : else
5912 : {
5913 : /* Preserve the OID and relfilenumber of the index */
5914 604 : appendPQExpBuffer(upgrade_buffer,
5915 : "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
5916 : pg_class_oid);
5917 604 : appendPQExpBuffer(upgrade_buffer,
5918 : "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
5919 : entry->relfilenumber);
5920 : }
5921 :
5922 2508 : appendPQExpBufferChar(upgrade_buffer, '\n');
5923 2508 : }
5924 :
5925 : /*
5926 : * If the DumpableObject is a member of an extension, add a suitable
5927 : * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
5928 : *
5929 : * For somewhat historical reasons, objname should already be quoted,
5930 : * but not objnamespace (if any).
5931 : */
5932 : static void
5933 3008 : binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
5934 : const DumpableObject *dobj,
5935 : const char *objtype,
5936 : const char *objname,
5937 : const char *objnamespace)
5938 : {
5939 3008 : DumpableObject *extobj = NULL;
5940 : int i;
5941 :
5942 3008 : if (!dobj->ext_member)
5943 2966 : return;
5944 :
5945 : /*
5946 : * Find the parent extension. We could avoid this search if we wanted to
5947 : * add a link field to DumpableObject, but the space costs of that would
5948 : * be considerable. We assume that member objects could only have a
5949 : * direct dependency on their own extension, not any others.
5950 : */
5951 42 : for (i = 0; i < dobj->nDeps; i++)
5952 : {
5953 42 : extobj = findObjectByDumpId(dobj->dependencies[i]);
5954 42 : if (extobj && extobj->objType == DO_EXTENSION)
5955 42 : break;
5956 0 : extobj = NULL;
5957 : }
5958 42 : if (extobj == NULL)
5959 0 : pg_fatal("could not find parent extension for %s %s",
5960 : objtype, objname);
5961 :
5962 42 : appendPQExpBufferStr(upgrade_buffer,
5963 : "\n-- For binary upgrade, handle extension membership the hard way\n");
5964 42 : appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
5965 42 : fmtId(extobj->name),
5966 : objtype);
5967 42 : if (objnamespace && *objnamespace)
5968 36 : appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
5969 42 : appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
5970 : }
5971 :
5972 : /*
5973 : * getNamespaces:
5974 : * get information about all namespaces in the system catalogs
5975 : */
5976 : void
5977 372 : getNamespaces(Archive *fout)
5978 : {
5979 : PGresult *res;
5980 : int ntups;
5981 : int i;
5982 : PQExpBuffer query;
5983 : NamespaceInfo *nsinfo;
5984 : int i_tableoid;
5985 : int i_oid;
5986 : int i_nspname;
5987 : int i_nspowner;
5988 : int i_nspacl;
5989 : int i_acldefault;
5990 :
5991 372 : query = createPQExpBuffer();
5992 :
5993 : /*
5994 : * we fetch all namespaces including system ones, so that every object we
5995 : * read in can be linked to a containing namespace.
5996 : */
5997 372 : appendPQExpBufferStr(query, "SELECT n.tableoid, n.oid, n.nspname, "
5998 : "n.nspowner, "
5999 : "n.nspacl, "
6000 : "acldefault('n', n.nspowner) AS acldefault "
6001 : "FROM pg_namespace n");
6002 :
6003 372 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6004 :
6005 372 : ntups = PQntuples(res);
6006 :
6007 372 : nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
6008 :
6009 372 : i_tableoid = PQfnumber(res, "tableoid");
6010 372 : i_oid = PQfnumber(res, "oid");
6011 372 : i_nspname = PQfnumber(res, "nspname");
6012 372 : i_nspowner = PQfnumber(res, "nspowner");
6013 372 : i_nspacl = PQfnumber(res, "nspacl");
6014 372 : i_acldefault = PQfnumber(res, "acldefault");
6015 :
6016 3248 : for (i = 0; i < ntups; i++)
6017 : {
6018 : const char *nspowner;
6019 :
6020 2876 : nsinfo[i].dobj.objType = DO_NAMESPACE;
6021 2876 : nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6022 2876 : nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6023 2876 : AssignDumpId(&nsinfo[i].dobj);
6024 2876 : nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
6025 2876 : nsinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_nspacl));
6026 2876 : nsinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6027 2876 : nsinfo[i].dacl.privtype = 0;
6028 2876 : nsinfo[i].dacl.initprivs = NULL;
6029 2876 : nspowner = PQgetvalue(res, i, i_nspowner);
6030 2876 : nsinfo[i].nspowner = atooid(nspowner);
6031 2876 : nsinfo[i].rolname = getRoleName(nspowner);
6032 :
6033 : /* Decide whether to dump this namespace */
6034 2876 : selectDumpableNamespace(&nsinfo[i], fout);
6035 :
6036 : /* Mark whether namespace has an ACL */
6037 2876 : if (!PQgetisnull(res, i, i_nspacl))
6038 1252 : nsinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6039 :
6040 : /*
6041 : * We ignore any pg_init_privs.initprivs entry for the public schema
6042 : * and assume a predetermined default, for several reasons. First,
6043 : * dropping and recreating the schema removes its pg_init_privs entry,
6044 : * but an empty destination database starts with this ACL nonetheless.
6045 : * Second, we support dump/reload of public schema ownership changes.
6046 : * ALTER SCHEMA OWNER filters nspacl through aclnewowner(), but
6047 : * initprivs continues to reflect the initial owner. Hence,
6048 : * synthesize the value that nspacl will have after the restore's
6049 : * ALTER SCHEMA OWNER. Third, this makes the destination database
6050 : * match the source's ACL, even if the latter was an initdb-default
6051 : * ACL, which changed in v15. An upgrade pulls in changes to most
6052 : * system object ACLs that the DBA had not customized. We've made the
6053 : * public schema depart from that, because changing its ACL so easily
6054 : * breaks applications.
6055 : */
6056 2876 : if (strcmp(nsinfo[i].dobj.name, "public") == 0)
6057 : {
6058 364 : PQExpBuffer aclarray = createPQExpBuffer();
6059 364 : PQExpBuffer aclitem = createPQExpBuffer();
6060 :
6061 : /* Standard ACL as of v15 is {owner=UC/owner,=U/owner} */
6062 364 : appendPQExpBufferChar(aclarray, '{');
6063 364 : quoteAclUserName(aclitem, nsinfo[i].rolname);
6064 364 : appendPQExpBufferStr(aclitem, "=UC/");
6065 364 : quoteAclUserName(aclitem, nsinfo[i].rolname);
6066 364 : appendPGArray(aclarray, aclitem->data);
6067 364 : resetPQExpBuffer(aclitem);
6068 364 : appendPQExpBufferStr(aclitem, "=U/");
6069 364 : quoteAclUserName(aclitem, nsinfo[i].rolname);
6070 364 : appendPGArray(aclarray, aclitem->data);
6071 364 : appendPQExpBufferChar(aclarray, '}');
6072 :
6073 364 : nsinfo[i].dacl.privtype = 'i';
6074 364 : nsinfo[i].dacl.initprivs = pstrdup(aclarray->data);
6075 364 : nsinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6076 :
6077 364 : destroyPQExpBuffer(aclarray);
6078 364 : destroyPQExpBuffer(aclitem);
6079 : }
6080 : }
6081 :
6082 372 : PQclear(res);
6083 372 : destroyPQExpBuffer(query);
6084 372 : }
6085 :
6086 : /*
6087 : * findNamespace:
6088 : * given a namespace OID, look up the info read by getNamespaces
6089 : */
6090 : static NamespaceInfo *
6091 1170982 : findNamespace(Oid nsoid)
6092 : {
6093 : NamespaceInfo *nsinfo;
6094 :
6095 1170982 : nsinfo = findNamespaceByOid(nsoid);
6096 1170982 : if (nsinfo == NULL)
6097 0 : pg_fatal("schema with OID %u does not exist", nsoid);
6098 1170982 : return nsinfo;
6099 : }
6100 :
6101 : /*
6102 : * getExtensions:
6103 : * read all extensions in the system catalogs and return them in the
6104 : * ExtensionInfo* structure
6105 : *
6106 : * numExtensions is set to the number of extensions read in
6107 : */
6108 : ExtensionInfo *
6109 372 : getExtensions(Archive *fout, int *numExtensions)
6110 : {
6111 372 : DumpOptions *dopt = fout->dopt;
6112 : PGresult *res;
6113 : int ntups;
6114 : int i;
6115 : PQExpBuffer query;
6116 372 : ExtensionInfo *extinfo = NULL;
6117 : int i_tableoid;
6118 : int i_oid;
6119 : int i_extname;
6120 : int i_nspname;
6121 : int i_extrelocatable;
6122 : int i_extversion;
6123 : int i_extconfig;
6124 : int i_extcondition;
6125 :
6126 372 : query = createPQExpBuffer();
6127 :
6128 372 : appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
6129 : "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
6130 : "FROM pg_extension x "
6131 : "JOIN pg_namespace n ON n.oid = x.extnamespace");
6132 :
6133 372 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6134 :
6135 372 : ntups = PQntuples(res);
6136 372 : if (ntups == 0)
6137 0 : goto cleanup;
6138 :
6139 372 : extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
6140 :
6141 372 : i_tableoid = PQfnumber(res, "tableoid");
6142 372 : i_oid = PQfnumber(res, "oid");
6143 372 : i_extname = PQfnumber(res, "extname");
6144 372 : i_nspname = PQfnumber(res, "nspname");
6145 372 : i_extrelocatable = PQfnumber(res, "extrelocatable");
6146 372 : i_extversion = PQfnumber(res, "extversion");
6147 372 : i_extconfig = PQfnumber(res, "extconfig");
6148 372 : i_extcondition = PQfnumber(res, "extcondition");
6149 :
6150 804 : for (i = 0; i < ntups; i++)
6151 : {
6152 432 : extinfo[i].dobj.objType = DO_EXTENSION;
6153 432 : extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6154 432 : extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6155 432 : AssignDumpId(&extinfo[i].dobj);
6156 432 : extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
6157 432 : extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
6158 432 : extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
6159 432 : extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
6160 432 : extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
6161 432 : extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
6162 :
6163 : /* Decide whether we want to dump it */
6164 432 : selectDumpableExtension(&(extinfo[i]), dopt);
6165 : }
6166 :
6167 372 : cleanup:
6168 372 : PQclear(res);
6169 372 : destroyPQExpBuffer(query);
6170 :
6171 372 : *numExtensions = ntups;
6172 :
6173 372 : return extinfo;
6174 : }
6175 :
6176 : /*
6177 : * getTypes:
6178 : * get information about all types in the system catalogs
6179 : *
6180 : * NB: this must run after getFuncs() because we assume we can do
6181 : * findFuncByOid().
6182 : */
6183 : void
6184 370 : getTypes(Archive *fout)
6185 : {
6186 : PGresult *res;
6187 : int ntups;
6188 : int i;
6189 370 : PQExpBuffer query = createPQExpBuffer();
6190 : TypeInfo *tyinfo;
6191 : ShellTypeInfo *stinfo;
6192 : int i_tableoid;
6193 : int i_oid;
6194 : int i_typname;
6195 : int i_typnamespace;
6196 : int i_typacl;
6197 : int i_acldefault;
6198 : int i_typowner;
6199 : int i_typelem;
6200 : int i_typrelid;
6201 : int i_typrelkind;
6202 : int i_typtype;
6203 : int i_typisdefined;
6204 : int i_isarray;
6205 : int i_typarray;
6206 :
6207 : /*
6208 : * we include even the built-in types because those may be used as array
6209 : * elements by user-defined types
6210 : *
6211 : * we filter out the built-in types when we dump out the types
6212 : *
6213 : * same approach for undefined (shell) types and array types
6214 : *
6215 : * Note: as of 8.3 we can reliably detect whether a type is an
6216 : * auto-generated array type by checking the element type's typarray.
6217 : * (Before that the test is capable of generating false positives.) We
6218 : * still check for name beginning with '_', though, so as to avoid the
6219 : * cost of the subselect probe for all standard types. This would have to
6220 : * be revisited if the backend ever allows renaming of array types.
6221 : */
6222 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, typname, "
6223 : "typnamespace, typacl, "
6224 : "acldefault('T', typowner) AS acldefault, "
6225 : "typowner, "
6226 : "typelem, typrelid, typarray, "
6227 : "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
6228 : "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
6229 : "typtype, typisdefined, "
6230 : "typname[0] = '_' AND typelem != 0 AND "
6231 : "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
6232 : "FROM pg_type");
6233 :
6234 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6235 :
6236 370 : ntups = PQntuples(res);
6237 :
6238 370 : tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
6239 :
6240 370 : i_tableoid = PQfnumber(res, "tableoid");
6241 370 : i_oid = PQfnumber(res, "oid");
6242 370 : i_typname = PQfnumber(res, "typname");
6243 370 : i_typnamespace = PQfnumber(res, "typnamespace");
6244 370 : i_typacl = PQfnumber(res, "typacl");
6245 370 : i_acldefault = PQfnumber(res, "acldefault");
6246 370 : i_typowner = PQfnumber(res, "typowner");
6247 370 : i_typelem = PQfnumber(res, "typelem");
6248 370 : i_typrelid = PQfnumber(res, "typrelid");
6249 370 : i_typrelkind = PQfnumber(res, "typrelkind");
6250 370 : i_typtype = PQfnumber(res, "typtype");
6251 370 : i_typisdefined = PQfnumber(res, "typisdefined");
6252 370 : i_isarray = PQfnumber(res, "isarray");
6253 370 : i_typarray = PQfnumber(res, "typarray");
6254 :
6255 270620 : for (i = 0; i < ntups; i++)
6256 : {
6257 270250 : tyinfo[i].dobj.objType = DO_TYPE;
6258 270250 : tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6259 270250 : tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6260 270250 : AssignDumpId(&tyinfo[i].dobj);
6261 270250 : tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
6262 540500 : tyinfo[i].dobj.namespace =
6263 270250 : findNamespace(atooid(PQgetvalue(res, i, i_typnamespace)));
6264 270250 : tyinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_typacl));
6265 270250 : tyinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6266 270250 : tyinfo[i].dacl.privtype = 0;
6267 270250 : tyinfo[i].dacl.initprivs = NULL;
6268 270250 : tyinfo[i].ftypname = NULL; /* may get filled later */
6269 270250 : tyinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_typowner));
6270 270250 : tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
6271 270250 : tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
6272 270250 : tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
6273 270250 : tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
6274 270250 : tyinfo[i].shellType = NULL;
6275 :
6276 270250 : if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
6277 270134 : tyinfo[i].isDefined = true;
6278 : else
6279 116 : tyinfo[i].isDefined = false;
6280 :
6281 270250 : if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
6282 129702 : tyinfo[i].isArray = true;
6283 : else
6284 140548 : tyinfo[i].isArray = false;
6285 :
6286 270250 : tyinfo[i].typarray = atooid(PQgetvalue(res, i, i_typarray));
6287 :
6288 270250 : if (tyinfo[i].typtype == TYPTYPE_MULTIRANGE)
6289 2508 : tyinfo[i].isMultirange = true;
6290 : else
6291 267742 : tyinfo[i].isMultirange = false;
6292 :
6293 : /* Decide whether we want to dump it */
6294 270250 : selectDumpableType(&tyinfo[i], fout);
6295 :
6296 : /* Mark whether type has an ACL */
6297 270250 : if (!PQgetisnull(res, i, i_typacl))
6298 458 : tyinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6299 :
6300 : /*
6301 : * If it's a domain, fetch info about its constraints, if any
6302 : */
6303 270250 : tyinfo[i].nDomChecks = 0;
6304 270250 : tyinfo[i].domChecks = NULL;
6305 270250 : tyinfo[i].notnull = NULL;
6306 270250 : if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
6307 31770 : tyinfo[i].typtype == TYPTYPE_DOMAIN)
6308 328 : getDomainConstraints(fout, &(tyinfo[i]));
6309 :
6310 : /*
6311 : * If it's a base type, make a DumpableObject representing a shell
6312 : * definition of the type. We will need to dump that ahead of the I/O
6313 : * functions for the type. Similarly, range types need a shell
6314 : * definition in case they have a canonicalize function.
6315 : *
6316 : * Note: the shell type doesn't have a catId. You might think it
6317 : * should copy the base type's catId, but then it might capture the
6318 : * pg_depend entries for the type, which we don't want.
6319 : */
6320 270250 : if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
6321 31770 : (tyinfo[i].typtype == TYPTYPE_BASE ||
6322 15452 : tyinfo[i].typtype == TYPTYPE_RANGE))
6323 : {
6324 16590 : stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
6325 16590 : stinfo->dobj.objType = DO_SHELL_TYPE;
6326 16590 : stinfo->dobj.catId = nilCatalogId;
6327 16590 : AssignDumpId(&stinfo->dobj);
6328 16590 : stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
6329 16590 : stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
6330 16590 : stinfo->baseType = &(tyinfo[i]);
6331 16590 : tyinfo[i].shellType = stinfo;
6332 :
6333 : /*
6334 : * Initially mark the shell type as not to be dumped. We'll only
6335 : * dump it if the I/O or canonicalize functions need to be dumped;
6336 : * this is taken care of while sorting dependencies.
6337 : */
6338 16590 : stinfo->dobj.dump = DUMP_COMPONENT_NONE;
6339 : }
6340 : }
6341 :
6342 370 : PQclear(res);
6343 :
6344 370 : destroyPQExpBuffer(query);
6345 370 : }
6346 :
6347 : /*
6348 : * getOperators:
6349 : * get information about all operators in the system catalogs
6350 : */
6351 : void
6352 370 : getOperators(Archive *fout)
6353 : {
6354 : PGresult *res;
6355 : int ntups;
6356 : int i;
6357 370 : PQExpBuffer query = createPQExpBuffer();
6358 : OprInfo *oprinfo;
6359 : int i_tableoid;
6360 : int i_oid;
6361 : int i_oprname;
6362 : int i_oprnamespace;
6363 : int i_oprowner;
6364 : int i_oprkind;
6365 : int i_oprleft;
6366 : int i_oprright;
6367 : int i_oprcode;
6368 :
6369 : /*
6370 : * find all operators, including builtin operators; we filter out
6371 : * system-defined operators at dump-out time.
6372 : */
6373 :
6374 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, oprname, "
6375 : "oprnamespace, "
6376 : "oprowner, "
6377 : "oprkind, "
6378 : "oprleft, "
6379 : "oprright, "
6380 : "oprcode::oid AS oprcode "
6381 : "FROM pg_operator");
6382 :
6383 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6384 :
6385 370 : ntups = PQntuples(res);
6386 :
6387 370 : oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
6388 :
6389 370 : i_tableoid = PQfnumber(res, "tableoid");
6390 370 : i_oid = PQfnumber(res, "oid");
6391 370 : i_oprname = PQfnumber(res, "oprname");
6392 370 : i_oprnamespace = PQfnumber(res, "oprnamespace");
6393 370 : i_oprowner = PQfnumber(res, "oprowner");
6394 370 : i_oprkind = PQfnumber(res, "oprkind");
6395 370 : i_oprleft = PQfnumber(res, "oprleft");
6396 370 : i_oprright = PQfnumber(res, "oprright");
6397 370 : i_oprcode = PQfnumber(res, "oprcode");
6398 :
6399 296296 : for (i = 0; i < ntups; i++)
6400 : {
6401 295926 : oprinfo[i].dobj.objType = DO_OPERATOR;
6402 295926 : oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6403 295926 : oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6404 295926 : AssignDumpId(&oprinfo[i].dobj);
6405 295926 : oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
6406 591852 : oprinfo[i].dobj.namespace =
6407 295926 : findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace)));
6408 295926 : oprinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_oprowner));
6409 295926 : oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
6410 295926 : oprinfo[i].oprleft = atooid(PQgetvalue(res, i, i_oprleft));
6411 295926 : oprinfo[i].oprright = atooid(PQgetvalue(res, i, i_oprright));
6412 295926 : oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
6413 :
6414 : /* Decide whether we want to dump it */
6415 295926 : selectDumpableObject(&(oprinfo[i].dobj), fout);
6416 : }
6417 :
6418 370 : PQclear(res);
6419 :
6420 370 : destroyPQExpBuffer(query);
6421 370 : }
6422 :
6423 : /*
6424 : * getCollations:
6425 : * get information about all collations in the system catalogs
6426 : */
6427 : void
6428 370 : getCollations(Archive *fout)
6429 : {
6430 : PGresult *res;
6431 : int ntups;
6432 : int i;
6433 : PQExpBuffer query;
6434 : CollInfo *collinfo;
6435 : int i_tableoid;
6436 : int i_oid;
6437 : int i_collname;
6438 : int i_collnamespace;
6439 : int i_collowner;
6440 : int i_collencoding;
6441 :
6442 370 : query = createPQExpBuffer();
6443 :
6444 : /*
6445 : * find all collations, including builtin collations; we filter out
6446 : * system-defined collations at dump-out time.
6447 : */
6448 :
6449 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, collname, "
6450 : "collnamespace, "
6451 : "collowner, "
6452 : "collencoding "
6453 : "FROM pg_collation");
6454 :
6455 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6456 :
6457 370 : ntups = PQntuples(res);
6458 :
6459 370 : collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
6460 :
6461 370 : i_tableoid = PQfnumber(res, "tableoid");
6462 370 : i_oid = PQfnumber(res, "oid");
6463 370 : i_collname = PQfnumber(res, "collname");
6464 370 : i_collnamespace = PQfnumber(res, "collnamespace");
6465 370 : i_collowner = PQfnumber(res, "collowner");
6466 370 : i_collencoding = PQfnumber(res, "collencoding");
6467 :
6468 302536 : for (i = 0; i < ntups; i++)
6469 : {
6470 302166 : collinfo[i].dobj.objType = DO_COLLATION;
6471 302166 : collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6472 302166 : collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6473 302166 : AssignDumpId(&collinfo[i].dobj);
6474 302166 : collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
6475 604332 : collinfo[i].dobj.namespace =
6476 302166 : findNamespace(atooid(PQgetvalue(res, i, i_collnamespace)));
6477 302166 : collinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_collowner));
6478 302166 : collinfo[i].collencoding = atoi(PQgetvalue(res, i, i_collencoding));
6479 :
6480 : /* Decide whether we want to dump it */
6481 302166 : selectDumpableObject(&(collinfo[i].dobj), fout);
6482 : }
6483 :
6484 370 : PQclear(res);
6485 :
6486 370 : destroyPQExpBuffer(query);
6487 370 : }
6488 :
6489 : /*
6490 : * getConversions:
6491 : * get information about all conversions in the system catalogs
6492 : */
6493 : void
6494 370 : getConversions(Archive *fout)
6495 : {
6496 : PGresult *res;
6497 : int ntups;
6498 : int i;
6499 : PQExpBuffer query;
6500 : ConvInfo *convinfo;
6501 : int i_tableoid;
6502 : int i_oid;
6503 : int i_conname;
6504 : int i_connamespace;
6505 : int i_conowner;
6506 :
6507 370 : query = createPQExpBuffer();
6508 :
6509 : /*
6510 : * find all conversions, including builtin conversions; we filter out
6511 : * system-defined conversions at dump-out time.
6512 : */
6513 :
6514 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, conname, "
6515 : "connamespace, "
6516 : "conowner "
6517 : "FROM pg_conversion");
6518 :
6519 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6520 :
6521 370 : ntups = PQntuples(res);
6522 :
6523 370 : convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
6524 :
6525 370 : i_tableoid = PQfnumber(res, "tableoid");
6526 370 : i_oid = PQfnumber(res, "oid");
6527 370 : i_conname = PQfnumber(res, "conname");
6528 370 : i_connamespace = PQfnumber(res, "connamespace");
6529 370 : i_conowner = PQfnumber(res, "conowner");
6530 :
6531 47832 : for (i = 0; i < ntups; i++)
6532 : {
6533 47462 : convinfo[i].dobj.objType = DO_CONVERSION;
6534 47462 : convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6535 47462 : convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6536 47462 : AssignDumpId(&convinfo[i].dobj);
6537 47462 : convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
6538 94924 : convinfo[i].dobj.namespace =
6539 47462 : findNamespace(atooid(PQgetvalue(res, i, i_connamespace)));
6540 47462 : convinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_conowner));
6541 :
6542 : /* Decide whether we want to dump it */
6543 47462 : selectDumpableObject(&(convinfo[i].dobj), fout);
6544 : }
6545 :
6546 370 : PQclear(res);
6547 :
6548 370 : destroyPQExpBuffer(query);
6549 370 : }
6550 :
6551 : /*
6552 : * getAccessMethods:
6553 : * get information about all user-defined access methods
6554 : */
6555 : void
6556 370 : getAccessMethods(Archive *fout)
6557 : {
6558 : PGresult *res;
6559 : int ntups;
6560 : int i;
6561 : PQExpBuffer query;
6562 : AccessMethodInfo *aminfo;
6563 : int i_tableoid;
6564 : int i_oid;
6565 : int i_amname;
6566 : int i_amhandler;
6567 : int i_amtype;
6568 :
6569 370 : query = createPQExpBuffer();
6570 :
6571 : /*
6572 : * Select all access methods from pg_am table. v9.6 introduced CREATE
6573 : * ACCESS METHOD, so earlier versions usually have only built-in access
6574 : * methods. v9.6 also changed the access method API, replacing dozens of
6575 : * pg_am columns with amhandler. Even if a user created an access method
6576 : * by "INSERT INTO pg_am", we have no way to translate pre-v9.6 pg_am
6577 : * columns to a v9.6+ CREATE ACCESS METHOD. Hence, before v9.6, read
6578 : * pg_am just to facilitate findAccessMethodByOid() providing the
6579 : * OID-to-name mapping.
6580 : */
6581 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, ");
6582 370 : if (fout->remoteVersion >= 90600)
6583 370 : appendPQExpBufferStr(query,
6584 : "amtype, "
6585 : "amhandler::pg_catalog.regproc AS amhandler ");
6586 : else
6587 0 : appendPQExpBufferStr(query,
6588 : "'i'::pg_catalog.\"char\" AS amtype, "
6589 : "'-'::pg_catalog.regproc AS amhandler ");
6590 370 : appendPQExpBufferStr(query, "FROM pg_am");
6591 :
6592 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6593 :
6594 370 : ntups = PQntuples(res);
6595 :
6596 370 : aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
6597 :
6598 370 : i_tableoid = PQfnumber(res, "tableoid");
6599 370 : i_oid = PQfnumber(res, "oid");
6600 370 : i_amname = PQfnumber(res, "amname");
6601 370 : i_amhandler = PQfnumber(res, "amhandler");
6602 370 : i_amtype = PQfnumber(res, "amtype");
6603 :
6604 3228 : for (i = 0; i < ntups; i++)
6605 : {
6606 2858 : aminfo[i].dobj.objType = DO_ACCESS_METHOD;
6607 2858 : aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6608 2858 : aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6609 2858 : AssignDumpId(&aminfo[i].dobj);
6610 2858 : aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
6611 2858 : aminfo[i].dobj.namespace = NULL;
6612 2858 : aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
6613 2858 : aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
6614 :
6615 : /* Decide whether we want to dump it */
6616 2858 : selectDumpableAccessMethod(&(aminfo[i]), fout);
6617 : }
6618 :
6619 370 : PQclear(res);
6620 :
6621 370 : destroyPQExpBuffer(query);
6622 370 : }
6623 :
6624 :
6625 : /*
6626 : * getOpclasses:
6627 : * get information about all opclasses in the system catalogs
6628 : */
6629 : void
6630 370 : getOpclasses(Archive *fout)
6631 : {
6632 : PGresult *res;
6633 : int ntups;
6634 : int i;
6635 370 : PQExpBuffer query = createPQExpBuffer();
6636 : OpclassInfo *opcinfo;
6637 : int i_tableoid;
6638 : int i_oid;
6639 : int i_opcmethod;
6640 : int i_opcname;
6641 : int i_opcnamespace;
6642 : int i_opcowner;
6643 :
6644 : /*
6645 : * find all opclasses, including builtin opclasses; we filter out
6646 : * system-defined opclasses at dump-out time.
6647 : */
6648 :
6649 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, opcmethod, opcname, "
6650 : "opcnamespace, "
6651 : "opcowner "
6652 : "FROM pg_opclass");
6653 :
6654 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6655 :
6656 370 : ntups = PQntuples(res);
6657 :
6658 370 : opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
6659 :
6660 370 : i_tableoid = PQfnumber(res, "tableoid");
6661 370 : i_oid = PQfnumber(res, "oid");
6662 370 : i_opcmethod = PQfnumber(res, "opcmethod");
6663 370 : i_opcname = PQfnumber(res, "opcname");
6664 370 : i_opcnamespace = PQfnumber(res, "opcnamespace");
6665 370 : i_opcowner = PQfnumber(res, "opcowner");
6666 :
6667 66208 : for (i = 0; i < ntups; i++)
6668 : {
6669 65838 : opcinfo[i].dobj.objType = DO_OPCLASS;
6670 65838 : opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6671 65838 : opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6672 65838 : AssignDumpId(&opcinfo[i].dobj);
6673 65838 : opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
6674 131676 : opcinfo[i].dobj.namespace =
6675 65838 : findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace)));
6676 65838 : opcinfo[i].opcmethod = atooid(PQgetvalue(res, i, i_opcmethod));
6677 65838 : opcinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opcowner));
6678 :
6679 : /* Decide whether we want to dump it */
6680 65838 : selectDumpableObject(&(opcinfo[i].dobj), fout);
6681 : }
6682 :
6683 370 : PQclear(res);
6684 :
6685 370 : destroyPQExpBuffer(query);
6686 370 : }
6687 :
6688 : /*
6689 : * getOpfamilies:
6690 : * get information about all opfamilies in the system catalogs
6691 : */
6692 : void
6693 370 : getOpfamilies(Archive *fout)
6694 : {
6695 : PGresult *res;
6696 : int ntups;
6697 : int i;
6698 : PQExpBuffer query;
6699 : OpfamilyInfo *opfinfo;
6700 : int i_tableoid;
6701 : int i_oid;
6702 : int i_opfmethod;
6703 : int i_opfname;
6704 : int i_opfnamespace;
6705 : int i_opfowner;
6706 :
6707 370 : query = createPQExpBuffer();
6708 :
6709 : /*
6710 : * find all opfamilies, including builtin opfamilies; we filter out
6711 : * system-defined opfamilies at dump-out time.
6712 : */
6713 :
6714 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, opfmethod, opfname, "
6715 : "opfnamespace, "
6716 : "opfowner "
6717 : "FROM pg_opfamily");
6718 :
6719 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6720 :
6721 370 : ntups = PQntuples(res);
6722 :
6723 370 : opfinfo = (OpfamilyInfo *) pg_malloc(ntups * sizeof(OpfamilyInfo));
6724 :
6725 370 : i_tableoid = PQfnumber(res, "tableoid");
6726 370 : i_oid = PQfnumber(res, "oid");
6727 370 : i_opfname = PQfnumber(res, "opfname");
6728 370 : i_opfmethod = PQfnumber(res, "opfmethod");
6729 370 : i_opfnamespace = PQfnumber(res, "opfnamespace");
6730 370 : i_opfowner = PQfnumber(res, "opfowner");
6731 :
6732 54692 : for (i = 0; i < ntups; i++)
6733 : {
6734 54322 : opfinfo[i].dobj.objType = DO_OPFAMILY;
6735 54322 : opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6736 54322 : opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6737 54322 : AssignDumpId(&opfinfo[i].dobj);
6738 54322 : opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
6739 108644 : opfinfo[i].dobj.namespace =
6740 54322 : findNamespace(atooid(PQgetvalue(res, i, i_opfnamespace)));
6741 54322 : opfinfo[i].opfmethod = atooid(PQgetvalue(res, i, i_opfmethod));
6742 54322 : opfinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opfowner));
6743 :
6744 : /* Decide whether we want to dump it */
6745 54322 : selectDumpableObject(&(opfinfo[i].dobj), fout);
6746 : }
6747 :
6748 370 : PQclear(res);
6749 :
6750 370 : destroyPQExpBuffer(query);
6751 370 : }
6752 :
6753 : /*
6754 : * getAggregates:
6755 : * get information about all user-defined aggregates in the system catalogs
6756 : */
6757 : void
6758 370 : getAggregates(Archive *fout)
6759 : {
6760 370 : DumpOptions *dopt = fout->dopt;
6761 : PGresult *res;
6762 : int ntups;
6763 : int i;
6764 370 : PQExpBuffer query = createPQExpBuffer();
6765 : AggInfo *agginfo;
6766 : int i_tableoid;
6767 : int i_oid;
6768 : int i_aggname;
6769 : int i_aggnamespace;
6770 : int i_pronargs;
6771 : int i_proargtypes;
6772 : int i_proowner;
6773 : int i_aggacl;
6774 : int i_acldefault;
6775 :
6776 : /*
6777 : * Find all interesting aggregates. See comment in getFuncs() for the
6778 : * rationale behind the filtering logic.
6779 : */
6780 370 : if (fout->remoteVersion >= 90600)
6781 : {
6782 : const char *agg_check;
6783 :
6784 740 : agg_check = (fout->remoteVersion >= 110000 ? "p.prokind = 'a'"
6785 370 : : "p.proisagg");
6786 :
6787 370 : appendPQExpBuffer(query, "SELECT p.tableoid, p.oid, "
6788 : "p.proname AS aggname, "
6789 : "p.pronamespace AS aggnamespace, "
6790 : "p.pronargs, p.proargtypes, "
6791 : "p.proowner, "
6792 : "p.proacl AS aggacl, "
6793 : "acldefault('f', p.proowner) AS acldefault "
6794 : "FROM pg_proc p "
6795 : "LEFT JOIN pg_init_privs pip ON "
6796 : "(p.oid = pip.objoid "
6797 : "AND pip.classoid = 'pg_proc'::regclass "
6798 : "AND pip.objsubid = 0) "
6799 : "WHERE %s AND ("
6800 : "p.pronamespace != "
6801 : "(SELECT oid FROM pg_namespace "
6802 : "WHERE nspname = 'pg_catalog') OR "
6803 : "p.proacl IS DISTINCT FROM pip.initprivs",
6804 : agg_check);
6805 370 : if (dopt->binary_upgrade)
6806 72 : appendPQExpBufferStr(query,
6807 : " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6808 : "classid = 'pg_proc'::regclass AND "
6809 : "objid = p.oid AND "
6810 : "refclassid = 'pg_extension'::regclass AND "
6811 : "deptype = 'e')");
6812 370 : appendPQExpBufferChar(query, ')');
6813 : }
6814 : else
6815 : {
6816 0 : appendPQExpBufferStr(query, "SELECT tableoid, oid, proname AS aggname, "
6817 : "pronamespace AS aggnamespace, "
6818 : "pronargs, proargtypes, "
6819 : "proowner, "
6820 : "proacl AS aggacl, "
6821 : "acldefault('f', proowner) AS acldefault "
6822 : "FROM pg_proc p "
6823 : "WHERE proisagg AND ("
6824 : "pronamespace != "
6825 : "(SELECT oid FROM pg_namespace "
6826 : "WHERE nspname = 'pg_catalog')");
6827 0 : if (dopt->binary_upgrade)
6828 0 : appendPQExpBufferStr(query,
6829 : " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6830 : "classid = 'pg_proc'::regclass AND "
6831 : "objid = p.oid AND "
6832 : "refclassid = 'pg_extension'::regclass AND "
6833 : "deptype = 'e')");
6834 0 : appendPQExpBufferChar(query, ')');
6835 : }
6836 :
6837 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6838 :
6839 370 : ntups = PQntuples(res);
6840 :
6841 370 : agginfo = (AggInfo *) pg_malloc(ntups * sizeof(AggInfo));
6842 :
6843 370 : i_tableoid = PQfnumber(res, "tableoid");
6844 370 : i_oid = PQfnumber(res, "oid");
6845 370 : i_aggname = PQfnumber(res, "aggname");
6846 370 : i_aggnamespace = PQfnumber(res, "aggnamespace");
6847 370 : i_pronargs = PQfnumber(res, "pronargs");
6848 370 : i_proargtypes = PQfnumber(res, "proargtypes");
6849 370 : i_proowner = PQfnumber(res, "proowner");
6850 370 : i_aggacl = PQfnumber(res, "aggacl");
6851 370 : i_acldefault = PQfnumber(res, "acldefault");
6852 :
6853 1180 : for (i = 0; i < ntups; i++)
6854 : {
6855 810 : agginfo[i].aggfn.dobj.objType = DO_AGG;
6856 810 : agginfo[i].aggfn.dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6857 810 : agginfo[i].aggfn.dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6858 810 : AssignDumpId(&agginfo[i].aggfn.dobj);
6859 810 : agginfo[i].aggfn.dobj.name = pg_strdup(PQgetvalue(res, i, i_aggname));
6860 1620 : agginfo[i].aggfn.dobj.namespace =
6861 810 : findNamespace(atooid(PQgetvalue(res, i, i_aggnamespace)));
6862 810 : agginfo[i].aggfn.dacl.acl = pg_strdup(PQgetvalue(res, i, i_aggacl));
6863 810 : agginfo[i].aggfn.dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6864 810 : agginfo[i].aggfn.dacl.privtype = 0;
6865 810 : agginfo[i].aggfn.dacl.initprivs = NULL;
6866 810 : agginfo[i].aggfn.rolname = getRoleName(PQgetvalue(res, i, i_proowner));
6867 810 : agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
6868 810 : agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */
6869 810 : agginfo[i].aggfn.nargs = atoi(PQgetvalue(res, i, i_pronargs));
6870 810 : if (agginfo[i].aggfn.nargs == 0)
6871 112 : agginfo[i].aggfn.argtypes = NULL;
6872 : else
6873 : {
6874 698 : agginfo[i].aggfn.argtypes = (Oid *) pg_malloc(agginfo[i].aggfn.nargs * sizeof(Oid));
6875 698 : parseOidArray(PQgetvalue(res, i, i_proargtypes),
6876 698 : agginfo[i].aggfn.argtypes,
6877 698 : agginfo[i].aggfn.nargs);
6878 : }
6879 810 : agginfo[i].aggfn.postponed_def = false; /* might get set during sort */
6880 :
6881 : /* Decide whether we want to dump it */
6882 810 : selectDumpableObject(&(agginfo[i].aggfn.dobj), fout);
6883 :
6884 : /* Mark whether aggregate has an ACL */
6885 810 : if (!PQgetisnull(res, i, i_aggacl))
6886 50 : agginfo[i].aggfn.dobj.components |= DUMP_COMPONENT_ACL;
6887 : }
6888 :
6889 370 : PQclear(res);
6890 :
6891 370 : destroyPQExpBuffer(query);
6892 370 : }
6893 :
6894 : /*
6895 : * getFuncs:
6896 : * get information about all user-defined functions in the system catalogs
6897 : */
6898 : void
6899 370 : getFuncs(Archive *fout)
6900 : {
6901 370 : DumpOptions *dopt = fout->dopt;
6902 : PGresult *res;
6903 : int ntups;
6904 : int i;
6905 370 : PQExpBuffer query = createPQExpBuffer();
6906 : FuncInfo *finfo;
6907 : int i_tableoid;
6908 : int i_oid;
6909 : int i_proname;
6910 : int i_pronamespace;
6911 : int i_proowner;
6912 : int i_prolang;
6913 : int i_pronargs;
6914 : int i_proargtypes;
6915 : int i_prorettype;
6916 : int i_proacl;
6917 : int i_acldefault;
6918 :
6919 : /*
6920 : * Find all interesting functions. This is a bit complicated:
6921 : *
6922 : * 1. Always exclude aggregates; those are handled elsewhere.
6923 : *
6924 : * 2. Always exclude functions that are internally dependent on something
6925 : * else, since presumably those will be created as a result of creating
6926 : * the something else. This currently acts only to suppress constructor
6927 : * functions for range types. Note this is OK only because the
6928 : * constructors don't have any dependencies the range type doesn't have;
6929 : * otherwise we might not get creation ordering correct.
6930 : *
6931 : * 3. Otherwise, we normally exclude functions in pg_catalog. However, if
6932 : * they're members of extensions and we are in binary-upgrade mode then
6933 : * include them, since we want to dump extension members individually in
6934 : * that mode. Also, if they are used by casts or transforms then we need
6935 : * to gather the information about them, though they won't be dumped if
6936 : * they are built-in. Also, in 9.6 and up, include functions in
6937 : * pg_catalog if they have an ACL different from what's shown in
6938 : * pg_init_privs (so we have to join to pg_init_privs; annoying).
6939 : */
6940 370 : if (fout->remoteVersion >= 90600)
6941 : {
6942 : const char *not_agg_check;
6943 :
6944 740 : not_agg_check = (fout->remoteVersion >= 110000 ? "p.prokind <> 'a'"
6945 370 : : "NOT p.proisagg");
6946 :
6947 370 : appendPQExpBuffer(query,
6948 : "SELECT p.tableoid, p.oid, p.proname, p.prolang, "
6949 : "p.pronargs, p.proargtypes, p.prorettype, "
6950 : "p.proacl, "
6951 : "acldefault('f', p.proowner) AS acldefault, "
6952 : "p.pronamespace, "
6953 : "p.proowner "
6954 : "FROM pg_proc p "
6955 : "LEFT JOIN pg_init_privs pip ON "
6956 : "(p.oid = pip.objoid "
6957 : "AND pip.classoid = 'pg_proc'::regclass "
6958 : "AND pip.objsubid = 0) "
6959 : "WHERE %s"
6960 : "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
6961 : "WHERE classid = 'pg_proc'::regclass AND "
6962 : "objid = p.oid AND deptype = 'i')"
6963 : "\n AND ("
6964 : "\n pronamespace != "
6965 : "(SELECT oid FROM pg_namespace "
6966 : "WHERE nspname = 'pg_catalog')"
6967 : "\n OR EXISTS (SELECT 1 FROM pg_cast"
6968 : "\n WHERE pg_cast.oid > %u "
6969 : "\n AND p.oid = pg_cast.castfunc)"
6970 : "\n OR EXISTS (SELECT 1 FROM pg_transform"
6971 : "\n WHERE pg_transform.oid > %u AND "
6972 : "\n (p.oid = pg_transform.trffromsql"
6973 : "\n OR p.oid = pg_transform.trftosql))",
6974 : not_agg_check,
6975 : g_last_builtin_oid,
6976 : g_last_builtin_oid);
6977 370 : if (dopt->binary_upgrade)
6978 72 : appendPQExpBufferStr(query,
6979 : "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6980 : "classid = 'pg_proc'::regclass AND "
6981 : "objid = p.oid AND "
6982 : "refclassid = 'pg_extension'::regclass AND "
6983 : "deptype = 'e')");
6984 370 : appendPQExpBufferStr(query,
6985 : "\n OR p.proacl IS DISTINCT FROM pip.initprivs");
6986 370 : appendPQExpBufferChar(query, ')');
6987 : }
6988 : else
6989 : {
6990 0 : appendPQExpBuffer(query,
6991 : "SELECT tableoid, oid, proname, prolang, "
6992 : "pronargs, proargtypes, prorettype, proacl, "
6993 : "acldefault('f', proowner) AS acldefault, "
6994 : "pronamespace, "
6995 : "proowner "
6996 : "FROM pg_proc p "
6997 : "WHERE NOT proisagg"
6998 : "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
6999 : "WHERE classid = 'pg_proc'::regclass AND "
7000 : "objid = p.oid AND deptype = 'i')"
7001 : "\n AND ("
7002 : "\n pronamespace != "
7003 : "(SELECT oid FROM pg_namespace "
7004 : "WHERE nspname = 'pg_catalog')"
7005 : "\n OR EXISTS (SELECT 1 FROM pg_cast"
7006 : "\n WHERE pg_cast.oid > '%u'::oid"
7007 : "\n AND p.oid = pg_cast.castfunc)",
7008 : g_last_builtin_oid);
7009 :
7010 0 : if (fout->remoteVersion >= 90500)
7011 0 : appendPQExpBuffer(query,
7012 : "\n OR EXISTS (SELECT 1 FROM pg_transform"
7013 : "\n WHERE pg_transform.oid > '%u'::oid"
7014 : "\n AND (p.oid = pg_transform.trffromsql"
7015 : "\n OR p.oid = pg_transform.trftosql))",
7016 : g_last_builtin_oid);
7017 :
7018 0 : if (dopt->binary_upgrade)
7019 0 : appendPQExpBufferStr(query,
7020 : "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
7021 : "classid = 'pg_proc'::regclass AND "
7022 : "objid = p.oid AND "
7023 : "refclassid = 'pg_extension'::regclass AND "
7024 : "deptype = 'e')");
7025 0 : appendPQExpBufferChar(query, ')');
7026 : }
7027 :
7028 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7029 :
7030 370 : ntups = PQntuples(res);
7031 :
7032 370 : finfo = (FuncInfo *) pg_malloc0(ntups * sizeof(FuncInfo));
7033 :
7034 370 : i_tableoid = PQfnumber(res, "tableoid");
7035 370 : i_oid = PQfnumber(res, "oid");
7036 370 : i_proname = PQfnumber(res, "proname");
7037 370 : i_pronamespace = PQfnumber(res, "pronamespace");
7038 370 : i_proowner = PQfnumber(res, "proowner");
7039 370 : i_prolang = PQfnumber(res, "prolang");
7040 370 : i_pronargs = PQfnumber(res, "pronargs");
7041 370 : i_proargtypes = PQfnumber(res, "proargtypes");
7042 370 : i_prorettype = PQfnumber(res, "prorettype");
7043 370 : i_proacl = PQfnumber(res, "proacl");
7044 370 : i_acldefault = PQfnumber(res, "acldefault");
7045 :
7046 10000 : for (i = 0; i < ntups; i++)
7047 : {
7048 9630 : finfo[i].dobj.objType = DO_FUNC;
7049 9630 : finfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
7050 9630 : finfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
7051 9630 : AssignDumpId(&finfo[i].dobj);
7052 9630 : finfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_proname));
7053 19260 : finfo[i].dobj.namespace =
7054 9630 : findNamespace(atooid(PQgetvalue(res, i, i_pronamespace)));
7055 9630 : finfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_proacl));
7056 9630 : finfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
7057 9630 : finfo[i].dacl.privtype = 0;
7058 9630 : finfo[i].dacl.initprivs = NULL;
7059 9630 : finfo[i].rolname = getRoleName(PQgetvalue(res, i, i_proowner));
7060 9630 : finfo[i].lang = atooid(PQgetvalue(res, i, i_prolang));
7061 9630 : finfo[i].prorettype = atooid(PQgetvalue(res, i, i_prorettype));
7062 9630 : finfo[i].nargs = atoi(PQgetvalue(res, i, i_pronargs));
7063 9630 : if (finfo[i].nargs == 0)
7064 2198 : finfo[i].argtypes = NULL;
7065 : else
7066 : {
7067 7432 : finfo[i].argtypes = (Oid *) pg_malloc(finfo[i].nargs * sizeof(Oid));
7068 7432 : parseOidArray(PQgetvalue(res, i, i_proargtypes),
7069 7432 : finfo[i].argtypes, finfo[i].nargs);
7070 : }
7071 9630 : finfo[i].postponed_def = false; /* might get set during sort */
7072 :
7073 : /* Decide whether we want to dump it */
7074 9630 : selectDumpableObject(&(finfo[i].dobj), fout);
7075 :
7076 : /* Mark whether function has an ACL */
7077 9630 : if (!PQgetisnull(res, i, i_proacl))
7078 304 : finfo[i].dobj.components |= DUMP_COMPONENT_ACL;
7079 : }
7080 :
7081 370 : PQclear(res);
7082 :
7083 370 : destroyPQExpBuffer(query);
7084 370 : }
7085 :
7086 : /*
7087 : * getRelationStatistics
7088 : * register the statistics object as a dependent of the relation.
7089 : *
7090 : * reltuples is passed as a string to avoid complexities in converting from/to
7091 : * floating point.
7092 : */
7093 : static RelStatsInfo *
7094 20184 : getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
7095 : char *reltuples, int32 relallvisible,
7096 : int32 relallfrozen, char relkind,
7097 : char **indAttNames, int nindAttNames)
7098 : {
7099 20184 : if (!fout->dopt->dumpStatistics)
7100 12078 : return NULL;
7101 :
7102 8106 : if ((relkind == RELKIND_RELATION) ||
7103 3414 : (relkind == RELKIND_PARTITIONED_TABLE) ||
7104 2094 : (relkind == RELKIND_INDEX) ||
7105 1374 : (relkind == RELKIND_PARTITIONED_INDEX) ||
7106 598 : (relkind == RELKIND_MATVIEW ||
7107 : relkind == RELKIND_FOREIGN_TABLE))
7108 : {
7109 7584 : RelStatsInfo *info = pg_malloc0(sizeof(RelStatsInfo));
7110 7584 : DumpableObject *dobj = &info->dobj;
7111 :
7112 7584 : dobj->objType = DO_REL_STATS;
7113 7584 : dobj->catId.tableoid = 0;
7114 7584 : dobj->catId.oid = 0;
7115 7584 : AssignDumpId(dobj);
7116 7584 : dobj->dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
7117 7584 : dobj->dependencies[0] = rel->dumpId;
7118 7584 : dobj->nDeps = 1;
7119 7584 : dobj->allocDeps = 1;
7120 7584 : dobj->components |= DUMP_COMPONENT_STATISTICS;
7121 7584 : dobj->name = pg_strdup(rel->name);
7122 7584 : dobj->namespace = rel->namespace;
7123 7584 : info->relpages = relpages;
7124 7584 : info->reltuples = pstrdup(reltuples);
7125 7584 : info->relallvisible = relallvisible;
7126 7584 : info->relallfrozen = relallfrozen;
7127 7584 : info->relkind = relkind;
7128 7584 : info->indAttNames = indAttNames;
7129 7584 : info->nindAttNames = nindAttNames;
7130 :
7131 : /*
7132 : * Ordinarily, stats go in SECTION_DATA for tables and
7133 : * SECTION_POST_DATA for indexes.
7134 : *
7135 : * However, the section may be updated later for materialized view
7136 : * stats. REFRESH MATERIALIZED VIEW replaces the storage and resets
7137 : * the stats, so the stats must be restored after the data. Also, the
7138 : * materialized view definition may be postponed to SECTION_POST_DATA
7139 : * (see repairMatViewBoundaryMultiLoop()).
7140 : */
7141 7584 : switch (info->relkind)
7142 : {
7143 5544 : case RELKIND_RELATION:
7144 : case RELKIND_PARTITIONED_TABLE:
7145 : case RELKIND_MATVIEW:
7146 : case RELKIND_FOREIGN_TABLE:
7147 5544 : info->section = SECTION_DATA;
7148 5544 : break;
7149 2040 : case RELKIND_INDEX:
7150 : case RELKIND_PARTITIONED_INDEX:
7151 2040 : info->section = SECTION_POST_DATA;
7152 2040 : break;
7153 0 : default:
7154 0 : pg_fatal("cannot dump statistics for relation kind \"%c\"",
7155 : info->relkind);
7156 : }
7157 :
7158 7584 : return info;
7159 : }
7160 522 : return NULL;
7161 : }
7162 :
7163 : /*
7164 : * getTables
7165 : * read all the tables (no indexes) in the system catalogs,
7166 : * and return them as an array of TableInfo structures
7167 : *
7168 : * *numTables is set to the number of tables read in
7169 : */
7170 : TableInfo *
7171 372 : getTables(Archive *fout, int *numTables)
7172 : {
7173 372 : DumpOptions *dopt = fout->dopt;
7174 : PGresult *res;
7175 : int ntups;
7176 : int i;
7177 372 : PQExpBuffer query = createPQExpBuffer();
7178 : TableInfo *tblinfo;
7179 : int i_reltableoid;
7180 : int i_reloid;
7181 : int i_relname;
7182 : int i_relnamespace;
7183 : int i_relkind;
7184 : int i_reltype;
7185 : int i_relowner;
7186 : int i_relchecks;
7187 : int i_relhasindex;
7188 : int i_relhasrules;
7189 : int i_relpages;
7190 : int i_reltuples;
7191 : int i_relallvisible;
7192 : int i_relallfrozen;
7193 : int i_toastpages;
7194 : int i_owning_tab;
7195 : int i_owning_col;
7196 : int i_reltablespace;
7197 : int i_relhasoids;
7198 : int i_relhastriggers;
7199 : int i_relpersistence;
7200 : int i_relispopulated;
7201 : int i_relreplident;
7202 : int i_relrowsec;
7203 : int i_relforcerowsec;
7204 : int i_relfrozenxid;
7205 : int i_toastfrozenxid;
7206 : int i_toastoid;
7207 : int i_relminmxid;
7208 : int i_toastminmxid;
7209 : int i_reloptions;
7210 : int i_checkoption;
7211 : int i_toastreloptions;
7212 : int i_reloftype;
7213 : int i_foreignserver;
7214 : int i_amname;
7215 : int i_is_identity_sequence;
7216 : int i_relacl;
7217 : int i_acldefault;
7218 : int i_ispartition;
7219 :
7220 : /*
7221 : * Find all the tables and table-like objects.
7222 : *
7223 : * We must fetch all tables in this phase because otherwise we cannot
7224 : * correctly identify inherited columns, owned sequences, etc.
7225 : *
7226 : * We include system catalogs, so that we can work if a user table is
7227 : * defined to inherit from a system catalog (pretty weird, but...)
7228 : *
7229 : * Note: in this phase we should collect only a minimal amount of
7230 : * information about each table, basically just enough to decide if it is
7231 : * interesting. In particular, since we do not yet have lock on any user
7232 : * table, we MUST NOT invoke any server-side data collection functions
7233 : * (for instance, pg_get_partkeydef()). Those are likely to fail or give
7234 : * wrong answers if any concurrent DDL is happening.
7235 : */
7236 :
7237 372 : appendPQExpBufferStr(query,
7238 : "SELECT c.tableoid, c.oid, c.relname, "
7239 : "c.relnamespace, c.relkind, c.reltype, "
7240 : "c.relowner, "
7241 : "c.relchecks, "
7242 : "c.relhasindex, c.relhasrules, c.relpages, "
7243 : "c.reltuples, c.relallvisible, ");
7244 :
7245 372 : if (fout->remoteVersion >= 180000)
7246 372 : appendPQExpBufferStr(query, "c.relallfrozen, ");
7247 : else
7248 0 : appendPQExpBufferStr(query, "0 AS relallfrozen, ");
7249 :
7250 372 : appendPQExpBufferStr(query,
7251 : "c.relhastriggers, c.relpersistence, "
7252 : "c.reloftype, "
7253 : "c.relacl, "
7254 : "acldefault(CASE WHEN c.relkind = " CppAsString2(RELKIND_SEQUENCE)
7255 : " THEN 's'::\"char\" ELSE 'r'::\"char\" END, c.relowner) AS acldefault, "
7256 : "CASE WHEN c.relkind = " CppAsString2(RELKIND_FOREIGN_TABLE) " THEN "
7257 : "(SELECT ftserver FROM pg_catalog.pg_foreign_table WHERE ftrelid = c.oid) "
7258 : "ELSE 0 END AS foreignserver, "
7259 : "c.relfrozenxid, tc.relfrozenxid AS tfrozenxid, "
7260 : "tc.oid AS toid, "
7261 : "tc.relpages AS toastpages, "
7262 : "tc.reloptions AS toast_reloptions, "
7263 : "d.refobjid AS owning_tab, "
7264 : "d.refobjsubid AS owning_col, "
7265 : "tsp.spcname AS reltablespace, ");
7266 :
7267 372 : if (fout->remoteVersion >= 120000)
7268 372 : appendPQExpBufferStr(query,
7269 : "false AS relhasoids, ");
7270 : else
7271 0 : appendPQExpBufferStr(query,
7272 : "c.relhasoids, ");
7273 :
7274 372 : if (fout->remoteVersion >= 90300)
7275 372 : appendPQExpBufferStr(query,
7276 : "c.relispopulated, ");
7277 : else
7278 0 : appendPQExpBufferStr(query,
7279 : "'t' as relispopulated, ");
7280 :
7281 372 : if (fout->remoteVersion >= 90400)
7282 372 : appendPQExpBufferStr(query,
7283 : "c.relreplident, ");
7284 : else
7285 0 : appendPQExpBufferStr(query,
7286 : "'d' AS relreplident, ");
7287 :
7288 372 : if (fout->remoteVersion >= 90500)
7289 372 : appendPQExpBufferStr(query,
7290 : "c.relrowsecurity, c.relforcerowsecurity, ");
7291 : else
7292 0 : appendPQExpBufferStr(query,
7293 : "false AS relrowsecurity, "
7294 : "false AS relforcerowsecurity, ");
7295 :
7296 372 : if (fout->remoteVersion >= 90300)
7297 372 : appendPQExpBufferStr(query,
7298 : "c.relminmxid, tc.relminmxid AS tminmxid, ");
7299 : else
7300 0 : appendPQExpBufferStr(query,
7301 : "0 AS relminmxid, 0 AS tminmxid, ");
7302 :
7303 372 : if (fout->remoteVersion >= 90300)
7304 372 : appendPQExpBufferStr(query,
7305 : "array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded') AS reloptions, "
7306 : "CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
7307 : "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, ");
7308 : else
7309 0 : appendPQExpBufferStr(query,
7310 : "c.reloptions, NULL AS checkoption, ");
7311 :
7312 372 : if (fout->remoteVersion >= 90600)
7313 372 : appendPQExpBufferStr(query,
7314 : "am.amname, ");
7315 : else
7316 0 : appendPQExpBufferStr(query,
7317 : "NULL AS amname, ");
7318 :
7319 372 : if (fout->remoteVersion >= 90600)
7320 372 : appendPQExpBufferStr(query,
7321 : "(d.deptype = 'i') IS TRUE AS is_identity_sequence, ");
7322 : else
7323 0 : appendPQExpBufferStr(query,
7324 : "false AS is_identity_sequence, ");
7325 :
7326 372 : if (fout->remoteVersion >= 100000)
7327 372 : appendPQExpBufferStr(query,
7328 : "c.relispartition AS ispartition ");
7329 : else
7330 0 : appendPQExpBufferStr(query,
7331 : "false AS ispartition ");
7332 :
7333 : /*
7334 : * Left join to pg_depend to pick up dependency info linking sequences to
7335 : * their owning column, if any (note this dependency is AUTO except for
7336 : * identity sequences, where it's INTERNAL). Also join to pg_tablespace to
7337 : * collect the spcname.
7338 : */
7339 372 : appendPQExpBufferStr(query,
7340 : "\nFROM pg_class c\n"
7341 : "LEFT JOIN pg_depend d ON "
7342 : "(c.relkind = " CppAsString2(RELKIND_SEQUENCE) " AND "
7343 : "d.classid = 'pg_class'::regclass AND d.objid = c.oid AND "
7344 : "d.objsubid = 0 AND "
7345 : "d.refclassid = 'pg_class'::regclass AND d.deptype IN ('a', 'i'))\n"
7346 : "LEFT JOIN pg_tablespace tsp ON (tsp.oid = c.reltablespace)\n");
7347 :
7348 : /*
7349 : * In 9.6 and up, left join to pg_am to pick up the amname.
7350 : */
7351 372 : if (fout->remoteVersion >= 90600)
7352 372 : appendPQExpBufferStr(query,
7353 : "LEFT JOIN pg_am am ON (c.relam = am.oid)\n");
7354 :
7355 : /*
7356 : * We purposefully ignore toast OIDs for partitioned tables; the reason is
7357 : * that versions 10 and 11 have them, but later versions do not, so
7358 : * emitting them causes the upgrade to fail.
7359 : */
7360 372 : appendPQExpBufferStr(query,
7361 : "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid"
7362 : " AND tc.relkind = " CppAsString2(RELKIND_TOASTVALUE)
7363 : " AND c.relkind <> " CppAsString2(RELKIND_PARTITIONED_TABLE) ")\n");
7364 :
7365 : /*
7366 : * Restrict to interesting relkinds (in particular, not indexes). Not all
7367 : * relkinds are possible in older servers, but it's not worth the trouble
7368 : * to emit a version-dependent list.
7369 : *
7370 : * Composite-type table entries won't be dumped as such, but we have to
7371 : * make a DumpableObject for them so that we can track dependencies of the
7372 : * composite type (pg_depend entries for columns of the composite type
7373 : * link to the pg_class entry not the pg_type entry).
7374 : */
7375 372 : appendPQExpBufferStr(query,
7376 : "WHERE c.relkind IN ("
7377 : CppAsString2(RELKIND_RELATION) ", "
7378 : CppAsString2(RELKIND_SEQUENCE) ", "
7379 : CppAsString2(RELKIND_VIEW) ", "
7380 : CppAsString2(RELKIND_COMPOSITE_TYPE) ", "
7381 : CppAsString2(RELKIND_MATVIEW) ", "
7382 : CppAsString2(RELKIND_FOREIGN_TABLE) ", "
7383 : CppAsString2(RELKIND_PARTITIONED_TABLE) ")\n"
7384 : "ORDER BY c.oid");
7385 :
7386 372 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7387 :
7388 372 : ntups = PQntuples(res);
7389 :
7390 372 : *numTables = ntups;
7391 :
7392 : /*
7393 : * Extract data from result and lock dumpable tables. We do the locking
7394 : * before anything else, to minimize the window wherein a table could
7395 : * disappear under us.
7396 : *
7397 : * Note that we have to save info about all tables here, even when dumping
7398 : * only one, because we don't yet know which tables might be inheritance
7399 : * ancestors of the target table.
7400 : */
7401 372 : tblinfo = (TableInfo *) pg_malloc0(ntups * sizeof(TableInfo));
7402 :
7403 372 : i_reltableoid = PQfnumber(res, "tableoid");
7404 372 : i_reloid = PQfnumber(res, "oid");
7405 372 : i_relname = PQfnumber(res, "relname");
7406 372 : i_relnamespace = PQfnumber(res, "relnamespace");
7407 372 : i_relkind = PQfnumber(res, "relkind");
7408 372 : i_reltype = PQfnumber(res, "reltype");
7409 372 : i_relowner = PQfnumber(res, "relowner");
7410 372 : i_relchecks = PQfnumber(res, "relchecks");
7411 372 : i_relhasindex = PQfnumber(res, "relhasindex");
7412 372 : i_relhasrules = PQfnumber(res, "relhasrules");
7413 372 : i_relpages = PQfnumber(res, "relpages");
7414 372 : i_reltuples = PQfnumber(res, "reltuples");
7415 372 : i_relallvisible = PQfnumber(res, "relallvisible");
7416 372 : i_relallfrozen = PQfnumber(res, "relallfrozen");
7417 372 : i_toastpages = PQfnumber(res, "toastpages");
7418 372 : i_owning_tab = PQfnumber(res, "owning_tab");
7419 372 : i_owning_col = PQfnumber(res, "owning_col");
7420 372 : i_reltablespace = PQfnumber(res, "reltablespace");
7421 372 : i_relhasoids = PQfnumber(res, "relhasoids");
7422 372 : i_relhastriggers = PQfnumber(res, "relhastriggers");
7423 372 : i_relpersistence = PQfnumber(res, "relpersistence");
7424 372 : i_relispopulated = PQfnumber(res, "relispopulated");
7425 372 : i_relreplident = PQfnumber(res, "relreplident");
7426 372 : i_relrowsec = PQfnumber(res, "relrowsecurity");
7427 372 : i_relforcerowsec = PQfnumber(res, "relforcerowsecurity");
7428 372 : i_relfrozenxid = PQfnumber(res, "relfrozenxid");
7429 372 : i_toastfrozenxid = PQfnumber(res, "tfrozenxid");
7430 372 : i_toastoid = PQfnumber(res, "toid");
7431 372 : i_relminmxid = PQfnumber(res, "relminmxid");
7432 372 : i_toastminmxid = PQfnumber(res, "tminmxid");
7433 372 : i_reloptions = PQfnumber(res, "reloptions");
7434 372 : i_checkoption = PQfnumber(res, "checkoption");
7435 372 : i_toastreloptions = PQfnumber(res, "toast_reloptions");
7436 372 : i_reloftype = PQfnumber(res, "reloftype");
7437 372 : i_foreignserver = PQfnumber(res, "foreignserver");
7438 372 : i_amname = PQfnumber(res, "amname");
7439 372 : i_is_identity_sequence = PQfnumber(res, "is_identity_sequence");
7440 372 : i_relacl = PQfnumber(res, "relacl");
7441 372 : i_acldefault = PQfnumber(res, "acldefault");
7442 372 : i_ispartition = PQfnumber(res, "ispartition");
7443 :
7444 372 : if (dopt->lockWaitTimeout)
7445 : {
7446 : /*
7447 : * Arrange to fail instead of waiting forever for a table lock.
7448 : *
7449 : * NB: this coding assumes that the only queries issued within the
7450 : * following loop are LOCK TABLEs; else the timeout may be undesirably
7451 : * applied to other things too.
7452 : */
7453 4 : resetPQExpBuffer(query);
7454 4 : appendPQExpBufferStr(query, "SET statement_timeout = ");
7455 4 : appendStringLiteralConn(query, dopt->lockWaitTimeout, GetConnection(fout));
7456 4 : ExecuteSqlStatement(fout, query->data);
7457 : }
7458 :
7459 372 : resetPQExpBuffer(query);
7460 :
7461 99374 : for (i = 0; i < ntups; i++)
7462 : {
7463 99002 : int32 relallvisible = atoi(PQgetvalue(res, i, i_relallvisible));
7464 99002 : int32 relallfrozen = atoi(PQgetvalue(res, i, i_relallfrozen));
7465 :
7466 99002 : tblinfo[i].dobj.objType = DO_TABLE;
7467 99002 : tblinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_reltableoid));
7468 99002 : tblinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_reloid));
7469 99002 : AssignDumpId(&tblinfo[i].dobj);
7470 99002 : tblinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_relname));
7471 198004 : tblinfo[i].dobj.namespace =
7472 99002 : findNamespace(atooid(PQgetvalue(res, i, i_relnamespace)));
7473 99002 : tblinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_relacl));
7474 99002 : tblinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
7475 99002 : tblinfo[i].dacl.privtype = 0;
7476 99002 : tblinfo[i].dacl.initprivs = NULL;
7477 99002 : tblinfo[i].relkind = *(PQgetvalue(res, i, i_relkind));
7478 99002 : tblinfo[i].reltype = atooid(PQgetvalue(res, i, i_reltype));
7479 99002 : tblinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_relowner));
7480 99002 : tblinfo[i].ncheck = atoi(PQgetvalue(res, i, i_relchecks));
7481 99002 : tblinfo[i].hasindex = (strcmp(PQgetvalue(res, i, i_relhasindex), "t") == 0);
7482 99002 : tblinfo[i].hasrules = (strcmp(PQgetvalue(res, i, i_relhasrules), "t") == 0);
7483 99002 : tblinfo[i].relpages = atoi(PQgetvalue(res, i, i_relpages));
7484 99002 : if (PQgetisnull(res, i, i_toastpages))
7485 79400 : tblinfo[i].toastpages = 0;
7486 : else
7487 19602 : tblinfo[i].toastpages = atoi(PQgetvalue(res, i, i_toastpages));
7488 99002 : if (PQgetisnull(res, i, i_owning_tab))
7489 : {
7490 98136 : tblinfo[i].owning_tab = InvalidOid;
7491 98136 : tblinfo[i].owning_col = 0;
7492 : }
7493 : else
7494 : {
7495 866 : tblinfo[i].owning_tab = atooid(PQgetvalue(res, i, i_owning_tab));
7496 866 : tblinfo[i].owning_col = atoi(PQgetvalue(res, i, i_owning_col));
7497 : }
7498 99002 : tblinfo[i].reltablespace = pg_strdup(PQgetvalue(res, i, i_reltablespace));
7499 99002 : tblinfo[i].hasoids = (strcmp(PQgetvalue(res, i, i_relhasoids), "t") == 0);
7500 99002 : tblinfo[i].hastriggers = (strcmp(PQgetvalue(res, i, i_relhastriggers), "t") == 0);
7501 99002 : tblinfo[i].relpersistence = *(PQgetvalue(res, i, i_relpersistence));
7502 99002 : tblinfo[i].relispopulated = (strcmp(PQgetvalue(res, i, i_relispopulated), "t") == 0);
7503 99002 : tblinfo[i].relreplident = *(PQgetvalue(res, i, i_relreplident));
7504 99002 : tblinfo[i].rowsec = (strcmp(PQgetvalue(res, i, i_relrowsec), "t") == 0);
7505 99002 : tblinfo[i].forcerowsec = (strcmp(PQgetvalue(res, i, i_relforcerowsec), "t") == 0);
7506 99002 : tblinfo[i].frozenxid = atooid(PQgetvalue(res, i, i_relfrozenxid));
7507 99002 : tblinfo[i].toast_frozenxid = atooid(PQgetvalue(res, i, i_toastfrozenxid));
7508 99002 : tblinfo[i].toast_oid = atooid(PQgetvalue(res, i, i_toastoid));
7509 99002 : tblinfo[i].minmxid = atooid(PQgetvalue(res, i, i_relminmxid));
7510 99002 : tblinfo[i].toast_minmxid = atooid(PQgetvalue(res, i, i_toastminmxid));
7511 99002 : tblinfo[i].reloptions = pg_strdup(PQgetvalue(res, i, i_reloptions));
7512 99002 : if (PQgetisnull(res, i, i_checkoption))
7513 98898 : tblinfo[i].checkoption = NULL;
7514 : else
7515 104 : tblinfo[i].checkoption = pg_strdup(PQgetvalue(res, i, i_checkoption));
7516 99002 : tblinfo[i].toast_reloptions = pg_strdup(PQgetvalue(res, i, i_toastreloptions));
7517 99002 : tblinfo[i].reloftype = atooid(PQgetvalue(res, i, i_reloftype));
7518 99002 : tblinfo[i].foreign_server = atooid(PQgetvalue(res, i, i_foreignserver));
7519 99002 : if (PQgetisnull(res, i, i_amname))
7520 58848 : tblinfo[i].amname = NULL;
7521 : else
7522 40154 : tblinfo[i].amname = pg_strdup(PQgetvalue(res, i, i_amname));
7523 99002 : tblinfo[i].is_identity_sequence = (strcmp(PQgetvalue(res, i, i_is_identity_sequence), "t") == 0);
7524 99002 : tblinfo[i].ispartition = (strcmp(PQgetvalue(res, i, i_ispartition), "t") == 0);
7525 :
7526 : /* other fields were zeroed above */
7527 :
7528 : /*
7529 : * Decide whether we want to dump this table.
7530 : */
7531 99002 : if (tblinfo[i].relkind == RELKIND_COMPOSITE_TYPE)
7532 378 : tblinfo[i].dobj.dump = DUMP_COMPONENT_NONE;
7533 : else
7534 98624 : selectDumpableTable(&tblinfo[i], fout);
7535 :
7536 : /*
7537 : * Now, consider the table "interesting" if we need to dump its
7538 : * definition, data or its statistics. Later on, we'll skip a lot of
7539 : * data collection for uninteresting tables.
7540 : *
7541 : * Note: the "interesting" flag will also be set by flagInhTables for
7542 : * parents of interesting tables, so that we collect necessary
7543 : * inheritance info even when the parents are not themselves being
7544 : * dumped. This is the main reason why we need an "interesting" flag
7545 : * that's separate from the components-to-dump bitmask.
7546 : */
7547 99002 : tblinfo[i].interesting = (tblinfo[i].dobj.dump &
7548 : (DUMP_COMPONENT_DEFINITION |
7549 : DUMP_COMPONENT_DATA |
7550 99002 : DUMP_COMPONENT_STATISTICS)) != 0;
7551 :
7552 99002 : tblinfo[i].dummy_view = false; /* might get set during sort */
7553 99002 : tblinfo[i].postponed_def = false; /* might get set during sort */
7554 :
7555 : /* Tables have data */
7556 99002 : tblinfo[i].dobj.components |= DUMP_COMPONENT_DATA;
7557 :
7558 : /* Mark whether table has an ACL */
7559 99002 : if (!PQgetisnull(res, i, i_relacl))
7560 78584 : tblinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
7561 99002 : tblinfo[i].hascolumnACLs = false; /* may get set later */
7562 :
7563 : /* Add statistics */
7564 99002 : if (tblinfo[i].interesting)
7565 : {
7566 : RelStatsInfo *stats;
7567 :
7568 29580 : stats = getRelationStatistics(fout, &tblinfo[i].dobj,
7569 14790 : tblinfo[i].relpages,
7570 : PQgetvalue(res, i, i_reltuples),
7571 : relallvisible, relallfrozen,
7572 14790 : tblinfo[i].relkind, NULL, 0);
7573 14790 : if (tblinfo[i].relkind == RELKIND_MATVIEW)
7574 980 : tblinfo[i].stats = stats;
7575 : }
7576 :
7577 : /*
7578 : * Read-lock target tables to make sure they aren't DROPPED or altered
7579 : * in schema before we get around to dumping them.
7580 : *
7581 : * Note that we don't explicitly lock parents of the target tables; we
7582 : * assume our lock on the child is enough to prevent schema
7583 : * alterations to parent tables.
7584 : *
7585 : * NOTE: it'd be kinda nice to lock other relations too, not only
7586 : * plain or partitioned tables, but the backend doesn't presently
7587 : * allow that.
7588 : *
7589 : * We only need to lock the table for certain components; see
7590 : * pg_dump.h
7591 : */
7592 99002 : if ((tblinfo[i].dobj.dump & DUMP_COMPONENTS_REQUIRING_LOCK) &&
7593 14790 : (tblinfo[i].relkind == RELKIND_RELATION ||
7594 4230 : tblinfo[i].relkind == RELKIND_PARTITIONED_TABLE))
7595 : {
7596 : /*
7597 : * Tables are locked in batches. When dumping from a remote
7598 : * server this can save a significant amount of time by reducing
7599 : * the number of round trips.
7600 : */
7601 11796 : if (query->len == 0)
7602 244 : appendPQExpBuffer(query, "LOCK TABLE %s",
7603 244 : fmtQualifiedDumpable(&tblinfo[i]));
7604 : else
7605 : {
7606 11552 : appendPQExpBuffer(query, ", %s",
7607 11552 : fmtQualifiedDumpable(&tblinfo[i]));
7608 :
7609 : /* Arbitrarily end a batch when query length reaches 100K. */
7610 11552 : if (query->len >= 100000)
7611 : {
7612 : /* Lock another batch of tables. */
7613 0 : appendPQExpBufferStr(query, " IN ACCESS SHARE MODE");
7614 0 : ExecuteSqlStatement(fout, query->data);
7615 0 : resetPQExpBuffer(query);
7616 : }
7617 : }
7618 : }
7619 : }
7620 :
7621 372 : if (query->len != 0)
7622 : {
7623 : /* Lock the tables in the last batch. */
7624 244 : appendPQExpBufferStr(query, " IN ACCESS SHARE MODE");
7625 244 : ExecuteSqlStatement(fout, query->data);
7626 : }
7627 :
7628 370 : if (dopt->lockWaitTimeout)
7629 : {
7630 4 : ExecuteSqlStatement(fout, "SET statement_timeout = 0");
7631 : }
7632 :
7633 370 : PQclear(res);
7634 :
7635 370 : destroyPQExpBuffer(query);
7636 :
7637 370 : return tblinfo;
7638 : }
7639 :
7640 : /*
7641 : * getOwnedSeqs
7642 : * identify owned sequences and mark them as dumpable if owning table is
7643 : *
7644 : * We used to do this in getTables(), but it's better to do it after the
7645 : * index used by findTableByOid() has been set up.
7646 : */
7647 : void
7648 370 : getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables)
7649 : {
7650 : int i;
7651 :
7652 : /*
7653 : * Force sequences that are "owned" by table columns to be dumped whenever
7654 : * their owning table is being dumped.
7655 : */
7656 98826 : for (i = 0; i < numTables; i++)
7657 : {
7658 98456 : TableInfo *seqinfo = &tblinfo[i];
7659 : TableInfo *owning_tab;
7660 :
7661 98456 : if (!OidIsValid(seqinfo->owning_tab))
7662 97596 : continue; /* not an owned sequence */
7663 :
7664 860 : owning_tab = findTableByOid(seqinfo->owning_tab);
7665 860 : if (owning_tab == NULL)
7666 0 : pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
7667 : seqinfo->owning_tab, seqinfo->dobj.catId.oid);
7668 :
7669 : /*
7670 : * For an identity sequence, dump exactly the same components for the
7671 : * sequence as for the owning table. This is important because we
7672 : * treat the identity sequence as an integral part of the table. For
7673 : * example, there is not any DDL command that allows creation of such
7674 : * a sequence independently of the table.
7675 : *
7676 : * For other owned sequences such as serial sequences, we need to dump
7677 : * the components that are being dumped for the table and any
7678 : * components that the sequence is explicitly marked with.
7679 : *
7680 : * We can't simply use the set of components which are being dumped
7681 : * for the table as the table might be in an extension (and only the
7682 : * non-extension components, eg: ACLs if changed, security labels, and
7683 : * policies, are being dumped) while the sequence is not (and
7684 : * therefore the definition and other components should also be
7685 : * dumped).
7686 : *
7687 : * If the sequence is part of the extension then it should be properly
7688 : * marked by checkExtensionMembership() and this will be a no-op as
7689 : * the table will be equivalently marked.
7690 : */
7691 860 : if (seqinfo->is_identity_sequence)
7692 410 : seqinfo->dobj.dump = owning_tab->dobj.dump;
7693 : else
7694 450 : seqinfo->dobj.dump |= owning_tab->dobj.dump;
7695 :
7696 : /* Make sure that necessary data is available if we're dumping it */
7697 860 : if (seqinfo->dobj.dump != DUMP_COMPONENT_NONE)
7698 : {
7699 668 : seqinfo->interesting = true;
7700 668 : owning_tab->interesting = true;
7701 : }
7702 : }
7703 370 : }
7704 :
7705 : /*
7706 : * getInherits
7707 : * read all the inheritance information
7708 : * from the system catalogs return them in the InhInfo* structure
7709 : *
7710 : * numInherits is set to the number of pairs read in
7711 : */
7712 : InhInfo *
7713 370 : getInherits(Archive *fout, int *numInherits)
7714 : {
7715 : PGresult *res;
7716 : int ntups;
7717 : int i;
7718 370 : PQExpBuffer query = createPQExpBuffer();
7719 : InhInfo *inhinfo;
7720 :
7721 : int i_inhrelid;
7722 : int i_inhparent;
7723 :
7724 : /* find all the inheritance information */
7725 370 : appendPQExpBufferStr(query, "SELECT inhrelid, inhparent FROM pg_inherits");
7726 :
7727 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7728 :
7729 370 : ntups = PQntuples(res);
7730 :
7731 370 : *numInherits = ntups;
7732 :
7733 370 : inhinfo = (InhInfo *) pg_malloc(ntups * sizeof(InhInfo));
7734 :
7735 370 : i_inhrelid = PQfnumber(res, "inhrelid");
7736 370 : i_inhparent = PQfnumber(res, "inhparent");
7737 :
7738 7380 : for (i = 0; i < ntups; i++)
7739 : {
7740 7010 : inhinfo[i].inhrelid = atooid(PQgetvalue(res, i, i_inhrelid));
7741 7010 : inhinfo[i].inhparent = atooid(PQgetvalue(res, i, i_inhparent));
7742 : }
7743 :
7744 370 : PQclear(res);
7745 :
7746 370 : destroyPQExpBuffer(query);
7747 :
7748 370 : return inhinfo;
7749 : }
7750 :
7751 : /*
7752 : * getPartitioningInfo
7753 : * get information about partitioning
7754 : *
7755 : * For the most part, we only collect partitioning info about tables we
7756 : * intend to dump. However, this function has to consider all partitioned
7757 : * tables in the database, because we need to know about parents of partitions
7758 : * we are going to dump even if the parents themselves won't be dumped.
7759 : *
7760 : * Specifically, what we need to know is whether each partitioned table
7761 : * has an "unsafe" partitioning scheme that requires us to force
7762 : * load-via-partition-root mode for its children. Currently the only case
7763 : * for which we force that is hash partitioning on enum columns, since the
7764 : * hash codes depend on enum value OIDs which won't be replicated across
7765 : * dump-and-reload. There are other cases in which load-via-partition-root
7766 : * might be necessary, but we expect users to cope with them.
7767 : */
7768 : void
7769 370 : getPartitioningInfo(Archive *fout)
7770 : {
7771 : PQExpBuffer query;
7772 : PGresult *res;
7773 : int ntups;
7774 :
7775 : /* hash partitioning didn't exist before v11 */
7776 370 : if (fout->remoteVersion < 110000)
7777 0 : return;
7778 : /* needn't bother if not dumping data */
7779 370 : if (!fout->dopt->dumpData)
7780 80 : return;
7781 :
7782 290 : query = createPQExpBuffer();
7783 :
7784 : /*
7785 : * Unsafe partitioning schemes are exactly those for which hash enum_ops
7786 : * appears among the partition opclasses. We needn't check partstrat.
7787 : *
7788 : * Note that this query may well retrieve info about tables we aren't
7789 : * going to dump and hence have no lock on. That's okay since we need not
7790 : * invoke any unsafe server-side functions.
7791 : */
7792 290 : appendPQExpBufferStr(query,
7793 : "SELECT partrelid FROM pg_partitioned_table WHERE\n"
7794 : "(SELECT c.oid FROM pg_opclass c JOIN pg_am a "
7795 : "ON c.opcmethod = a.oid\n"
7796 : "WHERE opcname = 'enum_ops' "
7797 : "AND opcnamespace = 'pg_catalog'::regnamespace "
7798 : "AND amname = 'hash') = ANY(partclass)");
7799 :
7800 290 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7801 :
7802 290 : ntups = PQntuples(res);
7803 :
7804 388 : for (int i = 0; i < ntups; i++)
7805 : {
7806 98 : Oid tabrelid = atooid(PQgetvalue(res, i, 0));
7807 : TableInfo *tbinfo;
7808 :
7809 98 : tbinfo = findTableByOid(tabrelid);
7810 98 : if (tbinfo == NULL)
7811 0 : pg_fatal("failed sanity check, table OID %u appearing in pg_partitioned_table not found",
7812 : tabrelid);
7813 98 : tbinfo->unsafe_partitions = true;
7814 : }
7815 :
7816 290 : PQclear(res);
7817 :
7818 290 : destroyPQExpBuffer(query);
7819 : }
7820 :
7821 : /*
7822 : * getIndexes
7823 : * get information about every index on a dumpable table
7824 : *
7825 : * Note: index data is not returned directly to the caller, but it
7826 : * does get entered into the DumpableObject tables.
7827 : */
7828 : void
7829 370 : getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
7830 : {
7831 370 : PQExpBuffer query = createPQExpBuffer();
7832 370 : PQExpBuffer tbloids = createPQExpBuffer();
7833 : PGresult *res;
7834 : int ntups;
7835 : int curtblindx;
7836 : IndxInfo *indxinfo;
7837 : int i_tableoid,
7838 : i_oid,
7839 : i_indrelid,
7840 : i_indexname,
7841 : i_relpages,
7842 : i_reltuples,
7843 : i_relallvisible,
7844 : i_relallfrozen,
7845 : i_parentidx,
7846 : i_indexdef,
7847 : i_indnkeyatts,
7848 : i_indnatts,
7849 : i_indkey,
7850 : i_indisclustered,
7851 : i_indisreplident,
7852 : i_indnullsnotdistinct,
7853 : i_contype,
7854 : i_conname,
7855 : i_condeferrable,
7856 : i_condeferred,
7857 : i_conperiod,
7858 : i_contableoid,
7859 : i_conoid,
7860 : i_condef,
7861 : i_indattnames,
7862 : i_tablespace,
7863 : i_indreloptions,
7864 : i_indstatcols,
7865 : i_indstatvals;
7866 :
7867 : /*
7868 : * We want to perform just one query against pg_index. However, we
7869 : * mustn't try to select every row of the catalog and then sort it out on
7870 : * the client side, because some of the server-side functions we need
7871 : * would be unsafe to apply to tables we don't have lock on. Hence, we
7872 : * build an array of the OIDs of tables we care about (and now have lock
7873 : * on!), and use a WHERE clause to constrain which rows are selected.
7874 : */
7875 370 : appendPQExpBufferChar(tbloids, '{');
7876 98826 : for (int i = 0; i < numTables; i++)
7877 : {
7878 98456 : TableInfo *tbinfo = &tblinfo[i];
7879 :
7880 98456 : if (!tbinfo->hasindex)
7881 69852 : continue;
7882 :
7883 : /*
7884 : * We can ignore indexes of uninteresting tables.
7885 : */
7886 28604 : if (!tbinfo->interesting)
7887 24454 : continue;
7888 :
7889 : /* OK, we need info for this table */
7890 4150 : if (tbloids->len > 1) /* do we have more than the '{'? */
7891 3982 : appendPQExpBufferChar(tbloids, ',');
7892 4150 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
7893 : }
7894 370 : appendPQExpBufferChar(tbloids, '}');
7895 :
7896 370 : appendPQExpBufferStr(query,
7897 : "SELECT t.tableoid, t.oid, i.indrelid, "
7898 : "t.relname AS indexname, "
7899 : "t.relpages, t.reltuples, t.relallvisible, ");
7900 :
7901 370 : if (fout->remoteVersion >= 180000)
7902 370 : appendPQExpBufferStr(query, "t.relallfrozen, ");
7903 : else
7904 0 : appendPQExpBufferStr(query, "0 AS relallfrozen, ");
7905 :
7906 370 : appendPQExpBufferStr(query,
7907 : "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
7908 : "i.indkey, i.indisclustered, "
7909 : "c.contype, c.conname, "
7910 : "c.condeferrable, c.condeferred, "
7911 : "c.tableoid AS contableoid, "
7912 : "c.oid AS conoid, "
7913 : "pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
7914 : "CASE WHEN i.indexprs IS NOT NULL THEN "
7915 : "(SELECT pg_catalog.array_agg(attname ORDER BY attnum)"
7916 : " FROM pg_catalog.pg_attribute "
7917 : " WHERE attrelid = i.indexrelid) "
7918 : "ELSE NULL END AS indattnames, "
7919 : "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
7920 : "t.reloptions AS indreloptions, ");
7921 :
7922 :
7923 370 : if (fout->remoteVersion >= 90400)
7924 370 : appendPQExpBufferStr(query,
7925 : "i.indisreplident, ");
7926 : else
7927 0 : appendPQExpBufferStr(query,
7928 : "false AS indisreplident, ");
7929 :
7930 370 : if (fout->remoteVersion >= 110000)
7931 370 : appendPQExpBufferStr(query,
7932 : "inh.inhparent AS parentidx, "
7933 : "i.indnkeyatts AS indnkeyatts, "
7934 : "i.indnatts AS indnatts, "
7935 : "(SELECT pg_catalog.array_agg(attnum ORDER BY attnum) "
7936 : " FROM pg_catalog.pg_attribute "
7937 : " WHERE attrelid = i.indexrelid AND "
7938 : " attstattarget >= 0) AS indstatcols, "
7939 : "(SELECT pg_catalog.array_agg(attstattarget ORDER BY attnum) "
7940 : " FROM pg_catalog.pg_attribute "
7941 : " WHERE attrelid = i.indexrelid AND "
7942 : " attstattarget >= 0) AS indstatvals, ");
7943 : else
7944 0 : appendPQExpBufferStr(query,
7945 : "0 AS parentidx, "
7946 : "i.indnatts AS indnkeyatts, "
7947 : "i.indnatts AS indnatts, "
7948 : "'' AS indstatcols, "
7949 : "'' AS indstatvals, ");
7950 :
7951 370 : if (fout->remoteVersion >= 150000)
7952 370 : appendPQExpBufferStr(query,
7953 : "i.indnullsnotdistinct, ");
7954 : else
7955 0 : appendPQExpBufferStr(query,
7956 : "false AS indnullsnotdistinct, ");
7957 :
7958 370 : if (fout->remoteVersion >= 180000)
7959 370 : appendPQExpBufferStr(query,
7960 : "c.conperiod ");
7961 : else
7962 0 : appendPQExpBufferStr(query,
7963 : "NULL AS conperiod ");
7964 :
7965 : /*
7966 : * The point of the messy-looking outer join is to find a constraint that
7967 : * is related by an internal dependency link to the index. If we find one,
7968 : * create a CONSTRAINT entry linked to the INDEX entry. We assume an
7969 : * index won't have more than one internal dependency.
7970 : *
7971 : * Note: the check on conrelid is redundant, but useful because that
7972 : * column is indexed while conindid is not.
7973 : */
7974 370 : if (fout->remoteVersion >= 110000)
7975 : {
7976 370 : appendPQExpBuffer(query,
7977 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
7978 : "JOIN pg_catalog.pg_index i ON (src.tbloid = i.indrelid) "
7979 : "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
7980 : "JOIN pg_catalog.pg_class t2 ON (t2.oid = i.indrelid) "
7981 : "LEFT JOIN pg_catalog.pg_constraint c "
7982 : "ON (i.indrelid = c.conrelid AND "
7983 : "i.indexrelid = c.conindid AND "
7984 : "c.contype IN ('p','u','x')) "
7985 : "LEFT JOIN pg_catalog.pg_inherits inh "
7986 : "ON (inh.inhrelid = indexrelid) "
7987 : "WHERE (i.indisvalid OR t2.relkind = 'p') "
7988 : "AND i.indisready "
7989 : "ORDER BY i.indrelid, indexname",
7990 : tbloids->data);
7991 : }
7992 : else
7993 : {
7994 : /*
7995 : * the test on indisready is necessary in 9.2, and harmless in
7996 : * earlier/later versions
7997 : */
7998 0 : appendPQExpBuffer(query,
7999 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8000 : "JOIN pg_catalog.pg_index i ON (src.tbloid = i.indrelid) "
8001 : "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
8002 : "LEFT JOIN pg_catalog.pg_constraint c "
8003 : "ON (i.indrelid = c.conrelid AND "
8004 : "i.indexrelid = c.conindid AND "
8005 : "c.contype IN ('p','u','x')) "
8006 : "WHERE i.indisvalid AND i.indisready "
8007 : "ORDER BY i.indrelid, indexname",
8008 : tbloids->data);
8009 : }
8010 :
8011 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8012 :
8013 370 : ntups = PQntuples(res);
8014 :
8015 370 : i_tableoid = PQfnumber(res, "tableoid");
8016 370 : i_oid = PQfnumber(res, "oid");
8017 370 : i_indrelid = PQfnumber(res, "indrelid");
8018 370 : i_indexname = PQfnumber(res, "indexname");
8019 370 : i_relpages = PQfnumber(res, "relpages");
8020 370 : i_reltuples = PQfnumber(res, "reltuples");
8021 370 : i_relallvisible = PQfnumber(res, "relallvisible");
8022 370 : i_relallfrozen = PQfnumber(res, "relallfrozen");
8023 370 : i_parentidx = PQfnumber(res, "parentidx");
8024 370 : i_indexdef = PQfnumber(res, "indexdef");
8025 370 : i_indnkeyatts = PQfnumber(res, "indnkeyatts");
8026 370 : i_indnatts = PQfnumber(res, "indnatts");
8027 370 : i_indkey = PQfnumber(res, "indkey");
8028 370 : i_indisclustered = PQfnumber(res, "indisclustered");
8029 370 : i_indisreplident = PQfnumber(res, "indisreplident");
8030 370 : i_indnullsnotdistinct = PQfnumber(res, "indnullsnotdistinct");
8031 370 : i_contype = PQfnumber(res, "contype");
8032 370 : i_conname = PQfnumber(res, "conname");
8033 370 : i_condeferrable = PQfnumber(res, "condeferrable");
8034 370 : i_condeferred = PQfnumber(res, "condeferred");
8035 370 : i_conperiod = PQfnumber(res, "conperiod");
8036 370 : i_contableoid = PQfnumber(res, "contableoid");
8037 370 : i_conoid = PQfnumber(res, "conoid");
8038 370 : i_condef = PQfnumber(res, "condef");
8039 370 : i_indattnames = PQfnumber(res, "indattnames");
8040 370 : i_tablespace = PQfnumber(res, "tablespace");
8041 370 : i_indreloptions = PQfnumber(res, "indreloptions");
8042 370 : i_indstatcols = PQfnumber(res, "indstatcols");
8043 370 : i_indstatvals = PQfnumber(res, "indstatvals");
8044 :
8045 370 : indxinfo = (IndxInfo *) pg_malloc(ntups * sizeof(IndxInfo));
8046 :
8047 : /*
8048 : * Outer loop iterates once per table, not once per row. Incrementing of
8049 : * j is handled by the inner loop.
8050 : */
8051 370 : curtblindx = -1;
8052 4480 : for (int j = 0; j < ntups;)
8053 : {
8054 4110 : Oid indrelid = atooid(PQgetvalue(res, j, i_indrelid));
8055 4110 : TableInfo *tbinfo = NULL;
8056 4110 : char **indAttNames = NULL;
8057 4110 : int nindAttNames = 0;
8058 : int numinds;
8059 :
8060 : /* Count rows for this table */
8061 5394 : for (numinds = 1; numinds < ntups - j; numinds++)
8062 5226 : if (atooid(PQgetvalue(res, j + numinds, i_indrelid)) != indrelid)
8063 3942 : break;
8064 :
8065 : /*
8066 : * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8067 : * order.
8068 : */
8069 49262 : while (++curtblindx < numTables)
8070 : {
8071 49262 : tbinfo = &tblinfo[curtblindx];
8072 49262 : if (tbinfo->dobj.catId.oid == indrelid)
8073 4110 : break;
8074 : }
8075 4110 : if (curtblindx >= numTables)
8076 0 : pg_fatal("unrecognized table OID %u", indrelid);
8077 : /* cross-check that we only got requested tables */
8078 4110 : if (!tbinfo->hasindex ||
8079 4110 : !tbinfo->interesting)
8080 0 : pg_fatal("unexpected index data for table \"%s\"",
8081 : tbinfo->dobj.name);
8082 :
8083 : /* Save data for this table */
8084 4110 : tbinfo->indexes = indxinfo + j;
8085 4110 : tbinfo->numIndexes = numinds;
8086 :
8087 9504 : for (int c = 0; c < numinds; c++, j++)
8088 : {
8089 : char contype;
8090 : char indexkind;
8091 : RelStatsInfo *relstats;
8092 5394 : int32 relpages = atoi(PQgetvalue(res, j, i_relpages));
8093 5394 : int32 relallvisible = atoi(PQgetvalue(res, j, i_relallvisible));
8094 5394 : int32 relallfrozen = atoi(PQgetvalue(res, j, i_relallfrozen));
8095 :
8096 5394 : indxinfo[j].dobj.objType = DO_INDEX;
8097 5394 : indxinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
8098 5394 : indxinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
8099 5394 : AssignDumpId(&indxinfo[j].dobj);
8100 5394 : indxinfo[j].dobj.dump = tbinfo->dobj.dump;
8101 5394 : indxinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_indexname));
8102 5394 : indxinfo[j].dobj.namespace = tbinfo->dobj.namespace;
8103 5394 : indxinfo[j].indextable = tbinfo;
8104 5394 : indxinfo[j].indexdef = pg_strdup(PQgetvalue(res, j, i_indexdef));
8105 5394 : indxinfo[j].indnkeyattrs = atoi(PQgetvalue(res, j, i_indnkeyatts));
8106 5394 : indxinfo[j].indnattrs = atoi(PQgetvalue(res, j, i_indnatts));
8107 5394 : indxinfo[j].tablespace = pg_strdup(PQgetvalue(res, j, i_tablespace));
8108 5394 : indxinfo[j].indreloptions = pg_strdup(PQgetvalue(res, j, i_indreloptions));
8109 5394 : indxinfo[j].indstatcols = pg_strdup(PQgetvalue(res, j, i_indstatcols));
8110 5394 : indxinfo[j].indstatvals = pg_strdup(PQgetvalue(res, j, i_indstatvals));
8111 5394 : indxinfo[j].indkeys = (Oid *) pg_malloc(indxinfo[j].indnattrs * sizeof(Oid));
8112 5394 : parseOidArray(PQgetvalue(res, j, i_indkey),
8113 5394 : indxinfo[j].indkeys, indxinfo[j].indnattrs);
8114 5394 : indxinfo[j].indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't');
8115 5394 : indxinfo[j].indisreplident = (PQgetvalue(res, j, i_indisreplident)[0] == 't');
8116 5394 : indxinfo[j].indnullsnotdistinct = (PQgetvalue(res, j, i_indnullsnotdistinct)[0] == 't');
8117 5394 : indxinfo[j].parentidx = atooid(PQgetvalue(res, j, i_parentidx));
8118 5394 : indxinfo[j].partattaches = (SimplePtrList)
8119 : {
8120 : NULL, NULL
8121 : };
8122 :
8123 5394 : if (indxinfo[j].parentidx == 0)
8124 4170 : indexkind = RELKIND_INDEX;
8125 : else
8126 1224 : indexkind = RELKIND_PARTITIONED_INDEX;
8127 :
8128 5394 : if (!PQgetisnull(res, j, i_indattnames))
8129 : {
8130 316 : if (!parsePGArray(PQgetvalue(res, j, i_indattnames),
8131 : &indAttNames, &nindAttNames))
8132 0 : pg_fatal("could not parse %s array", "indattnames");
8133 : }
8134 :
8135 5394 : relstats = getRelationStatistics(fout, &indxinfo[j].dobj, relpages,
8136 : PQgetvalue(res, j, i_reltuples),
8137 : relallvisible, relallfrozen, indexkind,
8138 : indAttNames, nindAttNames);
8139 :
8140 5394 : contype = *(PQgetvalue(res, j, i_contype));
8141 5394 : if (contype == 'p' || contype == 'u' || contype == 'x')
8142 3146 : {
8143 : /*
8144 : * If we found a constraint matching the index, create an
8145 : * entry for it.
8146 : */
8147 : ConstraintInfo *constrinfo;
8148 :
8149 3146 : constrinfo = (ConstraintInfo *) pg_malloc(sizeof(ConstraintInfo));
8150 3146 : constrinfo->dobj.objType = DO_CONSTRAINT;
8151 3146 : constrinfo->dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
8152 3146 : constrinfo->dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
8153 3146 : AssignDumpId(&constrinfo->dobj);
8154 3146 : constrinfo->dobj.dump = tbinfo->dobj.dump;
8155 3146 : constrinfo->dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
8156 3146 : constrinfo->dobj.namespace = tbinfo->dobj.namespace;
8157 3146 : constrinfo->contable = tbinfo;
8158 3146 : constrinfo->condomain = NULL;
8159 3146 : constrinfo->contype = contype;
8160 3146 : if (contype == 'x')
8161 20 : constrinfo->condef = pg_strdup(PQgetvalue(res, j, i_condef));
8162 : else
8163 3126 : constrinfo->condef = NULL;
8164 3146 : constrinfo->confrelid = InvalidOid;
8165 3146 : constrinfo->conindex = indxinfo[j].dobj.dumpId;
8166 3146 : constrinfo->condeferrable = *(PQgetvalue(res, j, i_condeferrable)) == 't';
8167 3146 : constrinfo->condeferred = *(PQgetvalue(res, j, i_condeferred)) == 't';
8168 3146 : constrinfo->conperiod = *(PQgetvalue(res, j, i_conperiod)) == 't';
8169 3146 : constrinfo->conislocal = true;
8170 3146 : constrinfo->separate = true;
8171 :
8172 3146 : indxinfo[j].indexconstraint = constrinfo->dobj.dumpId;
8173 3146 : if (relstats != NULL)
8174 1196 : addObjectDependency(&relstats->dobj, constrinfo->dobj.dumpId);
8175 : }
8176 : else
8177 : {
8178 : /* Plain secondary index */
8179 2248 : indxinfo[j].indexconstraint = 0;
8180 : }
8181 : }
8182 : }
8183 :
8184 370 : PQclear(res);
8185 :
8186 370 : destroyPQExpBuffer(query);
8187 370 : destroyPQExpBuffer(tbloids);
8188 370 : }
8189 :
8190 : /*
8191 : * getExtendedStatistics
8192 : * get information about extended-statistics objects.
8193 : *
8194 : * Note: extended statistics data is not returned directly to the caller, but
8195 : * it does get entered into the DumpableObject tables.
8196 : */
8197 : void
8198 370 : getExtendedStatistics(Archive *fout)
8199 : {
8200 : PQExpBuffer query;
8201 : PGresult *res;
8202 : StatsExtInfo *statsextinfo;
8203 : int ntups;
8204 : int i_tableoid;
8205 : int i_oid;
8206 : int i_stxname;
8207 : int i_stxnamespace;
8208 : int i_stxowner;
8209 : int i_stxrelid;
8210 : int i_stattarget;
8211 : int i;
8212 :
8213 : /* Extended statistics were new in v10 */
8214 370 : if (fout->remoteVersion < 100000)
8215 0 : return;
8216 :
8217 370 : query = createPQExpBuffer();
8218 :
8219 370 : if (fout->remoteVersion < 130000)
8220 0 : appendPQExpBufferStr(query, "SELECT tableoid, oid, stxname, "
8221 : "stxnamespace, stxowner, stxrelid, NULL AS stxstattarget "
8222 : "FROM pg_catalog.pg_statistic_ext");
8223 : else
8224 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, stxname, "
8225 : "stxnamespace, stxowner, stxrelid, stxstattarget "
8226 : "FROM pg_catalog.pg_statistic_ext");
8227 :
8228 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8229 :
8230 370 : ntups = PQntuples(res);
8231 :
8232 370 : i_tableoid = PQfnumber(res, "tableoid");
8233 370 : i_oid = PQfnumber(res, "oid");
8234 370 : i_stxname = PQfnumber(res, "stxname");
8235 370 : i_stxnamespace = PQfnumber(res, "stxnamespace");
8236 370 : i_stxowner = PQfnumber(res, "stxowner");
8237 370 : i_stxrelid = PQfnumber(res, "stxrelid");
8238 370 : i_stattarget = PQfnumber(res, "stxstattarget");
8239 :
8240 370 : statsextinfo = (StatsExtInfo *) pg_malloc(ntups * sizeof(StatsExtInfo));
8241 :
8242 732 : for (i = 0; i < ntups; i++)
8243 : {
8244 362 : statsextinfo[i].dobj.objType = DO_STATSEXT;
8245 362 : statsextinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8246 362 : statsextinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8247 362 : AssignDumpId(&statsextinfo[i].dobj);
8248 362 : statsextinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_stxname));
8249 724 : statsextinfo[i].dobj.namespace =
8250 362 : findNamespace(atooid(PQgetvalue(res, i, i_stxnamespace)));
8251 362 : statsextinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_stxowner));
8252 724 : statsextinfo[i].stattable =
8253 362 : findTableByOid(atooid(PQgetvalue(res, i, i_stxrelid)));
8254 362 : if (PQgetisnull(res, i, i_stattarget))
8255 260 : statsextinfo[i].stattarget = -1;
8256 : else
8257 102 : statsextinfo[i].stattarget = atoi(PQgetvalue(res, i, i_stattarget));
8258 :
8259 : /* Decide whether we want to dump it */
8260 362 : selectDumpableStatisticsObject(&(statsextinfo[i]), fout);
8261 : }
8262 :
8263 370 : PQclear(res);
8264 370 : destroyPQExpBuffer(query);
8265 : }
8266 :
8267 : /*
8268 : * getConstraints
8269 : *
8270 : * Get info about constraints on dumpable tables.
8271 : *
8272 : * Currently handles foreign keys only.
8273 : * Unique and primary key constraints are handled with indexes,
8274 : * while check constraints are processed in getTableAttrs().
8275 : */
8276 : void
8277 370 : getConstraints(Archive *fout, TableInfo tblinfo[], int numTables)
8278 : {
8279 370 : PQExpBuffer query = createPQExpBuffer();
8280 370 : PQExpBuffer tbloids = createPQExpBuffer();
8281 : PGresult *res;
8282 : int ntups;
8283 : int curtblindx;
8284 370 : TableInfo *tbinfo = NULL;
8285 : ConstraintInfo *constrinfo;
8286 : int i_contableoid,
8287 : i_conoid,
8288 : i_conrelid,
8289 : i_conname,
8290 : i_confrelid,
8291 : i_conindid,
8292 : i_condef;
8293 :
8294 : /*
8295 : * We want to perform just one query against pg_constraint. However, we
8296 : * mustn't try to select every row of the catalog and then sort it out on
8297 : * the client side, because some of the server-side functions we need
8298 : * would be unsafe to apply to tables we don't have lock on. Hence, we
8299 : * build an array of the OIDs of tables we care about (and now have lock
8300 : * on!), and use a WHERE clause to constrain which rows are selected.
8301 : */
8302 370 : appendPQExpBufferChar(tbloids, '{');
8303 98826 : for (int i = 0; i < numTables; i++)
8304 : {
8305 98456 : TableInfo *tinfo = &tblinfo[i];
8306 :
8307 98456 : if (!(tinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
8308 83774 : continue;
8309 :
8310 : /* OK, we need info for this table */
8311 14682 : if (tbloids->len > 1) /* do we have more than the '{'? */
8312 14436 : appendPQExpBufferChar(tbloids, ',');
8313 14682 : appendPQExpBuffer(tbloids, "%u", tinfo->dobj.catId.oid);
8314 : }
8315 370 : appendPQExpBufferChar(tbloids, '}');
8316 :
8317 370 : appendPQExpBufferStr(query,
8318 : "SELECT c.tableoid, c.oid, "
8319 : "conrelid, conname, confrelid, ");
8320 370 : if (fout->remoteVersion >= 110000)
8321 370 : appendPQExpBufferStr(query, "conindid, ");
8322 : else
8323 0 : appendPQExpBufferStr(query, "0 AS conindid, ");
8324 370 : appendPQExpBuffer(query,
8325 : "pg_catalog.pg_get_constraintdef(c.oid) AS condef\n"
8326 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8327 : "JOIN pg_catalog.pg_constraint c ON (src.tbloid = c.conrelid)\n"
8328 : "WHERE contype = 'f' ",
8329 : tbloids->data);
8330 370 : if (fout->remoteVersion >= 110000)
8331 370 : appendPQExpBufferStr(query,
8332 : "AND conparentid = 0 ");
8333 370 : appendPQExpBufferStr(query,
8334 : "ORDER BY conrelid, conname");
8335 :
8336 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8337 :
8338 370 : ntups = PQntuples(res);
8339 :
8340 370 : i_contableoid = PQfnumber(res, "tableoid");
8341 370 : i_conoid = PQfnumber(res, "oid");
8342 370 : i_conrelid = PQfnumber(res, "conrelid");
8343 370 : i_conname = PQfnumber(res, "conname");
8344 370 : i_confrelid = PQfnumber(res, "confrelid");
8345 370 : i_conindid = PQfnumber(res, "conindid");
8346 370 : i_condef = PQfnumber(res, "condef");
8347 :
8348 370 : constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));
8349 :
8350 370 : curtblindx = -1;
8351 736 : for (int j = 0; j < ntups; j++)
8352 : {
8353 366 : Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
8354 : TableInfo *reftable;
8355 :
8356 : /*
8357 : * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8358 : * order.
8359 : */
8360 366 : if (tbinfo == NULL || tbinfo->dobj.catId.oid != conrelid)
8361 : {
8362 30114 : while (++curtblindx < numTables)
8363 : {
8364 30114 : tbinfo = &tblinfo[curtblindx];
8365 30114 : if (tbinfo->dobj.catId.oid == conrelid)
8366 346 : break;
8367 : }
8368 346 : if (curtblindx >= numTables)
8369 0 : pg_fatal("unrecognized table OID %u", conrelid);
8370 : }
8371 :
8372 366 : constrinfo[j].dobj.objType = DO_FK_CONSTRAINT;
8373 366 : constrinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
8374 366 : constrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
8375 366 : AssignDumpId(&constrinfo[j].dobj);
8376 366 : constrinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
8377 366 : constrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
8378 366 : constrinfo[j].contable = tbinfo;
8379 366 : constrinfo[j].condomain = NULL;
8380 366 : constrinfo[j].contype = 'f';
8381 366 : constrinfo[j].condef = pg_strdup(PQgetvalue(res, j, i_condef));
8382 366 : constrinfo[j].confrelid = atooid(PQgetvalue(res, j, i_confrelid));
8383 366 : constrinfo[j].conindex = 0;
8384 366 : constrinfo[j].condeferrable = false;
8385 366 : constrinfo[j].condeferred = false;
8386 366 : constrinfo[j].conislocal = true;
8387 366 : constrinfo[j].separate = true;
8388 :
8389 : /*
8390 : * Restoring an FK that points to a partitioned table requires that
8391 : * all partition indexes have been attached beforehand. Ensure that
8392 : * happens by making the constraint depend on each index partition
8393 : * attach object.
8394 : */
8395 366 : reftable = findTableByOid(constrinfo[j].confrelid);
8396 366 : if (reftable && reftable->relkind == RELKIND_PARTITIONED_TABLE)
8397 : {
8398 40 : Oid indexOid = atooid(PQgetvalue(res, j, i_conindid));
8399 :
8400 40 : if (indexOid != InvalidOid)
8401 : {
8402 40 : for (int k = 0; k < reftable->numIndexes; k++)
8403 : {
8404 : IndxInfo *refidx;
8405 :
8406 : /* not our index? */
8407 40 : if (reftable->indexes[k].dobj.catId.oid != indexOid)
8408 0 : continue;
8409 :
8410 40 : refidx = &reftable->indexes[k];
8411 40 : addConstrChildIdxDeps(&constrinfo[j].dobj, refidx);
8412 40 : break;
8413 : }
8414 : }
8415 : }
8416 : }
8417 :
8418 370 : PQclear(res);
8419 :
8420 370 : destroyPQExpBuffer(query);
8421 370 : destroyPQExpBuffer(tbloids);
8422 370 : }
8423 :
8424 : /*
8425 : * addConstrChildIdxDeps
8426 : *
8427 : * Recursive subroutine for getConstraints
8428 : *
8429 : * Given an object representing a foreign key constraint and an index on the
8430 : * partitioned table it references, mark the constraint object as dependent
8431 : * on the DO_INDEX_ATTACH object of each index partition, recursively
8432 : * drilling down to their partitions if any. This ensures that the FK is not
8433 : * restored until the index is fully marked valid.
8434 : */
8435 : static void
8436 90 : addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx)
8437 : {
8438 : SimplePtrListCell *cell;
8439 :
8440 : Assert(dobj->objType == DO_FK_CONSTRAINT);
8441 :
8442 310 : for (cell = refidx->partattaches.head; cell; cell = cell->next)
8443 : {
8444 220 : IndexAttachInfo *attach = (IndexAttachInfo *) cell->ptr;
8445 :
8446 220 : addObjectDependency(dobj, attach->dobj.dumpId);
8447 :
8448 220 : if (attach->partitionIdx->partattaches.head != NULL)
8449 50 : addConstrChildIdxDeps(dobj, attach->partitionIdx);
8450 : }
8451 90 : }
8452 :
8453 : /*
8454 : * getDomainConstraints
8455 : *
8456 : * Get info about constraints on a domain.
8457 : */
8458 : static void
8459 328 : getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
8460 : {
8461 : ConstraintInfo *constrinfo;
8462 328 : PQExpBuffer query = createPQExpBuffer();
8463 : PGresult *res;
8464 : int i_tableoid,
8465 : i_oid,
8466 : i_conname,
8467 : i_consrc,
8468 : i_convalidated,
8469 : i_contype;
8470 : int ntups;
8471 :
8472 328 : if (!fout->is_prepared[PREPQUERY_GETDOMAINCONSTRAINTS])
8473 : {
8474 : /*
8475 : * Set up query for constraint-specific details. For servers 17 and
8476 : * up, domains have constraints of type 'n' as well as 'c', otherwise
8477 : * just the latter.
8478 : */
8479 98 : appendPQExpBuffer(query,
8480 : "PREPARE getDomainConstraints(pg_catalog.oid) AS\n"
8481 : "SELECT tableoid, oid, conname, "
8482 : "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
8483 : "convalidated, contype "
8484 : "FROM pg_catalog.pg_constraint "
8485 : "WHERE contypid = $1 AND contype IN (%s) "
8486 : "ORDER BY conname",
8487 98 : fout->remoteVersion < 170000 ? "'c'" : "'c', 'n'");
8488 :
8489 98 : ExecuteSqlStatement(fout, query->data);
8490 :
8491 98 : fout->is_prepared[PREPQUERY_GETDOMAINCONSTRAINTS] = true;
8492 : }
8493 :
8494 328 : printfPQExpBuffer(query,
8495 : "EXECUTE getDomainConstraints('%u')",
8496 : tyinfo->dobj.catId.oid);
8497 :
8498 328 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8499 :
8500 328 : ntups = PQntuples(res);
8501 :
8502 328 : i_tableoid = PQfnumber(res, "tableoid");
8503 328 : i_oid = PQfnumber(res, "oid");
8504 328 : i_conname = PQfnumber(res, "conname");
8505 328 : i_consrc = PQfnumber(res, "consrc");
8506 328 : i_convalidated = PQfnumber(res, "convalidated");
8507 328 : i_contype = PQfnumber(res, "contype");
8508 :
8509 328 : constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));
8510 328 : tyinfo->domChecks = constrinfo;
8511 :
8512 : /* 'i' tracks result rows; 'j' counts CHECK constraints */
8513 684 : for (int i = 0, j = 0; i < ntups; i++)
8514 : {
8515 356 : bool validated = PQgetvalue(res, i, i_convalidated)[0] == 't';
8516 356 : char contype = (PQgetvalue(res, i, i_contype))[0];
8517 : ConstraintInfo *constraint;
8518 :
8519 356 : if (contype == CONSTRAINT_CHECK)
8520 : {
8521 238 : constraint = &constrinfo[j++];
8522 238 : tyinfo->nDomChecks++;
8523 : }
8524 : else
8525 : {
8526 : Assert(contype == CONSTRAINT_NOTNULL);
8527 : Assert(tyinfo->notnull == NULL);
8528 : /* use last item in array for the not-null constraint */
8529 118 : tyinfo->notnull = &(constrinfo[ntups - 1]);
8530 118 : constraint = tyinfo->notnull;
8531 : }
8532 :
8533 356 : constraint->dobj.objType = DO_CONSTRAINT;
8534 356 : constraint->dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8535 356 : constraint->dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8536 356 : AssignDumpId(&(constraint->dobj));
8537 356 : constraint->dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
8538 356 : constraint->dobj.namespace = tyinfo->dobj.namespace;
8539 356 : constraint->contable = NULL;
8540 356 : constraint->condomain = tyinfo;
8541 356 : constraint->contype = contype;
8542 356 : constraint->condef = pg_strdup(PQgetvalue(res, i, i_consrc));
8543 356 : constraint->confrelid = InvalidOid;
8544 356 : constraint->conindex = 0;
8545 356 : constraint->condeferrable = false;
8546 356 : constraint->condeferred = false;
8547 356 : constraint->conislocal = true;
8548 :
8549 356 : constraint->separate = !validated;
8550 :
8551 : /*
8552 : * Make the domain depend on the constraint, ensuring it won't be
8553 : * output till any constraint dependencies are OK. If the constraint
8554 : * has not been validated, it's going to be dumped after the domain
8555 : * anyway, so this doesn't matter.
8556 : */
8557 356 : if (validated)
8558 346 : addObjectDependency(&tyinfo->dobj, constraint->dobj.dumpId);
8559 : }
8560 :
8561 328 : PQclear(res);
8562 :
8563 328 : destroyPQExpBuffer(query);
8564 328 : }
8565 :
8566 : /*
8567 : * getRules
8568 : * get basic information about every rule in the system
8569 : */
8570 : void
8571 370 : getRules(Archive *fout)
8572 : {
8573 : PGresult *res;
8574 : int ntups;
8575 : int i;
8576 370 : PQExpBuffer query = createPQExpBuffer();
8577 : RuleInfo *ruleinfo;
8578 : int i_tableoid;
8579 : int i_oid;
8580 : int i_rulename;
8581 : int i_ruletable;
8582 : int i_ev_type;
8583 : int i_is_instead;
8584 : int i_ev_enabled;
8585 :
8586 370 : appendPQExpBufferStr(query, "SELECT "
8587 : "tableoid, oid, rulename, "
8588 : "ev_class AS ruletable, ev_type, is_instead, "
8589 : "ev_enabled "
8590 : "FROM pg_rewrite "
8591 : "ORDER BY oid");
8592 :
8593 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8594 :
8595 370 : ntups = PQntuples(res);
8596 :
8597 370 : ruleinfo = (RuleInfo *) pg_malloc(ntups * sizeof(RuleInfo));
8598 :
8599 370 : i_tableoid = PQfnumber(res, "tableoid");
8600 370 : i_oid = PQfnumber(res, "oid");
8601 370 : i_rulename = PQfnumber(res, "rulename");
8602 370 : i_ruletable = PQfnumber(res, "ruletable");
8603 370 : i_ev_type = PQfnumber(res, "ev_type");
8604 370 : i_is_instead = PQfnumber(res, "is_instead");
8605 370 : i_ev_enabled = PQfnumber(res, "ev_enabled");
8606 :
8607 57994 : for (i = 0; i < ntups; i++)
8608 : {
8609 : Oid ruletableoid;
8610 :
8611 57624 : ruleinfo[i].dobj.objType = DO_RULE;
8612 57624 : ruleinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8613 57624 : ruleinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8614 57624 : AssignDumpId(&ruleinfo[i].dobj);
8615 57624 : ruleinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_rulename));
8616 57624 : ruletableoid = atooid(PQgetvalue(res, i, i_ruletable));
8617 57624 : ruleinfo[i].ruletable = findTableByOid(ruletableoid);
8618 57624 : if (ruleinfo[i].ruletable == NULL)
8619 0 : pg_fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
8620 : ruletableoid, ruleinfo[i].dobj.catId.oid);
8621 57624 : ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace;
8622 57624 : ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump;
8623 57624 : ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type));
8624 57624 : ruleinfo[i].is_instead = *(PQgetvalue(res, i, i_is_instead)) == 't';
8625 57624 : ruleinfo[i].ev_enabled = *(PQgetvalue(res, i, i_ev_enabled));
8626 57624 : if (ruleinfo[i].ruletable)
8627 : {
8628 : /*
8629 : * If the table is a view or materialized view, force its ON
8630 : * SELECT rule to be sorted before the view itself --- this
8631 : * ensures that any dependencies for the rule affect the table's
8632 : * positioning. Other rules are forced to appear after their
8633 : * table.
8634 : */
8635 57624 : if ((ruleinfo[i].ruletable->relkind == RELKIND_VIEW ||
8636 1594 : ruleinfo[i].ruletable->relkind == RELKIND_MATVIEW) &&
8637 57162 : ruleinfo[i].ev_type == '1' && ruleinfo[i].is_instead)
8638 : {
8639 56338 : addObjectDependency(&ruleinfo[i].ruletable->dobj,
8640 56338 : ruleinfo[i].dobj.dumpId);
8641 : /* We'll merge the rule into CREATE VIEW, if possible */
8642 56338 : ruleinfo[i].separate = false;
8643 : }
8644 : else
8645 : {
8646 1286 : addObjectDependency(&ruleinfo[i].dobj,
8647 1286 : ruleinfo[i].ruletable->dobj.dumpId);
8648 1286 : ruleinfo[i].separate = true;
8649 : }
8650 : }
8651 : else
8652 0 : ruleinfo[i].separate = true;
8653 : }
8654 :
8655 370 : PQclear(res);
8656 :
8657 370 : destroyPQExpBuffer(query);
8658 370 : }
8659 :
8660 : /*
8661 : * getTriggers
8662 : * get information about every trigger on a dumpable table
8663 : *
8664 : * Note: trigger data is not returned directly to the caller, but it
8665 : * does get entered into the DumpableObject tables.
8666 : */
8667 : void
8668 370 : getTriggers(Archive *fout, TableInfo tblinfo[], int numTables)
8669 : {
8670 370 : PQExpBuffer query = createPQExpBuffer();
8671 370 : PQExpBuffer tbloids = createPQExpBuffer();
8672 : PGresult *res;
8673 : int ntups;
8674 : int curtblindx;
8675 : TriggerInfo *tginfo;
8676 : int i_tableoid,
8677 : i_oid,
8678 : i_tgrelid,
8679 : i_tgname,
8680 : i_tgenabled,
8681 : i_tgispartition,
8682 : i_tgdef;
8683 :
8684 : /*
8685 : * We want to perform just one query against pg_trigger. However, we
8686 : * mustn't try to select every row of the catalog and then sort it out on
8687 : * the client side, because some of the server-side functions we need
8688 : * would be unsafe to apply to tables we don't have lock on. Hence, we
8689 : * build an array of the OIDs of tables we care about (and now have lock
8690 : * on!), and use a WHERE clause to constrain which rows are selected.
8691 : */
8692 370 : appendPQExpBufferChar(tbloids, '{');
8693 98826 : for (int i = 0; i < numTables; i++)
8694 : {
8695 98456 : TableInfo *tbinfo = &tblinfo[i];
8696 :
8697 98456 : if (!tbinfo->hastriggers ||
8698 2336 : !(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
8699 96644 : continue;
8700 :
8701 : /* OK, we need info for this table */
8702 1812 : if (tbloids->len > 1) /* do we have more than the '{'? */
8703 1698 : appendPQExpBufferChar(tbloids, ',');
8704 1812 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
8705 : }
8706 370 : appendPQExpBufferChar(tbloids, '}');
8707 :
8708 370 : if (fout->remoteVersion >= 150000)
8709 : {
8710 : /*
8711 : * NB: think not to use pretty=true in pg_get_triggerdef. It could
8712 : * result in non-forward-compatible dumps of WHEN clauses due to
8713 : * under-parenthesization.
8714 : *
8715 : * NB: We need to see partition triggers in case the tgenabled flag
8716 : * has been changed from the parent.
8717 : */
8718 370 : appendPQExpBuffer(query,
8719 : "SELECT t.tgrelid, t.tgname, "
8720 : "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8721 : "t.tgenabled, t.tableoid, t.oid, "
8722 : "t.tgparentid <> 0 AS tgispartition\n"
8723 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8724 : "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8725 : "LEFT JOIN pg_catalog.pg_trigger u ON (u.oid = t.tgparentid) "
8726 : "WHERE ((NOT t.tgisinternal AND t.tgparentid = 0) "
8727 : "OR t.tgenabled != u.tgenabled) "
8728 : "ORDER BY t.tgrelid, t.tgname",
8729 : tbloids->data);
8730 : }
8731 0 : else if (fout->remoteVersion >= 130000)
8732 : {
8733 : /*
8734 : * NB: think not to use pretty=true in pg_get_triggerdef. It could
8735 : * result in non-forward-compatible dumps of WHEN clauses due to
8736 : * under-parenthesization.
8737 : *
8738 : * NB: We need to see tgisinternal triggers in partitions, in case the
8739 : * tgenabled flag has been changed from the parent.
8740 : */
8741 0 : appendPQExpBuffer(query,
8742 : "SELECT t.tgrelid, t.tgname, "
8743 : "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8744 : "t.tgenabled, t.tableoid, t.oid, t.tgisinternal as tgispartition\n"
8745 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8746 : "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8747 : "LEFT JOIN pg_catalog.pg_trigger u ON (u.oid = t.tgparentid) "
8748 : "WHERE (NOT t.tgisinternal OR t.tgenabled != u.tgenabled) "
8749 : "ORDER BY t.tgrelid, t.tgname",
8750 : tbloids->data);
8751 : }
8752 0 : else if (fout->remoteVersion >= 110000)
8753 : {
8754 : /*
8755 : * NB: We need to see tgisinternal triggers in partitions, in case the
8756 : * tgenabled flag has been changed from the parent. No tgparentid in
8757 : * version 11-12, so we have to match them via pg_depend.
8758 : *
8759 : * See above about pretty=true in pg_get_triggerdef.
8760 : */
8761 0 : appendPQExpBuffer(query,
8762 : "SELECT t.tgrelid, t.tgname, "
8763 : "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8764 : "t.tgenabled, t.tableoid, t.oid, t.tgisinternal as tgispartition "
8765 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8766 : "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8767 : "LEFT JOIN pg_catalog.pg_depend AS d ON "
8768 : " d.classid = 'pg_catalog.pg_trigger'::pg_catalog.regclass AND "
8769 : " d.refclassid = 'pg_catalog.pg_trigger'::pg_catalog.regclass AND "
8770 : " d.objid = t.oid "
8771 : "LEFT JOIN pg_catalog.pg_trigger AS pt ON pt.oid = refobjid "
8772 : "WHERE (NOT t.tgisinternal OR t.tgenabled != pt.tgenabled) "
8773 : "ORDER BY t.tgrelid, t.tgname",
8774 : tbloids->data);
8775 : }
8776 : else
8777 : {
8778 : /* See above about pretty=true in pg_get_triggerdef */
8779 0 : appendPQExpBuffer(query,
8780 : "SELECT t.tgrelid, t.tgname, "
8781 : "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8782 : "t.tgenabled, false as tgispartition, "
8783 : "t.tableoid, t.oid "
8784 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8785 : "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8786 : "WHERE NOT tgisinternal "
8787 : "ORDER BY t.tgrelid, t.tgname",
8788 : tbloids->data);
8789 : }
8790 :
8791 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8792 :
8793 370 : ntups = PQntuples(res);
8794 :
8795 370 : i_tableoid = PQfnumber(res, "tableoid");
8796 370 : i_oid = PQfnumber(res, "oid");
8797 370 : i_tgrelid = PQfnumber(res, "tgrelid");
8798 370 : i_tgname = PQfnumber(res, "tgname");
8799 370 : i_tgenabled = PQfnumber(res, "tgenabled");
8800 370 : i_tgispartition = PQfnumber(res, "tgispartition");
8801 370 : i_tgdef = PQfnumber(res, "tgdef");
8802 :
8803 370 : tginfo = (TriggerInfo *) pg_malloc(ntups * sizeof(TriggerInfo));
8804 :
8805 : /*
8806 : * Outer loop iterates once per table, not once per row. Incrementing of
8807 : * j is handled by the inner loop.
8808 : */
8809 370 : curtblindx = -1;
8810 1042 : for (int j = 0; j < ntups;)
8811 : {
8812 672 : Oid tgrelid = atooid(PQgetvalue(res, j, i_tgrelid));
8813 672 : TableInfo *tbinfo = NULL;
8814 : int numtrigs;
8815 :
8816 : /* Count rows for this table */
8817 1106 : for (numtrigs = 1; numtrigs < ntups - j; numtrigs++)
8818 992 : if (atooid(PQgetvalue(res, j + numtrigs, i_tgrelid)) != tgrelid)
8819 558 : break;
8820 :
8821 : /*
8822 : * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8823 : * order.
8824 : */
8825 35340 : while (++curtblindx < numTables)
8826 : {
8827 35340 : tbinfo = &tblinfo[curtblindx];
8828 35340 : if (tbinfo->dobj.catId.oid == tgrelid)
8829 672 : break;
8830 : }
8831 672 : if (curtblindx >= numTables)
8832 0 : pg_fatal("unrecognized table OID %u", tgrelid);
8833 :
8834 : /* Save data for this table */
8835 672 : tbinfo->triggers = tginfo + j;
8836 672 : tbinfo->numTriggers = numtrigs;
8837 :
8838 1778 : for (int c = 0; c < numtrigs; c++, j++)
8839 : {
8840 1106 : tginfo[j].dobj.objType = DO_TRIGGER;
8841 1106 : tginfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
8842 1106 : tginfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
8843 1106 : AssignDumpId(&tginfo[j].dobj);
8844 1106 : tginfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_tgname));
8845 1106 : tginfo[j].dobj.namespace = tbinfo->dobj.namespace;
8846 1106 : tginfo[j].tgtable = tbinfo;
8847 1106 : tginfo[j].tgenabled = *(PQgetvalue(res, j, i_tgenabled));
8848 1106 : tginfo[j].tgispartition = *(PQgetvalue(res, j, i_tgispartition)) == 't';
8849 1106 : tginfo[j].tgdef = pg_strdup(PQgetvalue(res, j, i_tgdef));
8850 : }
8851 : }
8852 :
8853 370 : PQclear(res);
8854 :
8855 370 : destroyPQExpBuffer(query);
8856 370 : destroyPQExpBuffer(tbloids);
8857 370 : }
8858 :
8859 : /*
8860 : * getEventTriggers
8861 : * get information about event triggers
8862 : */
8863 : void
8864 370 : getEventTriggers(Archive *fout)
8865 : {
8866 : int i;
8867 : PQExpBuffer query;
8868 : PGresult *res;
8869 : EventTriggerInfo *evtinfo;
8870 : int i_tableoid,
8871 : i_oid,
8872 : i_evtname,
8873 : i_evtevent,
8874 : i_evtowner,
8875 : i_evttags,
8876 : i_evtfname,
8877 : i_evtenabled;
8878 : int ntups;
8879 :
8880 : /* Before 9.3, there are no event triggers */
8881 370 : if (fout->remoteVersion < 90300)
8882 0 : return;
8883 :
8884 370 : query = createPQExpBuffer();
8885 :
8886 370 : appendPQExpBufferStr(query,
8887 : "SELECT e.tableoid, e.oid, evtname, evtenabled, "
8888 : "evtevent, evtowner, "
8889 : "array_to_string(array("
8890 : "select quote_literal(x) "
8891 : " from unnest(evttags) as t(x)), ', ') as evttags, "
8892 : "e.evtfoid::regproc as evtfname "
8893 : "FROM pg_event_trigger e "
8894 : "ORDER BY e.oid");
8895 :
8896 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8897 :
8898 370 : ntups = PQntuples(res);
8899 :
8900 370 : evtinfo = (EventTriggerInfo *) pg_malloc(ntups * sizeof(EventTriggerInfo));
8901 :
8902 370 : i_tableoid = PQfnumber(res, "tableoid");
8903 370 : i_oid = PQfnumber(res, "oid");
8904 370 : i_evtname = PQfnumber(res, "evtname");
8905 370 : i_evtevent = PQfnumber(res, "evtevent");
8906 370 : i_evtowner = PQfnumber(res, "evtowner");
8907 370 : i_evttags = PQfnumber(res, "evttags");
8908 370 : i_evtfname = PQfnumber(res, "evtfname");
8909 370 : i_evtenabled = PQfnumber(res, "evtenabled");
8910 :
8911 486 : for (i = 0; i < ntups; i++)
8912 : {
8913 116 : evtinfo[i].dobj.objType = DO_EVENT_TRIGGER;
8914 116 : evtinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8915 116 : evtinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8916 116 : AssignDumpId(&evtinfo[i].dobj);
8917 116 : evtinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_evtname));
8918 116 : evtinfo[i].evtname = pg_strdup(PQgetvalue(res, i, i_evtname));
8919 116 : evtinfo[i].evtevent = pg_strdup(PQgetvalue(res, i, i_evtevent));
8920 116 : evtinfo[i].evtowner = getRoleName(PQgetvalue(res, i, i_evtowner));
8921 116 : evtinfo[i].evttags = pg_strdup(PQgetvalue(res, i, i_evttags));
8922 116 : evtinfo[i].evtfname = pg_strdup(PQgetvalue(res, i, i_evtfname));
8923 116 : evtinfo[i].evtenabled = *(PQgetvalue(res, i, i_evtenabled));
8924 :
8925 : /* Decide whether we want to dump it */
8926 116 : selectDumpableObject(&(evtinfo[i].dobj), fout);
8927 : }
8928 :
8929 370 : PQclear(res);
8930 :
8931 370 : destroyPQExpBuffer(query);
8932 : }
8933 :
8934 : /*
8935 : * getProcLangs
8936 : * get basic information about every procedural language in the system
8937 : *
8938 : * NB: this must run after getFuncs() because we assume we can do
8939 : * findFuncByOid().
8940 : */
8941 : void
8942 370 : getProcLangs(Archive *fout)
8943 : {
8944 : PGresult *res;
8945 : int ntups;
8946 : int i;
8947 370 : PQExpBuffer query = createPQExpBuffer();
8948 : ProcLangInfo *planginfo;
8949 : int i_tableoid;
8950 : int i_oid;
8951 : int i_lanname;
8952 : int i_lanpltrusted;
8953 : int i_lanplcallfoid;
8954 : int i_laninline;
8955 : int i_lanvalidator;
8956 : int i_lanacl;
8957 : int i_acldefault;
8958 : int i_lanowner;
8959 :
8960 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, "
8961 : "lanname, lanpltrusted, lanplcallfoid, "
8962 : "laninline, lanvalidator, "
8963 : "lanacl, "
8964 : "acldefault('l', lanowner) AS acldefault, "
8965 : "lanowner "
8966 : "FROM pg_language "
8967 : "WHERE lanispl "
8968 : "ORDER BY oid");
8969 :
8970 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8971 :
8972 370 : ntups = PQntuples(res);
8973 :
8974 370 : planginfo = (ProcLangInfo *) pg_malloc(ntups * sizeof(ProcLangInfo));
8975 :
8976 370 : i_tableoid = PQfnumber(res, "tableoid");
8977 370 : i_oid = PQfnumber(res, "oid");
8978 370 : i_lanname = PQfnumber(res, "lanname");
8979 370 : i_lanpltrusted = PQfnumber(res, "lanpltrusted");
8980 370 : i_lanplcallfoid = PQfnumber(res, "lanplcallfoid");
8981 370 : i_laninline = PQfnumber(res, "laninline");
8982 370 : i_lanvalidator = PQfnumber(res, "lanvalidator");
8983 370 : i_lanacl = PQfnumber(res, "lanacl");
8984 370 : i_acldefault = PQfnumber(res, "acldefault");
8985 370 : i_lanowner = PQfnumber(res, "lanowner");
8986 :
8987 842 : for (i = 0; i < ntups; i++)
8988 : {
8989 472 : planginfo[i].dobj.objType = DO_PROCLANG;
8990 472 : planginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8991 472 : planginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8992 472 : AssignDumpId(&planginfo[i].dobj);
8993 :
8994 472 : planginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_lanname));
8995 472 : planginfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lanacl));
8996 472 : planginfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
8997 472 : planginfo[i].dacl.privtype = 0;
8998 472 : planginfo[i].dacl.initprivs = NULL;
8999 472 : planginfo[i].lanpltrusted = *(PQgetvalue(res, i, i_lanpltrusted)) == 't';
9000 472 : planginfo[i].lanplcallfoid = atooid(PQgetvalue(res, i, i_lanplcallfoid));
9001 472 : planginfo[i].laninline = atooid(PQgetvalue(res, i, i_laninline));
9002 472 : planginfo[i].lanvalidator = atooid(PQgetvalue(res, i, i_lanvalidator));
9003 472 : planginfo[i].lanowner = getRoleName(PQgetvalue(res, i, i_lanowner));
9004 :
9005 : /* Decide whether we want to dump it */
9006 472 : selectDumpableProcLang(&(planginfo[i]), fout);
9007 :
9008 : /* Mark whether language has an ACL */
9009 472 : if (!PQgetisnull(res, i, i_lanacl))
9010 102 : planginfo[i].dobj.components |= DUMP_COMPONENT_ACL;
9011 : }
9012 :
9013 370 : PQclear(res);
9014 :
9015 370 : destroyPQExpBuffer(query);
9016 370 : }
9017 :
9018 : /*
9019 : * getCasts
9020 : * get basic information about most casts in the system
9021 : *
9022 : * Skip casts from a range to its multirange, since we'll create those
9023 : * automatically.
9024 : */
9025 : void
9026 370 : getCasts(Archive *fout)
9027 : {
9028 : PGresult *res;
9029 : int ntups;
9030 : int i;
9031 370 : PQExpBuffer query = createPQExpBuffer();
9032 : CastInfo *castinfo;
9033 : int i_tableoid;
9034 : int i_oid;
9035 : int i_castsource;
9036 : int i_casttarget;
9037 : int i_castfunc;
9038 : int i_castcontext;
9039 : int i_castmethod;
9040 :
9041 370 : if (fout->remoteVersion >= 140000)
9042 : {
9043 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9044 : "castsource, casttarget, castfunc, castcontext, "
9045 : "castmethod "
9046 : "FROM pg_cast c "
9047 : "WHERE NOT EXISTS ( "
9048 : "SELECT 1 FROM pg_range r "
9049 : "WHERE c.castsource = r.rngtypid "
9050 : "AND c.casttarget = r.rngmultitypid "
9051 : ") "
9052 : "ORDER BY 3,4");
9053 : }
9054 : else
9055 : {
9056 0 : appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9057 : "castsource, casttarget, castfunc, castcontext, "
9058 : "castmethod "
9059 : "FROM pg_cast ORDER BY 3,4");
9060 : }
9061 :
9062 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
9063 :
9064 370 : ntups = PQntuples(res);
9065 :
9066 370 : castinfo = (CastInfo *) pg_malloc(ntups * sizeof(CastInfo));
9067 :
9068 370 : i_tableoid = PQfnumber(res, "tableoid");
9069 370 : i_oid = PQfnumber(res, "oid");
9070 370 : i_castsource = PQfnumber(res, "castsource");
9071 370 : i_casttarget = PQfnumber(res, "casttarget");
9072 370 : i_castfunc = PQfnumber(res, "castfunc");
9073 370 : i_castcontext = PQfnumber(res, "castcontext");
9074 370 : i_castmethod = PQfnumber(res, "castmethod");
9075 :
9076 87876 : for (i = 0; i < ntups; i++)
9077 : {
9078 : PQExpBufferData namebuf;
9079 : TypeInfo *sTypeInfo;
9080 : TypeInfo *tTypeInfo;
9081 :
9082 87506 : castinfo[i].dobj.objType = DO_CAST;
9083 87506 : castinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9084 87506 : castinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9085 87506 : AssignDumpId(&castinfo[i].dobj);
9086 87506 : castinfo[i].castsource = atooid(PQgetvalue(res, i, i_castsource));
9087 87506 : castinfo[i].casttarget = atooid(PQgetvalue(res, i, i_casttarget));
9088 87506 : castinfo[i].castfunc = atooid(PQgetvalue(res, i, i_castfunc));
9089 87506 : castinfo[i].castcontext = *(PQgetvalue(res, i, i_castcontext));
9090 87506 : castinfo[i].castmethod = *(PQgetvalue(res, i, i_castmethod));
9091 :
9092 : /*
9093 : * Try to name cast as concatenation of typnames. This is only used
9094 : * for purposes of sorting. If we fail to find either type, the name
9095 : * will be an empty string.
9096 : */
9097 87506 : initPQExpBuffer(&namebuf);
9098 87506 : sTypeInfo = findTypeByOid(castinfo[i].castsource);
9099 87506 : tTypeInfo = findTypeByOid(castinfo[i].casttarget);
9100 87506 : if (sTypeInfo && tTypeInfo)
9101 87506 : appendPQExpBuffer(&namebuf, "%s %s",
9102 : sTypeInfo->dobj.name, tTypeInfo->dobj.name);
9103 87506 : castinfo[i].dobj.name = namebuf.data;
9104 :
9105 : /* Decide whether we want to dump it */
9106 87506 : selectDumpableCast(&(castinfo[i]), fout);
9107 : }
9108 :
9109 370 : PQclear(res);
9110 :
9111 370 : destroyPQExpBuffer(query);
9112 370 : }
9113 :
9114 : static char *
9115 200 : get_language_name(Archive *fout, Oid langid)
9116 : {
9117 : PQExpBuffer query;
9118 : PGresult *res;
9119 : char *lanname;
9120 :
9121 200 : query = createPQExpBuffer();
9122 200 : appendPQExpBuffer(query, "SELECT lanname FROM pg_language WHERE oid = %u", langid);
9123 200 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
9124 200 : lanname = pg_strdup(fmtId(PQgetvalue(res, 0, 0)));
9125 200 : destroyPQExpBuffer(query);
9126 200 : PQclear(res);
9127 :
9128 200 : return lanname;
9129 : }
9130 :
9131 : /*
9132 : * getTransforms
9133 : * get basic information about every transform in the system
9134 : */
9135 : void
9136 370 : getTransforms(Archive *fout)
9137 : {
9138 : PGresult *res;
9139 : int ntups;
9140 : int i;
9141 : PQExpBuffer query;
9142 : TransformInfo *transforminfo;
9143 : int i_tableoid;
9144 : int i_oid;
9145 : int i_trftype;
9146 : int i_trflang;
9147 : int i_trffromsql;
9148 : int i_trftosql;
9149 :
9150 : /* Transforms didn't exist pre-9.5 */
9151 370 : if (fout->remoteVersion < 90500)
9152 0 : return;
9153 :
9154 370 : query = createPQExpBuffer();
9155 :
9156 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9157 : "trftype, trflang, trffromsql::oid, trftosql::oid "
9158 : "FROM pg_transform "
9159 : "ORDER BY 3,4");
9160 :
9161 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
9162 :
9163 370 : ntups = PQntuples(res);
9164 :
9165 370 : transforminfo = (TransformInfo *) pg_malloc(ntups * sizeof(TransformInfo));
9166 :
9167 370 : i_tableoid = PQfnumber(res, "tableoid");
9168 370 : i_oid = PQfnumber(res, "oid");
9169 370 : i_trftype = PQfnumber(res, "trftype");
9170 370 : i_trflang = PQfnumber(res, "trflang");
9171 370 : i_trffromsql = PQfnumber(res, "trffromsql");
9172 370 : i_trftosql = PQfnumber(res, "trftosql");
9173 :
9174 486 : for (i = 0; i < ntups; i++)
9175 : {
9176 : PQExpBufferData namebuf;
9177 : TypeInfo *typeInfo;
9178 : char *lanname;
9179 :
9180 116 : transforminfo[i].dobj.objType = DO_TRANSFORM;
9181 116 : transforminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9182 116 : transforminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9183 116 : AssignDumpId(&transforminfo[i].dobj);
9184 116 : transforminfo[i].trftype = atooid(PQgetvalue(res, i, i_trftype));
9185 116 : transforminfo[i].trflang = atooid(PQgetvalue(res, i, i_trflang));
9186 116 : transforminfo[i].trffromsql = atooid(PQgetvalue(res, i, i_trffromsql));
9187 116 : transforminfo[i].trftosql = atooid(PQgetvalue(res, i, i_trftosql));
9188 :
9189 : /*
9190 : * Try to name transform as concatenation of type and language name.
9191 : * This is only used for purposes of sorting. If we fail to find
9192 : * either, the name will be an empty string.
9193 : */
9194 116 : initPQExpBuffer(&namebuf);
9195 116 : typeInfo = findTypeByOid(transforminfo[i].trftype);
9196 116 : lanname = get_language_name(fout, transforminfo[i].trflang);
9197 116 : if (typeInfo && lanname)
9198 116 : appendPQExpBuffer(&namebuf, "%s %s",
9199 : typeInfo->dobj.name, lanname);
9200 116 : transforminfo[i].dobj.name = namebuf.data;
9201 116 : free(lanname);
9202 :
9203 : /* Decide whether we want to dump it */
9204 116 : selectDumpableObject(&(transforminfo[i].dobj), fout);
9205 : }
9206 :
9207 370 : PQclear(res);
9208 :
9209 370 : destroyPQExpBuffer(query);
9210 : }
9211 :
9212 : /*
9213 : * getTableAttrs -
9214 : * for each interesting table, read info about its attributes
9215 : * (names, types, default values, CHECK constraints, etc)
9216 : *
9217 : * modifies tblinfo
9218 : */
9219 : void
9220 370 : getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
9221 : {
9222 370 : DumpOptions *dopt = fout->dopt;
9223 370 : PQExpBuffer q = createPQExpBuffer();
9224 370 : PQExpBuffer tbloids = createPQExpBuffer();
9225 370 : PQExpBuffer checkoids = createPQExpBuffer();
9226 370 : PQExpBuffer invalidnotnulloids = NULL;
9227 : PGresult *res;
9228 : int ntups;
9229 : int curtblindx;
9230 : int i_attrelid;
9231 : int i_attnum;
9232 : int i_attname;
9233 : int i_atttypname;
9234 : int i_attstattarget;
9235 : int i_attstorage;
9236 : int i_typstorage;
9237 : int i_attidentity;
9238 : int i_attgenerated;
9239 : int i_attisdropped;
9240 : int i_attlen;
9241 : int i_attalign;
9242 : int i_attislocal;
9243 : int i_notnull_name;
9244 : int i_notnull_comment;
9245 : int i_notnull_noinherit;
9246 : int i_notnull_islocal;
9247 : int i_notnull_invalidoid;
9248 : int i_attoptions;
9249 : int i_attcollation;
9250 : int i_attcompression;
9251 : int i_attfdwoptions;
9252 : int i_attmissingval;
9253 : int i_atthasdef;
9254 :
9255 : /*
9256 : * We want to perform just one query against pg_attribute, and then just
9257 : * one against pg_attrdef (for DEFAULTs) and two against pg_constraint
9258 : * (for CHECK constraints and for NOT NULL constraints). However, we
9259 : * mustn't try to select every row of those catalogs and then sort it out
9260 : * on the client side, because some of the server-side functions we need
9261 : * would be unsafe to apply to tables we don't have lock on. Hence, we
9262 : * build an array of the OIDs of tables we care about (and now have lock
9263 : * on!), and use a WHERE clause to constrain which rows are selected.
9264 : */
9265 370 : appendPQExpBufferChar(tbloids, '{');
9266 370 : appendPQExpBufferChar(checkoids, '{');
9267 98826 : for (int i = 0; i < numTables; i++)
9268 : {
9269 98456 : TableInfo *tbinfo = &tblinfo[i];
9270 :
9271 : /* Don't bother to collect info for sequences */
9272 98456 : if (tbinfo->relkind == RELKIND_SEQUENCE)
9273 1312 : continue;
9274 :
9275 : /*
9276 : * Don't bother with uninteresting tables, either. For binary
9277 : * upgrades, this is bypassed for pg_largeobject_metadata and
9278 : * pg_shdepend so that the columns names are collected for the
9279 : * corresponding COPY commands. Restoring the data for those catalogs
9280 : * is faster than restoring the equivalent set of large object
9281 : * commands. We can only do this for upgrades from v12 and newer; in
9282 : * older versions, pg_largeobject_metadata was created WITH OIDS, so
9283 : * the OID column is hidden and won't be dumped.
9284 : */
9285 97144 : if (!tbinfo->interesting &&
9286 83204 : !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
9287 15516 : (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
9288 15444 : tbinfo->dobj.catId.oid == SharedDependRelationId)))
9289 83060 : continue;
9290 :
9291 : /* OK, we need info for this table */
9292 14084 : if (tbloids->len > 1) /* do we have more than the '{'? */
9293 13802 : appendPQExpBufferChar(tbloids, ',');
9294 14084 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
9295 :
9296 14084 : if (tbinfo->ncheck > 0)
9297 : {
9298 : /* Also make a list of the ones with check constraints */
9299 1172 : if (checkoids->len > 1) /* do we have more than the '{'? */
9300 1022 : appendPQExpBufferChar(checkoids, ',');
9301 1172 : appendPQExpBuffer(checkoids, "%u", tbinfo->dobj.catId.oid);
9302 : }
9303 : }
9304 370 : appendPQExpBufferChar(tbloids, '}');
9305 370 : appendPQExpBufferChar(checkoids, '}');
9306 :
9307 : /*
9308 : * Find all the user attributes and their types.
9309 : *
9310 : * Since we only want to dump COLLATE clauses for attributes whose
9311 : * collation is different from their type's default, we use a CASE here to
9312 : * suppress uninteresting attcollations cheaply.
9313 : */
9314 370 : appendPQExpBufferStr(q,
9315 : "SELECT\n"
9316 : "a.attrelid,\n"
9317 : "a.attnum,\n"
9318 : "a.attname,\n"
9319 : "a.attstattarget,\n"
9320 : "a.attstorage,\n"
9321 : "t.typstorage,\n"
9322 : "a.atthasdef,\n"
9323 : "a.attisdropped,\n"
9324 : "a.attlen,\n"
9325 : "a.attalign,\n"
9326 : "a.attislocal,\n"
9327 : "pg_catalog.format_type(t.oid, a.atttypmod) AS atttypname,\n"
9328 : "array_to_string(a.attoptions, ', ') AS attoptions,\n"
9329 : "CASE WHEN a.attcollation <> t.typcollation "
9330 : "THEN a.attcollation ELSE 0 END AS attcollation,\n"
9331 : "pg_catalog.array_to_string(ARRAY("
9332 : "SELECT pg_catalog.quote_ident(option_name) || "
9333 : "' ' || pg_catalog.quote_literal(option_value) "
9334 : "FROM pg_catalog.pg_options_to_table(attfdwoptions) "
9335 : "ORDER BY option_name"
9336 : "), E',\n ') AS attfdwoptions,\n");
9337 :
9338 : /*
9339 : * Find out any NOT NULL markings for each column. In 18 and up we read
9340 : * pg_constraint to obtain the constraint name, and for valid constraints
9341 : * also pg_description to obtain its comment. notnull_noinherit is set
9342 : * according to the NO INHERIT property. For versions prior to 18, we
9343 : * store an empty string as the name when a constraint is marked as
9344 : * attnotnull (this cues dumpTableSchema to print the NOT NULL clause
9345 : * without a name); also, such cases are never NO INHERIT.
9346 : *
9347 : * For invalid constraints, we need to store their OIDs for processing
9348 : * elsewhere, so we bring the pg_constraint.oid value when the constraint
9349 : * is invalid, and NULL otherwise. Their comments are handled not here
9350 : * but by collectComments, because they're their own dumpable object.
9351 : *
9352 : * We track in notnull_islocal whether the constraint was defined directly
9353 : * in this table or via an ancestor, for binary upgrade. flagInhAttrs
9354 : * might modify this later; that routine is also in charge of determining
9355 : * the correct inhcount.
9356 : */
9357 370 : if (fout->remoteVersion >= 180000)
9358 370 : appendPQExpBufferStr(q,
9359 : "co.conname AS notnull_name,\n"
9360 : "CASE WHEN co.convalidated THEN pt.description"
9361 : " ELSE NULL END AS notnull_comment,\n"
9362 : "CASE WHEN NOT co.convalidated THEN co.oid "
9363 : "ELSE NULL END AS notnull_invalidoid,\n"
9364 : "co.connoinherit AS notnull_noinherit,\n"
9365 : "co.conislocal AS notnull_islocal,\n");
9366 : else
9367 0 : appendPQExpBufferStr(q,
9368 : "CASE WHEN a.attnotnull THEN '' ELSE NULL END AS notnull_name,\n"
9369 : "NULL AS notnull_comment,\n"
9370 : "NULL AS notnull_invalidoid,\n"
9371 : "false AS notnull_noinherit,\n"
9372 : "a.attislocal AS notnull_islocal,\n");
9373 :
9374 370 : if (fout->remoteVersion >= 140000)
9375 370 : appendPQExpBufferStr(q,
9376 : "a.attcompression AS attcompression,\n");
9377 : else
9378 0 : appendPQExpBufferStr(q,
9379 : "'' AS attcompression,\n");
9380 :
9381 370 : if (fout->remoteVersion >= 100000)
9382 370 : appendPQExpBufferStr(q,
9383 : "a.attidentity,\n");
9384 : else
9385 0 : appendPQExpBufferStr(q,
9386 : "'' AS attidentity,\n");
9387 :
9388 370 : if (fout->remoteVersion >= 110000)
9389 370 : appendPQExpBufferStr(q,
9390 : "CASE WHEN a.atthasmissing AND NOT a.attisdropped "
9391 : "THEN a.attmissingval ELSE null END AS attmissingval,\n");
9392 : else
9393 0 : appendPQExpBufferStr(q,
9394 : "NULL AS attmissingval,\n");
9395 :
9396 370 : if (fout->remoteVersion >= 120000)
9397 370 : appendPQExpBufferStr(q,
9398 : "a.attgenerated\n");
9399 : else
9400 0 : appendPQExpBufferStr(q,
9401 : "'' AS attgenerated\n");
9402 :
9403 : /* need left join to pg_type to not fail on dropped columns ... */
9404 370 : appendPQExpBuffer(q,
9405 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9406 : "JOIN pg_catalog.pg_attribute a ON (src.tbloid = a.attrelid) "
9407 : "LEFT JOIN pg_catalog.pg_type t "
9408 : "ON (a.atttypid = t.oid)\n",
9409 : tbloids->data);
9410 :
9411 : /*
9412 : * In versions 18 and up, we need pg_constraint for explicit NOT NULL
9413 : * entries and pg_description to get their comments.
9414 : */
9415 370 : if (fout->remoteVersion >= 180000)
9416 370 : appendPQExpBufferStr(q,
9417 : " LEFT JOIN pg_catalog.pg_constraint co ON "
9418 : "(a.attrelid = co.conrelid\n"
9419 : " AND co.contype = 'n' AND "
9420 : "co.conkey = array[a.attnum])\n"
9421 : " LEFT JOIN pg_catalog.pg_description pt ON "
9422 : "(pt.classoid = co.tableoid AND pt.objoid = co.oid)\n");
9423 :
9424 370 : appendPQExpBufferStr(q,
9425 : "WHERE a.attnum > 0::pg_catalog.int2\n"
9426 : "ORDER BY a.attrelid, a.attnum");
9427 :
9428 370 : res = ExecuteSqlQuery(fout, q->data, PGRES_TUPLES_OK);
9429 :
9430 370 : ntups = PQntuples(res);
9431 :
9432 370 : i_attrelid = PQfnumber(res, "attrelid");
9433 370 : i_attnum = PQfnumber(res, "attnum");
9434 370 : i_attname = PQfnumber(res, "attname");
9435 370 : i_atttypname = PQfnumber(res, "atttypname");
9436 370 : i_attstattarget = PQfnumber(res, "attstattarget");
9437 370 : i_attstorage = PQfnumber(res, "attstorage");
9438 370 : i_typstorage = PQfnumber(res, "typstorage");
9439 370 : i_attidentity = PQfnumber(res, "attidentity");
9440 370 : i_attgenerated = PQfnumber(res, "attgenerated");
9441 370 : i_attisdropped = PQfnumber(res, "attisdropped");
9442 370 : i_attlen = PQfnumber(res, "attlen");
9443 370 : i_attalign = PQfnumber(res, "attalign");
9444 370 : i_attislocal = PQfnumber(res, "attislocal");
9445 370 : i_notnull_name = PQfnumber(res, "notnull_name");
9446 370 : i_notnull_comment = PQfnumber(res, "notnull_comment");
9447 370 : i_notnull_invalidoid = PQfnumber(res, "notnull_invalidoid");
9448 370 : i_notnull_noinherit = PQfnumber(res, "notnull_noinherit");
9449 370 : i_notnull_islocal = PQfnumber(res, "notnull_islocal");
9450 370 : i_attoptions = PQfnumber(res, "attoptions");
9451 370 : i_attcollation = PQfnumber(res, "attcollation");
9452 370 : i_attcompression = PQfnumber(res, "attcompression");
9453 370 : i_attfdwoptions = PQfnumber(res, "attfdwoptions");
9454 370 : i_attmissingval = PQfnumber(res, "attmissingval");
9455 370 : i_atthasdef = PQfnumber(res, "atthasdef");
9456 :
9457 : /* Within the next loop, we'll accumulate OIDs of tables with defaults */
9458 370 : resetPQExpBuffer(tbloids);
9459 370 : appendPQExpBufferChar(tbloids, '{');
9460 :
9461 : /*
9462 : * Outer loop iterates once per table, not once per row. Incrementing of
9463 : * r is handled by the inner loop.
9464 : */
9465 370 : curtblindx = -1;
9466 14142 : for (int r = 0; r < ntups;)
9467 : {
9468 13772 : Oid attrelid = atooid(PQgetvalue(res, r, i_attrelid));
9469 13772 : TableInfo *tbinfo = NULL;
9470 : int numatts;
9471 : bool hasdefaults;
9472 :
9473 : /* Count rows for this table */
9474 50664 : for (numatts = 1; numatts < ntups - r; numatts++)
9475 50388 : if (atooid(PQgetvalue(res, r + numatts, i_attrelid)) != attrelid)
9476 13496 : break;
9477 :
9478 : /*
9479 : * Locate the associated TableInfo; we rely on tblinfo[] being in OID
9480 : * order.
9481 : */
9482 68168 : while (++curtblindx < numTables)
9483 : {
9484 68168 : tbinfo = &tblinfo[curtblindx];
9485 68168 : if (tbinfo->dobj.catId.oid == attrelid)
9486 13772 : break;
9487 : }
9488 13772 : if (curtblindx >= numTables)
9489 0 : pg_fatal("unrecognized table OID %u", attrelid);
9490 : /* cross-check that we only got requested tables */
9491 13772 : if (tbinfo->relkind == RELKIND_SEQUENCE ||
9492 13772 : (!tbinfo->interesting &&
9493 144 : !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
9494 144 : (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
9495 72 : tbinfo->dobj.catId.oid == SharedDependRelationId))))
9496 0 : pg_fatal("unexpected column data for table \"%s\"",
9497 : tbinfo->dobj.name);
9498 :
9499 : /* Save data for this table */
9500 13772 : tbinfo->numatts = numatts;
9501 13772 : tbinfo->attnames = (char **) pg_malloc(numatts * sizeof(char *));
9502 13772 : tbinfo->atttypnames = (char **) pg_malloc(numatts * sizeof(char *));
9503 13772 : tbinfo->attstattarget = (int *) pg_malloc(numatts * sizeof(int));
9504 13772 : tbinfo->attstorage = (char *) pg_malloc(numatts * sizeof(char));
9505 13772 : tbinfo->typstorage = (char *) pg_malloc(numatts * sizeof(char));
9506 13772 : tbinfo->attidentity = (char *) pg_malloc(numatts * sizeof(char));
9507 13772 : tbinfo->attgenerated = (char *) pg_malloc(numatts * sizeof(char));
9508 13772 : tbinfo->attisdropped = (bool *) pg_malloc(numatts * sizeof(bool));
9509 13772 : tbinfo->attlen = (int *) pg_malloc(numatts * sizeof(int));
9510 13772 : tbinfo->attalign = (char *) pg_malloc(numatts * sizeof(char));
9511 13772 : tbinfo->attislocal = (bool *) pg_malloc(numatts * sizeof(bool));
9512 13772 : tbinfo->attoptions = (char **) pg_malloc(numatts * sizeof(char *));
9513 13772 : tbinfo->attcollation = (Oid *) pg_malloc(numatts * sizeof(Oid));
9514 13772 : tbinfo->attcompression = (char *) pg_malloc(numatts * sizeof(char));
9515 13772 : tbinfo->attfdwoptions = (char **) pg_malloc(numatts * sizeof(char *));
9516 13772 : tbinfo->attmissingval = (char **) pg_malloc(numatts * sizeof(char *));
9517 13772 : tbinfo->notnull_constrs = (char **) pg_malloc(numatts * sizeof(char *));
9518 13772 : tbinfo->notnull_comment = (char **) pg_malloc(numatts * sizeof(char *));
9519 13772 : tbinfo->notnull_invalid = (bool *) pg_malloc(numatts * sizeof(bool));
9520 13772 : tbinfo->notnull_noinh = (bool *) pg_malloc(numatts * sizeof(bool));
9521 13772 : tbinfo->notnull_islocal = (bool *) pg_malloc(numatts * sizeof(bool));
9522 13772 : tbinfo->attrdefs = (AttrDefInfo **) pg_malloc(numatts * sizeof(AttrDefInfo *));
9523 13772 : hasdefaults = false;
9524 :
9525 64436 : for (int j = 0; j < numatts; j++, r++)
9526 : {
9527 50664 : if (j + 1 != atoi(PQgetvalue(res, r, i_attnum)))
9528 0 : pg_fatal("invalid column numbering in table \"%s\"",
9529 : tbinfo->dobj.name);
9530 50664 : tbinfo->attnames[j] = pg_strdup(PQgetvalue(res, r, i_attname));
9531 50664 : tbinfo->atttypnames[j] = pg_strdup(PQgetvalue(res, r, i_atttypname));
9532 50664 : if (PQgetisnull(res, r, i_attstattarget))
9533 50572 : tbinfo->attstattarget[j] = -1;
9534 : else
9535 92 : tbinfo->attstattarget[j] = atoi(PQgetvalue(res, r, i_attstattarget));
9536 50664 : tbinfo->attstorage[j] = *(PQgetvalue(res, r, i_attstorage));
9537 50664 : tbinfo->typstorage[j] = *(PQgetvalue(res, r, i_typstorage));
9538 50664 : tbinfo->attidentity[j] = *(PQgetvalue(res, r, i_attidentity));
9539 50664 : tbinfo->attgenerated[j] = *(PQgetvalue(res, r, i_attgenerated));
9540 50664 : tbinfo->needs_override = tbinfo->needs_override || (tbinfo->attidentity[j] == ATTRIBUTE_IDENTITY_ALWAYS);
9541 50664 : tbinfo->attisdropped[j] = (PQgetvalue(res, r, i_attisdropped)[0] == 't');
9542 50664 : tbinfo->attlen[j] = atoi(PQgetvalue(res, r, i_attlen));
9543 50664 : tbinfo->attalign[j] = *(PQgetvalue(res, r, i_attalign));
9544 50664 : tbinfo->attislocal[j] = (PQgetvalue(res, r, i_attislocal)[0] == 't');
9545 :
9546 : /* Handle not-null constraint name and flags */
9547 50664 : determineNotNullFlags(fout, res, r,
9548 : tbinfo, j,
9549 : i_notnull_name,
9550 : i_notnull_comment,
9551 : i_notnull_invalidoid,
9552 : i_notnull_noinherit,
9553 : i_notnull_islocal,
9554 : &invalidnotnulloids);
9555 :
9556 50664 : tbinfo->notnull_comment[j] = PQgetisnull(res, r, i_notnull_comment) ?
9557 50664 : NULL : pg_strdup(PQgetvalue(res, r, i_notnull_comment));
9558 50664 : tbinfo->attoptions[j] = pg_strdup(PQgetvalue(res, r, i_attoptions));
9559 50664 : tbinfo->attcollation[j] = atooid(PQgetvalue(res, r, i_attcollation));
9560 50664 : tbinfo->attcompression[j] = *(PQgetvalue(res, r, i_attcompression));
9561 50664 : tbinfo->attfdwoptions[j] = pg_strdup(PQgetvalue(res, r, i_attfdwoptions));
9562 50664 : tbinfo->attmissingval[j] = pg_strdup(PQgetvalue(res, r, i_attmissingval));
9563 50664 : tbinfo->attrdefs[j] = NULL; /* fix below */
9564 50664 : if (PQgetvalue(res, r, i_atthasdef)[0] == 't')
9565 2728 : hasdefaults = true;
9566 : }
9567 :
9568 13772 : if (hasdefaults)
9569 : {
9570 : /* Collect OIDs of interesting tables that have defaults */
9571 2036 : if (tbloids->len > 1) /* do we have more than the '{'? */
9572 1888 : appendPQExpBufferChar(tbloids, ',');
9573 2036 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
9574 : }
9575 : }
9576 :
9577 : /* If invalidnotnulloids has any data, finalize it */
9578 370 : if (invalidnotnulloids != NULL)
9579 98 : appendPQExpBufferChar(invalidnotnulloids, '}');
9580 :
9581 370 : PQclear(res);
9582 :
9583 : /*
9584 : * Now get info about column defaults. This is skipped for a data-only
9585 : * dump, as it is only needed for table schemas.
9586 : */
9587 370 : if (dopt->dumpSchema && tbloids->len > 1)
9588 : {
9589 : AttrDefInfo *attrdefs;
9590 : int numDefaults;
9591 132 : TableInfo *tbinfo = NULL;
9592 :
9593 132 : pg_log_info("finding table default expressions");
9594 :
9595 132 : appendPQExpBufferChar(tbloids, '}');
9596 :
9597 132 : printfPQExpBuffer(q, "SELECT a.tableoid, a.oid, adrelid, adnum, "
9598 : "pg_catalog.pg_get_expr(adbin, adrelid) AS adsrc\n"
9599 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9600 : "JOIN pg_catalog.pg_attrdef a ON (src.tbloid = a.adrelid)\n"
9601 : "ORDER BY a.adrelid, a.adnum",
9602 : tbloids->data);
9603 :
9604 132 : res = ExecuteSqlQuery(fout, q->data, PGRES_TUPLES_OK);
9605 :
9606 132 : numDefaults = PQntuples(res);
9607 132 : attrdefs = (AttrDefInfo *) pg_malloc(numDefaults * sizeof(AttrDefInfo));
9608 :
9609 132 : curtblindx = -1;
9610 2664 : for (int j = 0; j < numDefaults; j++)
9611 : {
9612 2532 : Oid adtableoid = atooid(PQgetvalue(res, j, 0));
9613 2532 : Oid adoid = atooid(PQgetvalue(res, j, 1));
9614 2532 : Oid adrelid = atooid(PQgetvalue(res, j, 2));
9615 2532 : int adnum = atoi(PQgetvalue(res, j, 3));
9616 2532 : char *adsrc = PQgetvalue(res, j, 4);
9617 :
9618 : /*
9619 : * Locate the associated TableInfo; we rely on tblinfo[] being in
9620 : * OID order.
9621 : */
9622 2532 : if (tbinfo == NULL || tbinfo->dobj.catId.oid != adrelid)
9623 : {
9624 40664 : while (++curtblindx < numTables)
9625 : {
9626 40664 : tbinfo = &tblinfo[curtblindx];
9627 40664 : if (tbinfo->dobj.catId.oid == adrelid)
9628 1900 : break;
9629 : }
9630 1900 : if (curtblindx >= numTables)
9631 0 : pg_fatal("unrecognized table OID %u", adrelid);
9632 : }
9633 :
9634 2532 : if (adnum <= 0 || adnum > tbinfo->numatts)
9635 0 : pg_fatal("invalid adnum value %d for table \"%s\"",
9636 : adnum, tbinfo->dobj.name);
9637 :
9638 : /*
9639 : * dropped columns shouldn't have defaults, but just in case,
9640 : * ignore 'em
9641 : */
9642 2532 : if (tbinfo->attisdropped[adnum - 1])
9643 0 : continue;
9644 :
9645 2532 : attrdefs[j].dobj.objType = DO_ATTRDEF;
9646 2532 : attrdefs[j].dobj.catId.tableoid = adtableoid;
9647 2532 : attrdefs[j].dobj.catId.oid = adoid;
9648 2532 : AssignDumpId(&attrdefs[j].dobj);
9649 2532 : attrdefs[j].adtable = tbinfo;
9650 2532 : attrdefs[j].adnum = adnum;
9651 2532 : attrdefs[j].adef_expr = pg_strdup(adsrc);
9652 :
9653 2532 : attrdefs[j].dobj.name = pg_strdup(tbinfo->dobj.name);
9654 2532 : attrdefs[j].dobj.namespace = tbinfo->dobj.namespace;
9655 :
9656 2532 : attrdefs[j].dobj.dump = tbinfo->dobj.dump;
9657 :
9658 : /*
9659 : * Figure out whether the default/generation expression should be
9660 : * dumped as part of the main CREATE TABLE (or similar) command or
9661 : * as a separate ALTER TABLE (or similar) command. The preference
9662 : * is to put it into the CREATE command, but in some cases that's
9663 : * not possible.
9664 : */
9665 2532 : if (tbinfo->attgenerated[adnum - 1])
9666 : {
9667 : /*
9668 : * Column generation expressions cannot be dumped separately,
9669 : * because there is no syntax for it. By setting separate to
9670 : * false here we prevent the "default" from being processed as
9671 : * its own dumpable object. Later, flagInhAttrs() will mark
9672 : * it as not to be dumped at all, if possible (that is, if it
9673 : * can be inherited from a parent).
9674 : */
9675 1408 : attrdefs[j].separate = false;
9676 : }
9677 1124 : else if (tbinfo->relkind == RELKIND_VIEW)
9678 : {
9679 : /*
9680 : * Defaults on a VIEW must always be dumped as separate ALTER
9681 : * TABLE commands.
9682 : */
9683 76 : attrdefs[j].separate = true;
9684 : }
9685 1048 : else if (!shouldPrintColumn(dopt, tbinfo, adnum - 1))
9686 : {
9687 : /* column will be suppressed, print default separately */
9688 8 : attrdefs[j].separate = true;
9689 : }
9690 : else
9691 : {
9692 1040 : attrdefs[j].separate = false;
9693 : }
9694 :
9695 2532 : if (!attrdefs[j].separate)
9696 : {
9697 : /*
9698 : * Mark the default as needing to appear before the table, so
9699 : * that any dependencies it has must be emitted before the
9700 : * CREATE TABLE. If this is not possible, we'll change to
9701 : * "separate" mode while sorting dependencies.
9702 : */
9703 2448 : addObjectDependency(&tbinfo->dobj,
9704 2448 : attrdefs[j].dobj.dumpId);
9705 : }
9706 :
9707 2532 : tbinfo->attrdefs[adnum - 1] = &attrdefs[j];
9708 : }
9709 :
9710 132 : PQclear(res);
9711 : }
9712 :
9713 : /*
9714 : * Get info about NOT NULL NOT VALID constraints. This is skipped for a
9715 : * data-only dump, as it is only needed for table schemas.
9716 : */
9717 370 : if (dopt->dumpSchema && invalidnotnulloids)
9718 : {
9719 : ConstraintInfo *constrs;
9720 : int numConstrs;
9721 : int i_tableoid;
9722 : int i_oid;
9723 : int i_conrelid;
9724 : int i_conname;
9725 : int i_consrc;
9726 : int i_conislocal;
9727 :
9728 86 : pg_log_info("finding invalid not-null constraints");
9729 :
9730 86 : resetPQExpBuffer(q);
9731 86 : appendPQExpBuffer(q,
9732 : "SELECT c.tableoid, c.oid, conrelid, conname, "
9733 : "pg_catalog.pg_get_constraintdef(c.oid) AS consrc, "
9734 : "conislocal, convalidated "
9735 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(conoid)\n"
9736 : "JOIN pg_catalog.pg_constraint c ON (src.conoid = c.oid)\n"
9737 : "ORDER BY c.conrelid, c.conname",
9738 86 : invalidnotnulloids->data);
9739 :
9740 86 : res = ExecuteSqlQuery(fout, q->data, PGRES_TUPLES_OK);
9741 :
9742 86 : numConstrs = PQntuples(res);
9743 86 : constrs = (ConstraintInfo *) pg_malloc(numConstrs * sizeof(ConstraintInfo));
9744 :
9745 86 : i_tableoid = PQfnumber(res, "tableoid");
9746 86 : i_oid = PQfnumber(res, "oid");
9747 86 : i_conrelid = PQfnumber(res, "conrelid");
9748 86 : i_conname = PQfnumber(res, "conname");
9749 86 : i_consrc = PQfnumber(res, "consrc");
9750 86 : i_conislocal = PQfnumber(res, "conislocal");
9751 :
9752 : /* As above, this loop iterates once per table, not once per row */
9753 86 : curtblindx = -1;
9754 232 : for (int j = 0; j < numConstrs;)
9755 : {
9756 146 : Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
9757 146 : TableInfo *tbinfo = NULL;
9758 : int numcons;
9759 :
9760 : /* Count rows for this table */
9761 146 : for (numcons = 1; numcons < numConstrs - j; numcons++)
9762 60 : if (atooid(PQgetvalue(res, j + numcons, i_conrelid)) != conrelid)
9763 60 : break;
9764 :
9765 : /*
9766 : * Locate the associated TableInfo; we rely on tblinfo[] being in
9767 : * OID order.
9768 : */
9769 28760 : while (++curtblindx < numTables)
9770 : {
9771 28760 : tbinfo = &tblinfo[curtblindx];
9772 28760 : if (tbinfo->dobj.catId.oid == conrelid)
9773 146 : break;
9774 : }
9775 146 : if (curtblindx >= numTables)
9776 0 : pg_fatal("unrecognized table OID %u", conrelid);
9777 :
9778 292 : for (int c = 0; c < numcons; c++, j++)
9779 : {
9780 146 : constrs[j].dobj.objType = DO_CONSTRAINT;
9781 146 : constrs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
9782 146 : constrs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
9783 146 : AssignDumpId(&constrs[j].dobj);
9784 146 : constrs[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
9785 146 : constrs[j].dobj.namespace = tbinfo->dobj.namespace;
9786 146 : constrs[j].contable = tbinfo;
9787 146 : constrs[j].condomain = NULL;
9788 146 : constrs[j].contype = 'n';
9789 146 : constrs[j].condef = pg_strdup(PQgetvalue(res, j, i_consrc));
9790 146 : constrs[j].confrelid = InvalidOid;
9791 146 : constrs[j].conindex = 0;
9792 146 : constrs[j].condeferrable = false;
9793 146 : constrs[j].condeferred = false;
9794 146 : constrs[j].conislocal = (PQgetvalue(res, j, i_conislocal)[0] == 't');
9795 :
9796 : /*
9797 : * All invalid not-null constraints must be dumped separately,
9798 : * because CREATE TABLE would not create them as invalid, and
9799 : * also because they must be created after potentially
9800 : * violating data has been loaded.
9801 : */
9802 146 : constrs[j].separate = true;
9803 :
9804 146 : constrs[j].dobj.dump = tbinfo->dobj.dump;
9805 : }
9806 : }
9807 86 : PQclear(res);
9808 : }
9809 :
9810 : /*
9811 : * Get info about table CHECK constraints. This is skipped for a
9812 : * data-only dump, as it is only needed for table schemas.
9813 : */
9814 370 : if (dopt->dumpSchema && checkoids->len > 2)
9815 : {
9816 : ConstraintInfo *constrs;
9817 : int numConstrs;
9818 : int i_tableoid;
9819 : int i_oid;
9820 : int i_conrelid;
9821 : int i_conname;
9822 : int i_consrc;
9823 : int i_conislocal;
9824 : int i_convalidated;
9825 :
9826 134 : pg_log_info("finding table check constraints");
9827 :
9828 134 : resetPQExpBuffer(q);
9829 134 : appendPQExpBuffer(q,
9830 : "SELECT c.tableoid, c.oid, conrelid, conname, "
9831 : "pg_catalog.pg_get_constraintdef(c.oid) AS consrc, "
9832 : "conislocal, convalidated "
9833 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9834 : "JOIN pg_catalog.pg_constraint c ON (src.tbloid = c.conrelid)\n"
9835 : "WHERE contype = 'c' "
9836 : "ORDER BY c.conrelid, c.conname",
9837 : checkoids->data);
9838 :
9839 134 : res = ExecuteSqlQuery(fout, q->data, PGRES_TUPLES_OK);
9840 :
9841 134 : numConstrs = PQntuples(res);
9842 134 : constrs = (ConstraintInfo *) pg_malloc(numConstrs * sizeof(ConstraintInfo));
9843 :
9844 134 : i_tableoid = PQfnumber(res, "tableoid");
9845 134 : i_oid = PQfnumber(res, "oid");
9846 134 : i_conrelid = PQfnumber(res, "conrelid");
9847 134 : i_conname = PQfnumber(res, "conname");
9848 134 : i_consrc = PQfnumber(res, "consrc");
9849 134 : i_conislocal = PQfnumber(res, "conislocal");
9850 134 : i_convalidated = PQfnumber(res, "convalidated");
9851 :
9852 : /* As above, this loop iterates once per table, not once per row */
9853 134 : curtblindx = -1;
9854 1204 : for (int j = 0; j < numConstrs;)
9855 : {
9856 1070 : Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
9857 1070 : TableInfo *tbinfo = NULL;
9858 : int numcons;
9859 :
9860 : /* Count rows for this table */
9861 1364 : for (numcons = 1; numcons < numConstrs - j; numcons++)
9862 1230 : if (atooid(PQgetvalue(res, j + numcons, i_conrelid)) != conrelid)
9863 936 : break;
9864 :
9865 : /*
9866 : * Locate the associated TableInfo; we rely on tblinfo[] being in
9867 : * OID order.
9868 : */
9869 39322 : while (++curtblindx < numTables)
9870 : {
9871 39322 : tbinfo = &tblinfo[curtblindx];
9872 39322 : if (tbinfo->dobj.catId.oid == conrelid)
9873 1070 : break;
9874 : }
9875 1070 : if (curtblindx >= numTables)
9876 0 : pg_fatal("unrecognized table OID %u", conrelid);
9877 :
9878 1070 : if (numcons != tbinfo->ncheck)
9879 : {
9880 0 : pg_log_error(ngettext("expected %d check constraint on table \"%s\" but found %d",
9881 : "expected %d check constraints on table \"%s\" but found %d",
9882 : tbinfo->ncheck),
9883 : tbinfo->ncheck, tbinfo->dobj.name, numcons);
9884 0 : pg_log_error_hint("The system catalogs might be corrupted.");
9885 0 : exit_nicely(1);
9886 : }
9887 :
9888 1070 : tbinfo->checkexprs = constrs + j;
9889 :
9890 2434 : for (int c = 0; c < numcons; c++, j++)
9891 : {
9892 1364 : bool validated = PQgetvalue(res, j, i_convalidated)[0] == 't';
9893 :
9894 1364 : constrs[j].dobj.objType = DO_CONSTRAINT;
9895 1364 : constrs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
9896 1364 : constrs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
9897 1364 : AssignDumpId(&constrs[j].dobj);
9898 1364 : constrs[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
9899 1364 : constrs[j].dobj.namespace = tbinfo->dobj.namespace;
9900 1364 : constrs[j].contable = tbinfo;
9901 1364 : constrs[j].condomain = NULL;
9902 1364 : constrs[j].contype = 'c';
9903 1364 : constrs[j].condef = pg_strdup(PQgetvalue(res, j, i_consrc));
9904 1364 : constrs[j].confrelid = InvalidOid;
9905 1364 : constrs[j].conindex = 0;
9906 1364 : constrs[j].condeferrable = false;
9907 1364 : constrs[j].condeferred = false;
9908 1364 : constrs[j].conislocal = (PQgetvalue(res, j, i_conislocal)[0] == 't');
9909 :
9910 : /*
9911 : * An unvalidated constraint needs to be dumped separately, so
9912 : * that potentially-violating existing data is loaded before
9913 : * the constraint.
9914 : */
9915 1364 : constrs[j].separate = !validated;
9916 :
9917 1364 : constrs[j].dobj.dump = tbinfo->dobj.dump;
9918 :
9919 : /*
9920 : * Mark the constraint as needing to appear before the table
9921 : * --- this is so that any other dependencies of the
9922 : * constraint will be emitted before we try to create the
9923 : * table. If the constraint is to be dumped separately, it
9924 : * will be dumped after data is loaded anyway, so don't do it.
9925 : * (There's an automatic dependency in the opposite direction
9926 : * anyway, so don't need to add one manually here.)
9927 : */
9928 1364 : if (!constrs[j].separate)
9929 1234 : addObjectDependency(&tbinfo->dobj,
9930 1234 : constrs[j].dobj.dumpId);
9931 :
9932 : /*
9933 : * We will detect later whether the constraint must be split
9934 : * out from the table definition.
9935 : */
9936 : }
9937 : }
9938 :
9939 134 : PQclear(res);
9940 : }
9941 :
9942 370 : destroyPQExpBuffer(q);
9943 370 : destroyPQExpBuffer(tbloids);
9944 370 : destroyPQExpBuffer(checkoids);
9945 370 : }
9946 :
9947 : /*
9948 : * Based on the getTableAttrs query's row corresponding to one column, set
9949 : * the name and flags to handle a not-null constraint for that column in
9950 : * the tbinfo struct.
9951 : *
9952 : * Result row 'r' is for tbinfo's attribute 'j'.
9953 : *
9954 : * There are four possibilities:
9955 : * 1) the column has no not-null constraints. In that case, ->notnull_constrs
9956 : * (the constraint name) remains NULL.
9957 : * 2) The column has a constraint with no name (this is the case when
9958 : * constraints come from pre-18 servers). In this case, ->notnull_constrs
9959 : * is set to the empty string; dumpTableSchema will print just "NOT NULL".
9960 : * 3) The column has an invalid not-null constraint. This must be treated
9961 : * as a separate object (because it must be created after the table data
9962 : * is loaded). So we add its OID to invalidnotnulloids for processing
9963 : * elsewhere and do nothing further with it here. We distinguish this
9964 : * case because the "notnull_invalidoid" column has been set to a non-NULL
9965 : * value, which is the constraint OID. Valid constraints have a null OID.
9966 : * 4) The column has a constraint with a known name; in that case
9967 : * notnull_constrs carries that name and dumpTableSchema will print
9968 : * "CONSTRAINT the_name NOT NULL". However, if the name is the default
9969 : * (table_column_not_null) and there's no comment on the constraint,
9970 : * there's no need to print that name in the dump, so notnull_constrs
9971 : * is set to the empty string and it behaves as case 2.
9972 : *
9973 : * In a child table that inherits from a parent already containing NOT NULL
9974 : * constraints and the columns in the child don't have their own NOT NULL
9975 : * declarations, we suppress printing constraints in the child: the
9976 : * constraints are acquired at the point where the child is attached to the
9977 : * parent. This is tracked in ->notnull_islocal; for servers pre-18 this is
9978 : * set not here but in flagInhAttrs. That flag is also used when the
9979 : * constraint was validated in a child but all its parent have it as NOT
9980 : * VALID.
9981 : *
9982 : * Any of these constraints might have the NO INHERIT bit. If so we set
9983 : * ->notnull_noinh and NO INHERIT will be printed by dumpTableSchema.
9984 : *
9985 : * In case 4 above, the name comparison is a bit of a hack; it actually fails
9986 : * to do the right thing in all but the trivial case. However, the downside
9987 : * of getting it wrong is simply that the name is printed rather than
9988 : * suppressed, so it's not a big deal.
9989 : *
9990 : * invalidnotnulloids is expected to be given as NULL; if any invalid not-null
9991 : * constraints are found, it is initialized and filled with the array of
9992 : * OIDs of such constraints, for later processing.
9993 : */
9994 : static void
9995 50664 : determineNotNullFlags(Archive *fout, PGresult *res, int r,
9996 : TableInfo *tbinfo, int j,
9997 : int i_notnull_name,
9998 : int i_notnull_comment,
9999 : int i_notnull_invalidoid,
10000 : int i_notnull_noinherit,
10001 : int i_notnull_islocal,
10002 : PQExpBuffer *invalidnotnulloids)
10003 : {
10004 50664 : DumpOptions *dopt = fout->dopt;
10005 :
10006 : /*
10007 : * If this not-null constraint is not valid, list its OID in
10008 : * invalidnotnulloids and do nothing further. It'll be processed
10009 : * elsewhere later.
10010 : *
10011 : * Because invalid not-null constraints are rare, we don't want to malloc
10012 : * invalidnotnulloids until we're sure we're going it need it, which
10013 : * happens here.
10014 : */
10015 50664 : if (!PQgetisnull(res, r, i_notnull_invalidoid))
10016 : {
10017 158 : char *constroid = PQgetvalue(res, r, i_notnull_invalidoid);
10018 :
10019 158 : if (*invalidnotnulloids == NULL)
10020 : {
10021 98 : *invalidnotnulloids = createPQExpBuffer();
10022 98 : appendPQExpBufferChar(*invalidnotnulloids, '{');
10023 98 : appendPQExpBufferStr(*invalidnotnulloids, constroid);
10024 : }
10025 : else
10026 60 : appendPQExpBuffer(*invalidnotnulloids, ",%s", constroid);
10027 :
10028 : /*
10029 : * Track when a parent constraint is invalid for the cases where a
10030 : * child constraint has been validated independenly.
10031 : */
10032 158 : tbinfo->notnull_invalid[j] = true;
10033 :
10034 : /* nothing else to do */
10035 158 : tbinfo->notnull_constrs[j] = NULL;
10036 158 : return;
10037 : }
10038 :
10039 : /*
10040 : * notnull_noinh is straight from the query result. notnull_islocal also,
10041 : * though flagInhAttrs may change that one later.
10042 : */
10043 50506 : tbinfo->notnull_noinh[j] = PQgetvalue(res, r, i_notnull_noinherit)[0] == 't';
10044 50506 : tbinfo->notnull_islocal[j] = PQgetvalue(res, r, i_notnull_islocal)[0] == 't';
10045 50506 : tbinfo->notnull_invalid[j] = false;
10046 :
10047 : /*
10048 : * Determine a constraint name to use. If the column is not marked not-
10049 : * null, we set NULL which cues ... to do nothing. An empty string says
10050 : * to print an unnamed NOT NULL, and anything else is a constraint name to
10051 : * use.
10052 : */
10053 50506 : if (fout->remoteVersion < 180000)
10054 : {
10055 : /*
10056 : * < 18 doesn't have not-null names, so an unnamed constraint is
10057 : * sufficient.
10058 : */
10059 0 : if (PQgetisnull(res, r, i_notnull_name))
10060 0 : tbinfo->notnull_constrs[j] = NULL;
10061 : else
10062 0 : tbinfo->notnull_constrs[j] = "";
10063 : }
10064 : else
10065 : {
10066 50506 : if (PQgetisnull(res, r, i_notnull_name))
10067 45002 : tbinfo->notnull_constrs[j] = NULL;
10068 : else
10069 : {
10070 : /*
10071 : * In binary upgrade of inheritance child tables, must have a
10072 : * constraint name that we can UPDATE later; same if there's a
10073 : * comment on the constraint.
10074 : */
10075 5504 : if ((dopt->binary_upgrade &&
10076 652 : !tbinfo->ispartition &&
10077 6000 : !tbinfo->notnull_islocal) ||
10078 5504 : !PQgetisnull(res, r, i_notnull_comment))
10079 : {
10080 108 : tbinfo->notnull_constrs[j] =
10081 108 : pstrdup(PQgetvalue(res, r, i_notnull_name));
10082 : }
10083 : else
10084 : {
10085 : char *default_name;
10086 :
10087 : /* XXX should match ChooseConstraintName better */
10088 5396 : default_name = psprintf("%s_%s_not_null", tbinfo->dobj.name,
10089 5396 : tbinfo->attnames[j]);
10090 5396 : if (strcmp(default_name,
10091 5396 : PQgetvalue(res, r, i_notnull_name)) == 0)
10092 3524 : tbinfo->notnull_constrs[j] = "";
10093 : else
10094 : {
10095 1872 : tbinfo->notnull_constrs[j] =
10096 1872 : pstrdup(PQgetvalue(res, r, i_notnull_name));
10097 : }
10098 5396 : free(default_name);
10099 : }
10100 : }
10101 : }
10102 : }
10103 :
10104 : /*
10105 : * Test whether a column should be printed as part of table's CREATE TABLE.
10106 : * Column number is zero-based.
10107 : *
10108 : * Normally this is always true, but it's false for dropped columns, as well
10109 : * as those that were inherited without any local definition. (If we print
10110 : * such a column it will mistakenly get pg_attribute.attislocal set to true.)
10111 : * For partitions, it's always true, because we want the partitions to be
10112 : * created independently and ATTACH PARTITION used afterwards.
10113 : *
10114 : * In binary_upgrade mode, we must print all columns and fix the attislocal/
10115 : * attisdropped state later, so as to keep control of the physical column
10116 : * order.
10117 : *
10118 : * This function exists because there are scattered nonobvious places that
10119 : * must be kept in sync with this decision.
10120 : */
10121 : bool
10122 82424 : shouldPrintColumn(const DumpOptions *dopt, const TableInfo *tbinfo, int colno)
10123 : {
10124 82424 : if (dopt->binary_upgrade)
10125 12436 : return true;
10126 69988 : if (tbinfo->attisdropped[colno])
10127 1476 : return false;
10128 68512 : return (tbinfo->attislocal[colno] || tbinfo->ispartition);
10129 : }
10130 :
10131 :
10132 : /*
10133 : * getTSParsers:
10134 : * get information about all text search parsers in the system catalogs
10135 : */
10136 : void
10137 370 : getTSParsers(Archive *fout)
10138 : {
10139 : PGresult *res;
10140 : int ntups;
10141 : int i;
10142 : PQExpBuffer query;
10143 : TSParserInfo *prsinfo;
10144 : int i_tableoid;
10145 : int i_oid;
10146 : int i_prsname;
10147 : int i_prsnamespace;
10148 : int i_prsstart;
10149 : int i_prstoken;
10150 : int i_prsend;
10151 : int i_prsheadline;
10152 : int i_prslextype;
10153 :
10154 370 : query = createPQExpBuffer();
10155 :
10156 : /*
10157 : * find all text search objects, including builtin ones; we filter out
10158 : * system-defined objects at dump-out time.
10159 : */
10160 :
10161 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, prsname, prsnamespace, "
10162 : "prsstart::oid, prstoken::oid, "
10163 : "prsend::oid, prsheadline::oid, prslextype::oid "
10164 : "FROM pg_ts_parser");
10165 :
10166 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10167 :
10168 370 : ntups = PQntuples(res);
10169 :
10170 370 : prsinfo = (TSParserInfo *) pg_malloc(ntups * sizeof(TSParserInfo));
10171 :
10172 370 : i_tableoid = PQfnumber(res, "tableoid");
10173 370 : i_oid = PQfnumber(res, "oid");
10174 370 : i_prsname = PQfnumber(res, "prsname");
10175 370 : i_prsnamespace = PQfnumber(res, "prsnamespace");
10176 370 : i_prsstart = PQfnumber(res, "prsstart");
10177 370 : i_prstoken = PQfnumber(res, "prstoken");
10178 370 : i_prsend = PQfnumber(res, "prsend");
10179 370 : i_prsheadline = PQfnumber(res, "prsheadline");
10180 370 : i_prslextype = PQfnumber(res, "prslextype");
10181 :
10182 842 : for (i = 0; i < ntups; i++)
10183 : {
10184 472 : prsinfo[i].dobj.objType = DO_TSPARSER;
10185 472 : prsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10186 472 : prsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10187 472 : AssignDumpId(&prsinfo[i].dobj);
10188 472 : prsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_prsname));
10189 944 : prsinfo[i].dobj.namespace =
10190 472 : findNamespace(atooid(PQgetvalue(res, i, i_prsnamespace)));
10191 472 : prsinfo[i].prsstart = atooid(PQgetvalue(res, i, i_prsstart));
10192 472 : prsinfo[i].prstoken = atooid(PQgetvalue(res, i, i_prstoken));
10193 472 : prsinfo[i].prsend = atooid(PQgetvalue(res, i, i_prsend));
10194 472 : prsinfo[i].prsheadline = atooid(PQgetvalue(res, i, i_prsheadline));
10195 472 : prsinfo[i].prslextype = atooid(PQgetvalue(res, i, i_prslextype));
10196 :
10197 : /* Decide whether we want to dump it */
10198 472 : selectDumpableObject(&(prsinfo[i].dobj), fout);
10199 : }
10200 :
10201 370 : PQclear(res);
10202 :
10203 370 : destroyPQExpBuffer(query);
10204 370 : }
10205 :
10206 : /*
10207 : * getTSDictionaries:
10208 : * get information about all text search dictionaries in the system catalogs
10209 : */
10210 : void
10211 370 : getTSDictionaries(Archive *fout)
10212 : {
10213 : PGresult *res;
10214 : int ntups;
10215 : int i;
10216 : PQExpBuffer query;
10217 : TSDictInfo *dictinfo;
10218 : int i_tableoid;
10219 : int i_oid;
10220 : int i_dictname;
10221 : int i_dictnamespace;
10222 : int i_dictowner;
10223 : int i_dicttemplate;
10224 : int i_dictinitoption;
10225 :
10226 370 : query = createPQExpBuffer();
10227 :
10228 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, dictname, "
10229 : "dictnamespace, dictowner, "
10230 : "dicttemplate, dictinitoption "
10231 : "FROM pg_ts_dict");
10232 :
10233 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10234 :
10235 370 : ntups = PQntuples(res);
10236 :
10237 370 : dictinfo = (TSDictInfo *) pg_malloc(ntups * sizeof(TSDictInfo));
10238 :
10239 370 : i_tableoid = PQfnumber(res, "tableoid");
10240 370 : i_oid = PQfnumber(res, "oid");
10241 370 : i_dictname = PQfnumber(res, "dictname");
10242 370 : i_dictnamespace = PQfnumber(res, "dictnamespace");
10243 370 : i_dictowner = PQfnumber(res, "dictowner");
10244 370 : i_dictinitoption = PQfnumber(res, "dictinitoption");
10245 370 : i_dicttemplate = PQfnumber(res, "dicttemplate");
10246 :
10247 11698 : for (i = 0; i < ntups; i++)
10248 : {
10249 11328 : dictinfo[i].dobj.objType = DO_TSDICT;
10250 11328 : dictinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10251 11328 : dictinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10252 11328 : AssignDumpId(&dictinfo[i].dobj);
10253 11328 : dictinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_dictname));
10254 22656 : dictinfo[i].dobj.namespace =
10255 11328 : findNamespace(atooid(PQgetvalue(res, i, i_dictnamespace)));
10256 11328 : dictinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_dictowner));
10257 11328 : dictinfo[i].dicttemplate = atooid(PQgetvalue(res, i, i_dicttemplate));
10258 11328 : if (PQgetisnull(res, i, i_dictinitoption))
10259 472 : dictinfo[i].dictinitoption = NULL;
10260 : else
10261 10856 : dictinfo[i].dictinitoption = pg_strdup(PQgetvalue(res, i, i_dictinitoption));
10262 :
10263 : /* Decide whether we want to dump it */
10264 11328 : selectDumpableObject(&(dictinfo[i].dobj), fout);
10265 : }
10266 :
10267 370 : PQclear(res);
10268 :
10269 370 : destroyPQExpBuffer(query);
10270 370 : }
10271 :
10272 : /*
10273 : * getTSTemplates:
10274 : * get information about all text search templates in the system catalogs
10275 : */
10276 : void
10277 370 : getTSTemplates(Archive *fout)
10278 : {
10279 : PGresult *res;
10280 : int ntups;
10281 : int i;
10282 : PQExpBuffer query;
10283 : TSTemplateInfo *tmplinfo;
10284 : int i_tableoid;
10285 : int i_oid;
10286 : int i_tmplname;
10287 : int i_tmplnamespace;
10288 : int i_tmplinit;
10289 : int i_tmpllexize;
10290 :
10291 370 : query = createPQExpBuffer();
10292 :
10293 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, tmplname, "
10294 : "tmplnamespace, tmplinit::oid, tmpllexize::oid "
10295 : "FROM pg_ts_template");
10296 :
10297 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10298 :
10299 370 : ntups = PQntuples(res);
10300 :
10301 370 : tmplinfo = (TSTemplateInfo *) pg_malloc(ntups * sizeof(TSTemplateInfo));
10302 :
10303 370 : i_tableoid = PQfnumber(res, "tableoid");
10304 370 : i_oid = PQfnumber(res, "oid");
10305 370 : i_tmplname = PQfnumber(res, "tmplname");
10306 370 : i_tmplnamespace = PQfnumber(res, "tmplnamespace");
10307 370 : i_tmplinit = PQfnumber(res, "tmplinit");
10308 370 : i_tmpllexize = PQfnumber(res, "tmpllexize");
10309 :
10310 2322 : for (i = 0; i < ntups; i++)
10311 : {
10312 1952 : tmplinfo[i].dobj.objType = DO_TSTEMPLATE;
10313 1952 : tmplinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10314 1952 : tmplinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10315 1952 : AssignDumpId(&tmplinfo[i].dobj);
10316 1952 : tmplinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_tmplname));
10317 3904 : tmplinfo[i].dobj.namespace =
10318 1952 : findNamespace(atooid(PQgetvalue(res, i, i_tmplnamespace)));
10319 1952 : tmplinfo[i].tmplinit = atooid(PQgetvalue(res, i, i_tmplinit));
10320 1952 : tmplinfo[i].tmpllexize = atooid(PQgetvalue(res, i, i_tmpllexize));
10321 :
10322 : /* Decide whether we want to dump it */
10323 1952 : selectDumpableObject(&(tmplinfo[i].dobj), fout);
10324 : }
10325 :
10326 370 : PQclear(res);
10327 :
10328 370 : destroyPQExpBuffer(query);
10329 370 : }
10330 :
10331 : /*
10332 : * getTSConfigurations:
10333 : * get information about all text search configurations
10334 : */
10335 : void
10336 370 : getTSConfigurations(Archive *fout)
10337 : {
10338 : PGresult *res;
10339 : int ntups;
10340 : int i;
10341 : PQExpBuffer query;
10342 : TSConfigInfo *cfginfo;
10343 : int i_tableoid;
10344 : int i_oid;
10345 : int i_cfgname;
10346 : int i_cfgnamespace;
10347 : int i_cfgowner;
10348 : int i_cfgparser;
10349 :
10350 370 : query = createPQExpBuffer();
10351 :
10352 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, cfgname, "
10353 : "cfgnamespace, cfgowner, cfgparser "
10354 : "FROM pg_ts_config");
10355 :
10356 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10357 :
10358 370 : ntups = PQntuples(res);
10359 :
10360 370 : cfginfo = (TSConfigInfo *) pg_malloc(ntups * sizeof(TSConfigInfo));
10361 :
10362 370 : i_tableoid = PQfnumber(res, "tableoid");
10363 370 : i_oid = PQfnumber(res, "oid");
10364 370 : i_cfgname = PQfnumber(res, "cfgname");
10365 370 : i_cfgnamespace = PQfnumber(res, "cfgnamespace");
10366 370 : i_cfgowner = PQfnumber(res, "cfgowner");
10367 370 : i_cfgparser = PQfnumber(res, "cfgparser");
10368 :
10369 11628 : for (i = 0; i < ntups; i++)
10370 : {
10371 11258 : cfginfo[i].dobj.objType = DO_TSCONFIG;
10372 11258 : cfginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10373 11258 : cfginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10374 11258 : AssignDumpId(&cfginfo[i].dobj);
10375 11258 : cfginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_cfgname));
10376 22516 : cfginfo[i].dobj.namespace =
10377 11258 : findNamespace(atooid(PQgetvalue(res, i, i_cfgnamespace)));
10378 11258 : cfginfo[i].rolname = getRoleName(PQgetvalue(res, i, i_cfgowner));
10379 11258 : cfginfo[i].cfgparser = atooid(PQgetvalue(res, i, i_cfgparser));
10380 :
10381 : /* Decide whether we want to dump it */
10382 11258 : selectDumpableObject(&(cfginfo[i].dobj), fout);
10383 : }
10384 :
10385 370 : PQclear(res);
10386 :
10387 370 : destroyPQExpBuffer(query);
10388 370 : }
10389 :
10390 : /*
10391 : * getForeignDataWrappers:
10392 : * get information about all foreign-data wrappers in the system catalogs
10393 : */
10394 : void
10395 370 : getForeignDataWrappers(Archive *fout)
10396 : {
10397 : PGresult *res;
10398 : int ntups;
10399 : int i;
10400 : PQExpBuffer query;
10401 : FdwInfo *fdwinfo;
10402 : int i_tableoid;
10403 : int i_oid;
10404 : int i_fdwname;
10405 : int i_fdwowner;
10406 : int i_fdwhandler;
10407 : int i_fdwvalidator;
10408 : int i_fdwacl;
10409 : int i_acldefault;
10410 : int i_fdwoptions;
10411 :
10412 370 : query = createPQExpBuffer();
10413 :
10414 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, fdwname, "
10415 : "fdwowner, "
10416 : "fdwhandler::pg_catalog.regproc, "
10417 : "fdwvalidator::pg_catalog.regproc, "
10418 : "fdwacl, "
10419 : "acldefault('F', fdwowner) AS acldefault, "
10420 : "array_to_string(ARRAY("
10421 : "SELECT quote_ident(option_name) || ' ' || "
10422 : "quote_literal(option_value) "
10423 : "FROM pg_options_to_table(fdwoptions) "
10424 : "ORDER BY option_name"
10425 : "), E',\n ') AS fdwoptions "
10426 : "FROM pg_foreign_data_wrapper");
10427 :
10428 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10429 :
10430 370 : ntups = PQntuples(res);
10431 :
10432 370 : fdwinfo = (FdwInfo *) pg_malloc(ntups * sizeof(FdwInfo));
10433 :
10434 370 : i_tableoid = PQfnumber(res, "tableoid");
10435 370 : i_oid = PQfnumber(res, "oid");
10436 370 : i_fdwname = PQfnumber(res, "fdwname");
10437 370 : i_fdwowner = PQfnumber(res, "fdwowner");
10438 370 : i_fdwhandler = PQfnumber(res, "fdwhandler");
10439 370 : i_fdwvalidator = PQfnumber(res, "fdwvalidator");
10440 370 : i_fdwacl = PQfnumber(res, "fdwacl");
10441 370 : i_acldefault = PQfnumber(res, "acldefault");
10442 370 : i_fdwoptions = PQfnumber(res, "fdwoptions");
10443 :
10444 524 : for (i = 0; i < ntups; i++)
10445 : {
10446 154 : fdwinfo[i].dobj.objType = DO_FDW;
10447 154 : fdwinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10448 154 : fdwinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10449 154 : AssignDumpId(&fdwinfo[i].dobj);
10450 154 : fdwinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_fdwname));
10451 154 : fdwinfo[i].dobj.namespace = NULL;
10452 154 : fdwinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_fdwacl));
10453 154 : fdwinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10454 154 : fdwinfo[i].dacl.privtype = 0;
10455 154 : fdwinfo[i].dacl.initprivs = NULL;
10456 154 : fdwinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_fdwowner));
10457 154 : fdwinfo[i].fdwhandler = pg_strdup(PQgetvalue(res, i, i_fdwhandler));
10458 154 : fdwinfo[i].fdwvalidator = pg_strdup(PQgetvalue(res, i, i_fdwvalidator));
10459 154 : fdwinfo[i].fdwoptions = pg_strdup(PQgetvalue(res, i, i_fdwoptions));
10460 :
10461 : /* Decide whether we want to dump it */
10462 154 : selectDumpableObject(&(fdwinfo[i].dobj), fout);
10463 :
10464 : /* Mark whether FDW has an ACL */
10465 154 : if (!PQgetisnull(res, i, i_fdwacl))
10466 102 : fdwinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10467 : }
10468 :
10469 370 : PQclear(res);
10470 :
10471 370 : destroyPQExpBuffer(query);
10472 370 : }
10473 :
10474 : /*
10475 : * getForeignServers:
10476 : * get information about all foreign servers in the system catalogs
10477 : */
10478 : void
10479 370 : getForeignServers(Archive *fout)
10480 : {
10481 : PGresult *res;
10482 : int ntups;
10483 : int i;
10484 : PQExpBuffer query;
10485 : ForeignServerInfo *srvinfo;
10486 : int i_tableoid;
10487 : int i_oid;
10488 : int i_srvname;
10489 : int i_srvowner;
10490 : int i_srvfdw;
10491 : int i_srvtype;
10492 : int i_srvversion;
10493 : int i_srvacl;
10494 : int i_acldefault;
10495 : int i_srvoptions;
10496 :
10497 370 : query = createPQExpBuffer();
10498 :
10499 370 : appendPQExpBufferStr(query, "SELECT tableoid, oid, srvname, "
10500 : "srvowner, "
10501 : "srvfdw, srvtype, srvversion, srvacl, "
10502 : "acldefault('S', srvowner) AS acldefault, "
10503 : "array_to_string(ARRAY("
10504 : "SELECT quote_ident(option_name) || ' ' || "
10505 : "quote_literal(option_value) "
10506 : "FROM pg_options_to_table(srvoptions) "
10507 : "ORDER BY option_name"
10508 : "), E',\n ') AS srvoptions "
10509 : "FROM pg_foreign_server");
10510 :
10511 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10512 :
10513 370 : ntups = PQntuples(res);
10514 :
10515 370 : srvinfo = (ForeignServerInfo *) pg_malloc(ntups * sizeof(ForeignServerInfo));
10516 :
10517 370 : i_tableoid = PQfnumber(res, "tableoid");
10518 370 : i_oid = PQfnumber(res, "oid");
10519 370 : i_srvname = PQfnumber(res, "srvname");
10520 370 : i_srvowner = PQfnumber(res, "srvowner");
10521 370 : i_srvfdw = PQfnumber(res, "srvfdw");
10522 370 : i_srvtype = PQfnumber(res, "srvtype");
10523 370 : i_srvversion = PQfnumber(res, "srvversion");
10524 370 : i_srvacl = PQfnumber(res, "srvacl");
10525 370 : i_acldefault = PQfnumber(res, "acldefault");
10526 370 : i_srvoptions = PQfnumber(res, "srvoptions");
10527 :
10528 532 : for (i = 0; i < ntups; i++)
10529 : {
10530 162 : srvinfo[i].dobj.objType = DO_FOREIGN_SERVER;
10531 162 : srvinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10532 162 : srvinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10533 162 : AssignDumpId(&srvinfo[i].dobj);
10534 162 : srvinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_srvname));
10535 162 : srvinfo[i].dobj.namespace = NULL;
10536 162 : srvinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_srvacl));
10537 162 : srvinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10538 162 : srvinfo[i].dacl.privtype = 0;
10539 162 : srvinfo[i].dacl.initprivs = NULL;
10540 162 : srvinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_srvowner));
10541 162 : srvinfo[i].srvfdw = atooid(PQgetvalue(res, i, i_srvfdw));
10542 162 : srvinfo[i].srvtype = pg_strdup(PQgetvalue(res, i, i_srvtype));
10543 162 : srvinfo[i].srvversion = pg_strdup(PQgetvalue(res, i, i_srvversion));
10544 162 : srvinfo[i].srvoptions = pg_strdup(PQgetvalue(res, i, i_srvoptions));
10545 :
10546 : /* Decide whether we want to dump it */
10547 162 : selectDumpableObject(&(srvinfo[i].dobj), fout);
10548 :
10549 : /* Servers have user mappings */
10550 162 : srvinfo[i].dobj.components |= DUMP_COMPONENT_USERMAP;
10551 :
10552 : /* Mark whether server has an ACL */
10553 162 : if (!PQgetisnull(res, i, i_srvacl))
10554 102 : srvinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10555 : }
10556 :
10557 370 : PQclear(res);
10558 :
10559 370 : destroyPQExpBuffer(query);
10560 370 : }
10561 :
10562 : /*
10563 : * getDefaultACLs:
10564 : * get information about all default ACL information in the system catalogs
10565 : */
10566 : void
10567 370 : getDefaultACLs(Archive *fout)
10568 : {
10569 370 : DumpOptions *dopt = fout->dopt;
10570 : DefaultACLInfo *daclinfo;
10571 : PQExpBuffer query;
10572 : PGresult *res;
10573 : int i_oid;
10574 : int i_tableoid;
10575 : int i_defaclrole;
10576 : int i_defaclnamespace;
10577 : int i_defaclobjtype;
10578 : int i_defaclacl;
10579 : int i_acldefault;
10580 : int i,
10581 : ntups;
10582 :
10583 370 : query = createPQExpBuffer();
10584 :
10585 : /*
10586 : * Global entries (with defaclnamespace=0) replace the hard-wired default
10587 : * ACL for their object type. We should dump them as deltas from the
10588 : * default ACL, since that will be used as a starting point for
10589 : * interpreting the ALTER DEFAULT PRIVILEGES commands. On the other hand,
10590 : * non-global entries can only add privileges not revoke them. We must
10591 : * dump those as-is (i.e., as deltas from an empty ACL).
10592 : *
10593 : * We can use defaclobjtype as the object type for acldefault(), except
10594 : * for the case of 'S' (DEFACLOBJ_SEQUENCE) which must be converted to
10595 : * 's'.
10596 : */
10597 370 : appendPQExpBufferStr(query,
10598 : "SELECT oid, tableoid, "
10599 : "defaclrole, "
10600 : "defaclnamespace, "
10601 : "defaclobjtype, "
10602 : "defaclacl, "
10603 : "CASE WHEN defaclnamespace = 0 THEN "
10604 : "acldefault(CASE WHEN defaclobjtype = 'S' "
10605 : "THEN 's'::\"char\" ELSE defaclobjtype END, "
10606 : "defaclrole) ELSE '{}' END AS acldefault "
10607 : "FROM pg_default_acl");
10608 :
10609 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10610 :
10611 370 : ntups = PQntuples(res);
10612 :
10613 370 : daclinfo = (DefaultACLInfo *) pg_malloc(ntups * sizeof(DefaultACLInfo));
10614 :
10615 370 : i_oid = PQfnumber(res, "oid");
10616 370 : i_tableoid = PQfnumber(res, "tableoid");
10617 370 : i_defaclrole = PQfnumber(res, "defaclrole");
10618 370 : i_defaclnamespace = PQfnumber(res, "defaclnamespace");
10619 370 : i_defaclobjtype = PQfnumber(res, "defaclobjtype");
10620 370 : i_defaclacl = PQfnumber(res, "defaclacl");
10621 370 : i_acldefault = PQfnumber(res, "acldefault");
10622 :
10623 806 : for (i = 0; i < ntups; i++)
10624 : {
10625 436 : Oid nspid = atooid(PQgetvalue(res, i, i_defaclnamespace));
10626 :
10627 436 : daclinfo[i].dobj.objType = DO_DEFAULT_ACL;
10628 436 : daclinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10629 436 : daclinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10630 436 : AssignDumpId(&daclinfo[i].dobj);
10631 : /* cheesy ... is it worth coming up with a better object name? */
10632 436 : daclinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_defaclobjtype));
10633 :
10634 436 : if (nspid != InvalidOid)
10635 204 : daclinfo[i].dobj.namespace = findNamespace(nspid);
10636 : else
10637 232 : daclinfo[i].dobj.namespace = NULL;
10638 :
10639 436 : daclinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_defaclacl));
10640 436 : daclinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10641 436 : daclinfo[i].dacl.privtype = 0;
10642 436 : daclinfo[i].dacl.initprivs = NULL;
10643 436 : daclinfo[i].defaclrole = getRoleName(PQgetvalue(res, i, i_defaclrole));
10644 436 : daclinfo[i].defaclobjtype = *(PQgetvalue(res, i, i_defaclobjtype));
10645 :
10646 : /* Default ACLs are ACLs, of course */
10647 436 : daclinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10648 :
10649 : /* Decide whether we want to dump it */
10650 436 : selectDumpableDefaultACL(&(daclinfo[i]), dopt);
10651 : }
10652 :
10653 370 : PQclear(res);
10654 :
10655 370 : destroyPQExpBuffer(query);
10656 370 : }
10657 :
10658 : /*
10659 : * getRoleName -- look up the name of a role, given its OID
10660 : *
10661 : * In current usage, we don't expect failures, so error out for a bad OID.
10662 : */
10663 : static const char *
10664 1174006 : getRoleName(const char *roleoid_str)
10665 : {
10666 1174006 : Oid roleoid = atooid(roleoid_str);
10667 :
10668 : /*
10669 : * Do binary search to find the appropriate item.
10670 : */
10671 1174006 : if (nrolenames > 0)
10672 : {
10673 1174006 : RoleNameItem *low = &rolenames[0];
10674 1174006 : RoleNameItem *high = &rolenames[nrolenames - 1];
10675 :
10676 4695396 : while (low <= high)
10677 : {
10678 4695396 : RoleNameItem *middle = low + (high - low) / 2;
10679 :
10680 4695396 : if (roleoid < middle->roleoid)
10681 3519508 : high = middle - 1;
10682 1175888 : else if (roleoid > middle->roleoid)
10683 1882 : low = middle + 1;
10684 : else
10685 1174006 : return middle->rolename; /* found a match */
10686 : }
10687 : }
10688 :
10689 0 : pg_fatal("role with OID %u does not exist", roleoid);
10690 : return NULL; /* keep compiler quiet */
10691 : }
10692 :
10693 : /*
10694 : * collectRoleNames --
10695 : *
10696 : * Construct a table of all known roles.
10697 : * The table is sorted by OID for speed in lookup.
10698 : */
10699 : static void
10700 372 : collectRoleNames(Archive *fout)
10701 : {
10702 : PGresult *res;
10703 : const char *query;
10704 : int i;
10705 :
10706 372 : query = "SELECT oid, rolname FROM pg_catalog.pg_roles ORDER BY 1";
10707 :
10708 372 : res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
10709 :
10710 372 : nrolenames = PQntuples(res);
10711 :
10712 372 : rolenames = (RoleNameItem *) pg_malloc(nrolenames * sizeof(RoleNameItem));
10713 :
10714 7342 : for (i = 0; i < nrolenames; i++)
10715 : {
10716 6970 : rolenames[i].roleoid = atooid(PQgetvalue(res, i, 0));
10717 6970 : rolenames[i].rolename = pg_strdup(PQgetvalue(res, i, 1));
10718 : }
10719 :
10720 372 : PQclear(res);
10721 372 : }
10722 :
10723 : /*
10724 : * getAdditionalACLs
10725 : *
10726 : * We have now created all the DumpableObjects, and collected the ACL data
10727 : * that appears in the directly-associated catalog entries. However, there's
10728 : * more ACL-related info to collect. If any of a table's columns have ACLs,
10729 : * we must set the TableInfo's DUMP_COMPONENT_ACL components flag, as well as
10730 : * its hascolumnACLs flag (we won't store the ACLs themselves here, though).
10731 : * Also, in versions having the pg_init_privs catalog, read that and load the
10732 : * information into the relevant DumpableObjects.
10733 : */
10734 : static void
10735 366 : getAdditionalACLs(Archive *fout)
10736 : {
10737 366 : PQExpBuffer query = createPQExpBuffer();
10738 : PGresult *res;
10739 : int ntups,
10740 : i;
10741 :
10742 : /* Check for per-column ACLs */
10743 366 : appendPQExpBufferStr(query,
10744 : "SELECT DISTINCT attrelid FROM pg_attribute "
10745 : "WHERE attacl IS NOT NULL");
10746 :
10747 366 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10748 :
10749 366 : ntups = PQntuples(res);
10750 1108 : for (i = 0; i < ntups; i++)
10751 : {
10752 742 : Oid relid = atooid(PQgetvalue(res, i, 0));
10753 : TableInfo *tblinfo;
10754 :
10755 742 : tblinfo = findTableByOid(relid);
10756 : /* OK to ignore tables we haven't got a DumpableObject for */
10757 742 : if (tblinfo)
10758 : {
10759 742 : tblinfo->dobj.components |= DUMP_COMPONENT_ACL;
10760 742 : tblinfo->hascolumnACLs = true;
10761 : }
10762 : }
10763 366 : PQclear(res);
10764 :
10765 : /* Fetch initial-privileges data */
10766 366 : if (fout->remoteVersion >= 90600)
10767 : {
10768 366 : printfPQExpBuffer(query,
10769 : "SELECT objoid, classoid, objsubid, privtype, initprivs "
10770 : "FROM pg_init_privs");
10771 :
10772 366 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10773 :
10774 366 : ntups = PQntuples(res);
10775 86992 : for (i = 0; i < ntups; i++)
10776 : {
10777 86626 : Oid objoid = atooid(PQgetvalue(res, i, 0));
10778 86626 : Oid classoid = atooid(PQgetvalue(res, i, 1));
10779 86626 : int objsubid = atoi(PQgetvalue(res, i, 2));
10780 86626 : char privtype = *(PQgetvalue(res, i, 3));
10781 86626 : char *initprivs = PQgetvalue(res, i, 4);
10782 : CatalogId objId;
10783 : DumpableObject *dobj;
10784 :
10785 86626 : objId.tableoid = classoid;
10786 86626 : objId.oid = objoid;
10787 86626 : dobj = findObjectByCatalogId(objId);
10788 : /* OK to ignore entries we haven't got a DumpableObject for */
10789 86626 : if (dobj)
10790 : {
10791 : /* Cope with sub-object initprivs */
10792 62204 : if (objsubid != 0)
10793 : {
10794 7368 : if (dobj->objType == DO_TABLE)
10795 : {
10796 : /* For a column initprivs, set the table's ACL flags */
10797 7368 : dobj->components |= DUMP_COMPONENT_ACL;
10798 7368 : ((TableInfo *) dobj)->hascolumnACLs = true;
10799 : }
10800 : else
10801 0 : pg_log_warning("unsupported pg_init_privs entry: %u %u %d",
10802 : classoid, objoid, objsubid);
10803 7726 : continue;
10804 : }
10805 :
10806 : /*
10807 : * We ignore any pg_init_privs.initprivs entry for the public
10808 : * schema, as explained in getNamespaces().
10809 : */
10810 54836 : if (dobj->objType == DO_NAMESPACE &&
10811 724 : strcmp(dobj->name, "public") == 0)
10812 358 : continue;
10813 :
10814 : /* Else it had better be of a type we think has ACLs */
10815 54478 : if (dobj->objType == DO_NAMESPACE ||
10816 54112 : dobj->objType == DO_TYPE ||
10817 54064 : dobj->objType == DO_FUNC ||
10818 53868 : dobj->objType == DO_AGG ||
10819 53820 : dobj->objType == DO_TABLE ||
10820 0 : dobj->objType == DO_PROCLANG ||
10821 0 : dobj->objType == DO_FDW ||
10822 0 : dobj->objType == DO_FOREIGN_SERVER)
10823 54478 : {
10824 54478 : DumpableObjectWithAcl *daobj = (DumpableObjectWithAcl *) dobj;
10825 :
10826 54478 : daobj->dacl.privtype = privtype;
10827 54478 : daobj->dacl.initprivs = pstrdup(initprivs);
10828 : }
10829 : else
10830 0 : pg_log_warning("unsupported pg_init_privs entry: %u %u %d",
10831 : classoid, objoid, objsubid);
10832 : }
10833 : }
10834 366 : PQclear(res);
10835 : }
10836 :
10837 366 : destroyPQExpBuffer(query);
10838 366 : }
10839 :
10840 : /*
10841 : * dumpCommentExtended --
10842 : *
10843 : * This routine is used to dump any comments associated with the
10844 : * object handed to this routine. The routine takes the object type
10845 : * and object name (ready to print, except for schema decoration), plus
10846 : * the namespace and owner of the object (for labeling the ArchiveEntry),
10847 : * plus catalog ID and subid which are the lookup key for pg_description,
10848 : * plus the dump ID for the object (for setting a dependency).
10849 : * If a matching pg_description entry is found, it is dumped.
10850 : *
10851 : * Note: in some cases, such as comments for triggers and rules, the "type"
10852 : * string really looks like, e.g., "TRIGGER name ON". This is a bit of a hack
10853 : * but it doesn't seem worth complicating the API for all callers to make
10854 : * it cleaner.
10855 : *
10856 : * Note: although this routine takes a dumpId for dependency purposes,
10857 : * that purpose is just to mark the dependency in the emitted dump file
10858 : * for possible future use by pg_restore. We do NOT use it for determining
10859 : * ordering of the comment in the dump file, because this routine is called
10860 : * after dependency sorting occurs. This routine should be called just after
10861 : * calling ArchiveEntry() for the specified object.
10862 : */
10863 : static void
10864 13182 : dumpCommentExtended(Archive *fout, const char *type,
10865 : const char *name, const char *namespace,
10866 : const char *owner, CatalogId catalogId,
10867 : int subid, DumpId dumpId,
10868 : const char *initdb_comment)
10869 : {
10870 13182 : DumpOptions *dopt = fout->dopt;
10871 : CommentItem *comments;
10872 : int ncomments;
10873 :
10874 : /* do nothing, if --no-comments is supplied */
10875 13182 : if (dopt->no_comments)
10876 0 : return;
10877 :
10878 : /* Comments are schema not data ... except LO comments are data */
10879 13182 : if (strcmp(type, "LARGE OBJECT") != 0)
10880 : {
10881 13052 : if (!dopt->dumpSchema)
10882 0 : return;
10883 : }
10884 : else
10885 : {
10886 : /* We do dump LO comments in binary-upgrade mode */
10887 130 : if (!dopt->dumpData && !dopt->binary_upgrade)
10888 0 : return;
10889 : }
10890 :
10891 : /* Search for comments associated with catalogId, using table */
10892 13182 : ncomments = findComments(catalogId.tableoid, catalogId.oid,
10893 : &comments);
10894 :
10895 : /* Is there one matching the subid? */
10896 13182 : while (ncomments > 0)
10897 : {
10898 13078 : if (comments->objsubid == subid)
10899 13078 : break;
10900 0 : comments++;
10901 0 : ncomments--;
10902 : }
10903 :
10904 13182 : if (initdb_comment != NULL)
10905 : {
10906 : static CommentItem empty_comment = {.descr = ""};
10907 :
10908 : /*
10909 : * initdb creates this object with a comment. Skip dumping the
10910 : * initdb-provided comment, which would complicate matters for
10911 : * non-superuser use of pg_dump. When the DBA has removed initdb's
10912 : * comment, replicate that.
10913 : */
10914 232 : if (ncomments == 0)
10915 : {
10916 8 : comments = &empty_comment;
10917 8 : ncomments = 1;
10918 : }
10919 224 : else if (strcmp(comments->descr, initdb_comment) == 0)
10920 224 : ncomments = 0;
10921 : }
10922 :
10923 : /* If a comment exists, build COMMENT ON statement */
10924 13182 : if (ncomments > 0)
10925 : {
10926 12862 : PQExpBuffer query = createPQExpBuffer();
10927 12862 : PQExpBuffer tag = createPQExpBuffer();
10928 :
10929 12862 : appendPQExpBuffer(query, "COMMENT ON %s ", type);
10930 12862 : if (namespace && *namespace)
10931 12474 : appendPQExpBuffer(query, "%s.", fmtId(namespace));
10932 12862 : appendPQExpBuffer(query, "%s IS ", name);
10933 12862 : appendStringLiteralAH(query, comments->descr, fout);
10934 12862 : appendPQExpBufferStr(query, ";\n");
10935 :
10936 12862 : appendPQExpBuffer(tag, "%s %s", type, name);
10937 :
10938 : /*
10939 : * We mark comments as SECTION_NONE because they really belong in the
10940 : * same section as their parent, whether that is pre-data or
10941 : * post-data.
10942 : */
10943 12862 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
10944 12862 : ARCHIVE_OPTS(.tag = tag->data,
10945 : .namespace = namespace,
10946 : .owner = owner,
10947 : .description = "COMMENT",
10948 : .section = SECTION_NONE,
10949 : .createStmt = query->data,
10950 : .deps = &dumpId,
10951 : .nDeps = 1));
10952 :
10953 12862 : destroyPQExpBuffer(query);
10954 12862 : destroyPQExpBuffer(tag);
10955 : }
10956 : }
10957 :
10958 : /*
10959 : * dumpComment --
10960 : *
10961 : * Typical simplification of the above function.
10962 : */
10963 : static inline void
10964 12872 : dumpComment(Archive *fout, const char *type,
10965 : const char *name, const char *namespace,
10966 : const char *owner, CatalogId catalogId,
10967 : int subid, DumpId dumpId)
10968 : {
10969 12872 : dumpCommentExtended(fout, type, name, namespace, owner,
10970 : catalogId, subid, dumpId, NULL);
10971 12872 : }
10972 :
10973 : /*
10974 : * appendNamedArgument --
10975 : *
10976 : * Convenience routine for constructing parameters of the form:
10977 : * 'paraname', 'value'::type
10978 : */
10979 : static void
10980 10258 : appendNamedArgument(PQExpBuffer out, Archive *fout, const char *argname,
10981 : const char *argtype, const char *argval)
10982 : {
10983 10258 : appendPQExpBufferStr(out, ",\n\t");
10984 :
10985 10258 : appendStringLiteralAH(out, argname, fout);
10986 10258 : appendPQExpBufferStr(out, ", ");
10987 :
10988 10258 : appendStringLiteralAH(out, argval, fout);
10989 10258 : appendPQExpBuffer(out, "::%s", argtype);
10990 10258 : }
10991 :
10992 : /*
10993 : * fetchAttributeStats --
10994 : *
10995 : * Fetch next batch of attribute statistics for dumpRelationStats_dumper().
10996 : */
10997 : static PGresult *
10998 2396 : fetchAttributeStats(Archive *fout)
10999 : {
11000 2396 : ArchiveHandle *AH = (ArchiveHandle *) fout;
11001 2396 : PQExpBuffer nspnames = createPQExpBuffer();
11002 2396 : PQExpBuffer relnames = createPQExpBuffer();
11003 2396 : int count = 0;
11004 2396 : PGresult *res = NULL;
11005 : static TocEntry *te;
11006 : static bool restarted;
11007 2396 : int max_rels = MAX_ATTR_STATS_RELS;
11008 :
11009 : /*
11010 : * Our query for retrieving statistics for multiple relations uses WITH
11011 : * ORDINALITY and multi-argument UNNEST(), both of which were introduced
11012 : * in v9.4. For older versions, we resort to gathering statistics for a
11013 : * single relation at a time.
11014 : */
11015 2396 : if (fout->remoteVersion < 90400)
11016 0 : max_rels = 1;
11017 :
11018 : /* If we're just starting, set our TOC pointer. */
11019 2396 : if (!te)
11020 120 : te = AH->toc->next;
11021 :
11022 : /*
11023 : * We can't easily avoid a second TOC scan for the tar format because it
11024 : * writes restore.sql separately, which means we must execute the queries
11025 : * twice. This feels risky, but there is no known reason it should
11026 : * generate different output than the first pass. Even if it does, the
11027 : * worst-case scenario is that restore.sql might have different statistics
11028 : * data than the archive.
11029 : */
11030 2396 : if (!restarted && te == AH->toc && AH->format == archTar)
11031 : {
11032 2 : te = AH->toc->next;
11033 2 : restarted = true;
11034 : }
11035 :
11036 2396 : appendPQExpBufferChar(nspnames, '{');
11037 2396 : appendPQExpBufferChar(relnames, '{');
11038 :
11039 : /*
11040 : * Scan the TOC for the next set of relevant stats entries. We assume
11041 : * that statistics are dumped in the order they are listed in the TOC.
11042 : * This is perhaps not the sturdiest assumption, so we verify it matches
11043 : * reality in dumpRelationStats_dumper().
11044 : */
11045 36522 : for (; te != AH->toc && count < max_rels; te = te->next)
11046 : {
11047 34126 : if ((te->reqs & REQ_STATS) != 0 &&
11048 7440 : strcmp(te->desc, "STATISTICS DATA") == 0)
11049 : {
11050 7440 : appendPGArray(nspnames, te->namespace);
11051 7440 : appendPGArray(relnames, te->tag);
11052 7440 : count++;
11053 : }
11054 : }
11055 :
11056 2396 : appendPQExpBufferChar(nspnames, '}');
11057 2396 : appendPQExpBufferChar(relnames, '}');
11058 :
11059 : /* Execute the query for the next batch of relations. */
11060 2396 : if (count > 0)
11061 : {
11062 218 : PQExpBuffer query = createPQExpBuffer();
11063 :
11064 218 : appendPQExpBufferStr(query, "EXECUTE getAttributeStats(");
11065 218 : appendStringLiteralAH(query, nspnames->data, fout);
11066 218 : appendPQExpBufferStr(query, "::pg_catalog.name[],");
11067 218 : appendStringLiteralAH(query, relnames->data, fout);
11068 218 : appendPQExpBufferStr(query, "::pg_catalog.name[])");
11069 218 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
11070 218 : destroyPQExpBuffer(query);
11071 : }
11072 :
11073 2396 : destroyPQExpBuffer(nspnames);
11074 2396 : destroyPQExpBuffer(relnames);
11075 2396 : return res;
11076 : }
11077 :
11078 : /*
11079 : * dumpRelationStats_dumper --
11080 : *
11081 : * Generate command to import stats into the relation on the new database.
11082 : * This routine is called by the Archiver when it wants the statistics to be
11083 : * dumped.
11084 : */
11085 : static char *
11086 7440 : dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
11087 : {
11088 7440 : const RelStatsInfo *rsinfo = (RelStatsInfo *) userArg;
11089 : static PGresult *res;
11090 : static int rownum;
11091 : PQExpBuffer query;
11092 : PQExpBufferData out_data;
11093 7440 : PQExpBuffer out = &out_data;
11094 : int i_schemaname;
11095 : int i_tablename;
11096 : int i_attname;
11097 : int i_inherited;
11098 : int i_null_frac;
11099 : int i_avg_width;
11100 : int i_n_distinct;
11101 : int i_most_common_vals;
11102 : int i_most_common_freqs;
11103 : int i_histogram_bounds;
11104 : int i_correlation;
11105 : int i_most_common_elems;
11106 : int i_most_common_elem_freqs;
11107 : int i_elem_count_histogram;
11108 : int i_range_length_histogram;
11109 : int i_range_empty_frac;
11110 : int i_range_bounds_histogram;
11111 : static TocEntry *expected_te;
11112 :
11113 : /*
11114 : * fetchAttributeStats() assumes that the statistics are dumped in the
11115 : * order they are listed in the TOC. We verify that here for safety.
11116 : */
11117 7440 : if (!expected_te)
11118 120 : expected_te = ((ArchiveHandle *) fout)->toc;
11119 :
11120 7440 : expected_te = expected_te->next;
11121 29938 : while ((expected_te->reqs & REQ_STATS) == 0 ||
11122 7440 : strcmp(expected_te->desc, "STATISTICS DATA") != 0)
11123 22498 : expected_te = expected_te->next;
11124 :
11125 7440 : if (te != expected_te)
11126 0 : pg_fatal("statistics dumped out of order (current: %d %s %s, expected: %d %s %s)",
11127 : te->dumpId, te->desc, te->tag,
11128 : expected_te->dumpId, expected_te->desc, expected_te->tag);
11129 :
11130 7440 : query = createPQExpBuffer();
11131 7440 : if (!fout->is_prepared[PREPQUERY_GETATTRIBUTESTATS])
11132 : {
11133 120 : appendPQExpBufferStr(query,
11134 : "PREPARE getAttributeStats(pg_catalog.name[], pg_catalog.name[]) AS\n"
11135 : "SELECT s.schemaname, s.tablename, s.attname, s.inherited, "
11136 : "s.null_frac, s.avg_width, s.n_distinct, "
11137 : "s.most_common_vals, s.most_common_freqs, "
11138 : "s.histogram_bounds, s.correlation, "
11139 : "s.most_common_elems, s.most_common_elem_freqs, "
11140 : "s.elem_count_histogram, ");
11141 :
11142 120 : if (fout->remoteVersion >= 170000)
11143 120 : appendPQExpBufferStr(query,
11144 : "s.range_length_histogram, "
11145 : "s.range_empty_frac, "
11146 : "s.range_bounds_histogram ");
11147 : else
11148 0 : appendPQExpBufferStr(query,
11149 : "NULL AS range_length_histogram,"
11150 : "NULL AS range_empty_frac,"
11151 : "NULL AS range_bounds_histogram ");
11152 :
11153 : /*
11154 : * The results must be in the order of the relations supplied in the
11155 : * parameters to ensure we remain in sync as we walk through the TOC.
11156 : * The redundant filter clause on s.tablename = ANY(...) seems
11157 : * sufficient to convince the planner to use
11158 : * pg_class_relname_nsp_index, which avoids a full scan of pg_stats.
11159 : * This may not work for all versions.
11160 : *
11161 : * Our query for retrieving statistics for multiple relations uses
11162 : * WITH ORDINALITY and multi-argument UNNEST(), both of which were
11163 : * introduced in v9.4. For older versions, we resort to gathering
11164 : * statistics for a single relation at a time.
11165 : */
11166 120 : if (fout->remoteVersion >= 90400)
11167 120 : appendPQExpBufferStr(query,
11168 : "FROM pg_catalog.pg_stats s "
11169 : "JOIN unnest($1, $2) WITH ORDINALITY AS u (schemaname, tablename, ord) "
11170 : "ON s.schemaname = u.schemaname "
11171 : "AND s.tablename = u.tablename "
11172 : "WHERE s.tablename = ANY($2) "
11173 : "ORDER BY u.ord, s.attname, s.inherited");
11174 : else
11175 0 : appendPQExpBufferStr(query,
11176 : "FROM pg_catalog.pg_stats s "
11177 : "WHERE s.schemaname = $1[1] "
11178 : "AND s.tablename = $2[1] "
11179 : "ORDER BY s.attname, s.inherited");
11180 :
11181 120 : ExecuteSqlStatement(fout, query->data);
11182 :
11183 120 : fout->is_prepared[PREPQUERY_GETATTRIBUTESTATS] = true;
11184 120 : resetPQExpBuffer(query);
11185 : }
11186 :
11187 7440 : initPQExpBuffer(out);
11188 :
11189 : /* restore relation stats */
11190 7440 : appendPQExpBufferStr(out, "SELECT * FROM pg_catalog.pg_restore_relation_stats(\n");
11191 7440 : appendPQExpBuffer(out, "\t'version', '%d'::integer,\n",
11192 : fout->remoteVersion);
11193 7440 : appendPQExpBufferStr(out, "\t'schemaname', ");
11194 7440 : appendStringLiteralAH(out, rsinfo->dobj.namespace->dobj.name, fout);
11195 7440 : appendPQExpBufferStr(out, ",\n");
11196 7440 : appendPQExpBufferStr(out, "\t'relname', ");
11197 7440 : appendStringLiteralAH(out, rsinfo->dobj.name, fout);
11198 7440 : appendPQExpBufferStr(out, ",\n");
11199 7440 : appendPQExpBuffer(out, "\t'relpages', '%d'::integer,\n", rsinfo->relpages);
11200 :
11201 : /*
11202 : * Before v14, a reltuples value of 0 was ambiguous: it could either mean
11203 : * the relation is empty, or it could mean that it hadn't yet been
11204 : * vacuumed or analyzed. (Newer versions use -1 for the latter case.)
11205 : * This ambiguity allegedly can cause the planner to choose inefficient
11206 : * plans after restoring to v18 or newer. To deal with this, let's just
11207 : * set reltuples to -1 in that case.
11208 : */
11209 7440 : if (fout->remoteVersion < 140000 && strcmp("0", rsinfo->reltuples) == 0)
11210 0 : appendPQExpBufferStr(out, "\t'reltuples', '-1'::real,\n");
11211 : else
11212 7440 : appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
11213 :
11214 7440 : appendPQExpBuffer(out, "\t'relallvisible', '%d'::integer",
11215 7440 : rsinfo->relallvisible);
11216 :
11217 7440 : if (fout->remoteVersion >= 180000)
11218 7440 : appendPQExpBuffer(out, ",\n\t'relallfrozen', '%d'::integer", rsinfo->relallfrozen);
11219 :
11220 7440 : appendPQExpBufferStr(out, "\n);\n");
11221 :
11222 : /* Fetch the next batch of attribute statistics if needed. */
11223 7440 : if (rownum >= PQntuples(res))
11224 : {
11225 2396 : PQclear(res);
11226 2396 : res = fetchAttributeStats(fout);
11227 2396 : rownum = 0;
11228 : }
11229 :
11230 7440 : i_schemaname = PQfnumber(res, "schemaname");
11231 7440 : i_tablename = PQfnumber(res, "tablename");
11232 7440 : i_attname = PQfnumber(res, "attname");
11233 7440 : i_inherited = PQfnumber(res, "inherited");
11234 7440 : i_null_frac = PQfnumber(res, "null_frac");
11235 7440 : i_avg_width = PQfnumber(res, "avg_width");
11236 7440 : i_n_distinct = PQfnumber(res, "n_distinct");
11237 7440 : i_most_common_vals = PQfnumber(res, "most_common_vals");
11238 7440 : i_most_common_freqs = PQfnumber(res, "most_common_freqs");
11239 7440 : i_histogram_bounds = PQfnumber(res, "histogram_bounds");
11240 7440 : i_correlation = PQfnumber(res, "correlation");
11241 7440 : i_most_common_elems = PQfnumber(res, "most_common_elems");
11242 7440 : i_most_common_elem_freqs = PQfnumber(res, "most_common_elem_freqs");
11243 7440 : i_elem_count_histogram = PQfnumber(res, "elem_count_histogram");
11244 7440 : i_range_length_histogram = PQfnumber(res, "range_length_histogram");
11245 7440 : i_range_empty_frac = PQfnumber(res, "range_empty_frac");
11246 7440 : i_range_bounds_histogram = PQfnumber(res, "range_bounds_histogram");
11247 :
11248 : /* restore attribute stats */
11249 8994 : for (; rownum < PQntuples(res); rownum++)
11250 : {
11251 : const char *attname;
11252 :
11253 : /* Stop if the next stat row in our cache isn't for this relation. */
11254 6598 : if (strcmp(te->tag, PQgetvalue(res, rownum, i_tablename)) != 0 ||
11255 1554 : strcmp(te->namespace, PQgetvalue(res, rownum, i_schemaname)) != 0)
11256 : break;
11257 :
11258 1554 : appendPQExpBufferStr(out, "SELECT * FROM pg_catalog.pg_restore_attribute_stats(\n");
11259 1554 : appendPQExpBuffer(out, "\t'version', '%d'::integer,\n",
11260 : fout->remoteVersion);
11261 1554 : appendPQExpBufferStr(out, "\t'schemaname', ");
11262 1554 : appendStringLiteralAH(out, rsinfo->dobj.namespace->dobj.name, fout);
11263 1554 : appendPQExpBufferStr(out, ",\n\t'relname', ");
11264 1554 : appendStringLiteralAH(out, rsinfo->dobj.name, fout);
11265 :
11266 1554 : if (PQgetisnull(res, rownum, i_attname))
11267 0 : pg_fatal("unexpected null attname");
11268 1554 : attname = PQgetvalue(res, rownum, i_attname);
11269 :
11270 : /*
11271 : * Indexes look up attname in indAttNames to derive attnum, all others
11272 : * use attname directly. We must specify attnum for indexes, since
11273 : * their attnames are not necessarily stable across dump/reload.
11274 : */
11275 1554 : if (rsinfo->nindAttNames == 0)
11276 : {
11277 1472 : appendPQExpBufferStr(out, ",\n\t'attname', ");
11278 1472 : appendStringLiteralAH(out, attname, fout);
11279 : }
11280 : else
11281 : {
11282 82 : bool found = false;
11283 :
11284 156 : for (int i = 0; i < rsinfo->nindAttNames; i++)
11285 : {
11286 156 : if (strcmp(attname, rsinfo->indAttNames[i]) == 0)
11287 : {
11288 82 : appendPQExpBuffer(out, ",\n\t'attnum', '%d'::smallint",
11289 : i + 1);
11290 82 : found = true;
11291 82 : break;
11292 : }
11293 : }
11294 :
11295 82 : if (!found)
11296 0 : pg_fatal("could not find index attname \"%s\"", attname);
11297 : }
11298 :
11299 1554 : if (!PQgetisnull(res, rownum, i_inherited))
11300 1554 : appendNamedArgument(out, fout, "inherited", "boolean",
11301 1554 : PQgetvalue(res, rownum, i_inherited));
11302 1554 : if (!PQgetisnull(res, rownum, i_null_frac))
11303 1554 : appendNamedArgument(out, fout, "null_frac", "real",
11304 1554 : PQgetvalue(res, rownum, i_null_frac));
11305 1554 : if (!PQgetisnull(res, rownum, i_avg_width))
11306 1554 : appendNamedArgument(out, fout, "avg_width", "integer",
11307 1554 : PQgetvalue(res, rownum, i_avg_width));
11308 1554 : if (!PQgetisnull(res, rownum, i_n_distinct))
11309 1554 : appendNamedArgument(out, fout, "n_distinct", "real",
11310 1554 : PQgetvalue(res, rownum, i_n_distinct));
11311 1554 : if (!PQgetisnull(res, rownum, i_most_common_vals))
11312 768 : appendNamedArgument(out, fout, "most_common_vals", "text",
11313 768 : PQgetvalue(res, rownum, i_most_common_vals));
11314 1554 : if (!PQgetisnull(res, rownum, i_most_common_freqs))
11315 768 : appendNamedArgument(out, fout, "most_common_freqs", "real[]",
11316 768 : PQgetvalue(res, rownum, i_most_common_freqs));
11317 1554 : if (!PQgetisnull(res, rownum, i_histogram_bounds))
11318 950 : appendNamedArgument(out, fout, "histogram_bounds", "text",
11319 950 : PQgetvalue(res, rownum, i_histogram_bounds));
11320 1554 : if (!PQgetisnull(res, rownum, i_correlation))
11321 1492 : appendNamedArgument(out, fout, "correlation", "real",
11322 1492 : PQgetvalue(res, rownum, i_correlation));
11323 1554 : if (!PQgetisnull(res, rownum, i_most_common_elems))
11324 16 : appendNamedArgument(out, fout, "most_common_elems", "text",
11325 16 : PQgetvalue(res, rownum, i_most_common_elems));
11326 1554 : if (!PQgetisnull(res, rownum, i_most_common_elem_freqs))
11327 16 : appendNamedArgument(out, fout, "most_common_elem_freqs", "real[]",
11328 16 : PQgetvalue(res, rownum, i_most_common_elem_freqs));
11329 1554 : if (!PQgetisnull(res, rownum, i_elem_count_histogram))
11330 14 : appendNamedArgument(out, fout, "elem_count_histogram", "real[]",
11331 14 : PQgetvalue(res, rownum, i_elem_count_histogram));
11332 1554 : if (fout->remoteVersion >= 170000)
11333 : {
11334 1554 : if (!PQgetisnull(res, rownum, i_range_length_histogram))
11335 6 : appendNamedArgument(out, fout, "range_length_histogram", "text",
11336 6 : PQgetvalue(res, rownum, i_range_length_histogram));
11337 1554 : if (!PQgetisnull(res, rownum, i_range_empty_frac))
11338 6 : appendNamedArgument(out, fout, "range_empty_frac", "real",
11339 6 : PQgetvalue(res, rownum, i_range_empty_frac));
11340 1554 : if (!PQgetisnull(res, rownum, i_range_bounds_histogram))
11341 6 : appendNamedArgument(out, fout, "range_bounds_histogram", "text",
11342 6 : PQgetvalue(res, rownum, i_range_bounds_histogram));
11343 : }
11344 1554 : appendPQExpBufferStr(out, "\n);\n");
11345 : }
11346 :
11347 7440 : destroyPQExpBuffer(query);
11348 7440 : return out->data;
11349 : }
11350 :
11351 : /*
11352 : * dumpRelationStats --
11353 : *
11354 : * Make an ArchiveEntry for the relation statistics. The Archiver will take
11355 : * care of gathering the statistics and generating the restore commands when
11356 : * they are needed.
11357 : */
11358 : static void
11359 7584 : dumpRelationStats(Archive *fout, const RelStatsInfo *rsinfo)
11360 : {
11361 7584 : const DumpableObject *dobj = &rsinfo->dobj;
11362 :
11363 : /* nothing to do if we are not dumping statistics */
11364 7584 : if (!fout->dopt->dumpStatistics)
11365 0 : return;
11366 :
11367 7584 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
11368 7584 : ARCHIVE_OPTS(.tag = dobj->name,
11369 : .namespace = dobj->namespace->dobj.name,
11370 : .description = "STATISTICS DATA",
11371 : .section = rsinfo->section,
11372 : .defnFn = dumpRelationStats_dumper,
11373 : .defnArg = rsinfo,
11374 : .deps = dobj->dependencies,
11375 : .nDeps = dobj->nDeps));
11376 : }
11377 :
11378 : /*
11379 : * dumpTableComment --
11380 : *
11381 : * As above, but dump comments for both the specified table (or view)
11382 : * and its columns.
11383 : */
11384 : static void
11385 172 : dumpTableComment(Archive *fout, const TableInfo *tbinfo,
11386 : const char *reltypename)
11387 : {
11388 172 : DumpOptions *dopt = fout->dopt;
11389 : CommentItem *comments;
11390 : int ncomments;
11391 : PQExpBuffer query;
11392 : PQExpBuffer tag;
11393 :
11394 : /* do nothing, if --no-comments is supplied */
11395 172 : if (dopt->no_comments)
11396 0 : return;
11397 :
11398 : /* Comments are SCHEMA not data */
11399 172 : if (!dopt->dumpSchema)
11400 0 : return;
11401 :
11402 : /* Search for comments associated with relation, using table */
11403 172 : ncomments = findComments(tbinfo->dobj.catId.tableoid,
11404 172 : tbinfo->dobj.catId.oid,
11405 : &comments);
11406 :
11407 : /* If comments exist, build COMMENT ON statements */
11408 172 : if (ncomments <= 0)
11409 0 : return;
11410 :
11411 172 : query = createPQExpBuffer();
11412 172 : tag = createPQExpBuffer();
11413 :
11414 496 : while (ncomments > 0)
11415 : {
11416 324 : const char *descr = comments->descr;
11417 324 : int objsubid = comments->objsubid;
11418 :
11419 324 : if (objsubid == 0)
11420 : {
11421 76 : resetPQExpBuffer(tag);
11422 76 : appendPQExpBuffer(tag, "%s %s", reltypename,
11423 76 : fmtId(tbinfo->dobj.name));
11424 :
11425 76 : resetPQExpBuffer(query);
11426 76 : appendPQExpBuffer(query, "COMMENT ON %s %s IS ", reltypename,
11427 76 : fmtQualifiedDumpable(tbinfo));
11428 76 : appendStringLiteralAH(query, descr, fout);
11429 76 : appendPQExpBufferStr(query, ";\n");
11430 :
11431 76 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
11432 76 : ARCHIVE_OPTS(.tag = tag->data,
11433 : .namespace = tbinfo->dobj.namespace->dobj.name,
11434 : .owner = tbinfo->rolname,
11435 : .description = "COMMENT",
11436 : .section = SECTION_NONE,
11437 : .createStmt = query->data,
11438 : .deps = &(tbinfo->dobj.dumpId),
11439 : .nDeps = 1));
11440 : }
11441 248 : else if (objsubid > 0 && objsubid <= tbinfo->numatts)
11442 : {
11443 248 : resetPQExpBuffer(tag);
11444 248 : appendPQExpBuffer(tag, "COLUMN %s.",
11445 248 : fmtId(tbinfo->dobj.name));
11446 248 : appendPQExpBufferStr(tag, fmtId(tbinfo->attnames[objsubid - 1]));
11447 :
11448 248 : resetPQExpBuffer(query);
11449 248 : appendPQExpBuffer(query, "COMMENT ON COLUMN %s.",
11450 248 : fmtQualifiedDumpable(tbinfo));
11451 248 : appendPQExpBuffer(query, "%s IS ",
11452 248 : fmtId(tbinfo->attnames[objsubid - 1]));
11453 248 : appendStringLiteralAH(query, descr, fout);
11454 248 : appendPQExpBufferStr(query, ";\n");
11455 :
11456 248 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
11457 248 : ARCHIVE_OPTS(.tag = tag->data,
11458 : .namespace = tbinfo->dobj.namespace->dobj.name,
11459 : .owner = tbinfo->rolname,
11460 : .description = "COMMENT",
11461 : .section = SECTION_NONE,
11462 : .createStmt = query->data,
11463 : .deps = &(tbinfo->dobj.dumpId),
11464 : .nDeps = 1));
11465 : }
11466 :
11467 324 : comments++;
11468 324 : ncomments--;
11469 : }
11470 :
11471 172 : destroyPQExpBuffer(query);
11472 172 : destroyPQExpBuffer(tag);
11473 : }
11474 :
11475 : /*
11476 : * findComments --
11477 : *
11478 : * Find the comment(s), if any, associated with the given object. All the
11479 : * objsubid values associated with the given classoid/objoid are found with
11480 : * one search.
11481 : */
11482 : static int
11483 13430 : findComments(Oid classoid, Oid objoid, CommentItem **items)
11484 : {
11485 13430 : CommentItem *middle = NULL;
11486 : CommentItem *low;
11487 : CommentItem *high;
11488 : int nmatch;
11489 :
11490 : /*
11491 : * Do binary search to find some item matching the object.
11492 : */
11493 13430 : low = &comments[0];
11494 13430 : high = &comments[ncomments - 1];
11495 133662 : while (low <= high)
11496 : {
11497 133558 : middle = low + (high - low) / 2;
11498 :
11499 133558 : if (classoid < middle->classoid)
11500 15906 : high = middle - 1;
11501 117652 : else if (classoid > middle->classoid)
11502 15020 : low = middle + 1;
11503 102632 : else if (objoid < middle->objoid)
11504 43150 : high = middle - 1;
11505 59482 : else if (objoid > middle->objoid)
11506 46156 : low = middle + 1;
11507 : else
11508 13326 : break; /* found a match */
11509 : }
11510 :
11511 13430 : if (low > high) /* no matches */
11512 : {
11513 104 : *items = NULL;
11514 104 : return 0;
11515 : }
11516 :
11517 : /*
11518 : * Now determine how many items match the object. The search loop
11519 : * invariant still holds: only items between low and high inclusive could
11520 : * match.
11521 : */
11522 13326 : nmatch = 1;
11523 13478 : while (middle > low)
11524 : {
11525 6286 : if (classoid != middle[-1].classoid ||
11526 5940 : objoid != middle[-1].objoid)
11527 : break;
11528 152 : middle--;
11529 152 : nmatch++;
11530 : }
11531 :
11532 13326 : *items = middle;
11533 :
11534 13326 : middle += nmatch;
11535 13326 : while (middle <= high)
11536 : {
11537 7188 : if (classoid != middle->classoid ||
11538 6464 : objoid != middle->objoid)
11539 : break;
11540 0 : middle++;
11541 0 : nmatch++;
11542 : }
11543 :
11544 13326 : return nmatch;
11545 : }
11546 :
11547 : /*
11548 : * collectComments --
11549 : *
11550 : * Construct a table of all comments available for database objects;
11551 : * also set the has-comment component flag for each relevant object.
11552 : *
11553 : * We used to do per-object queries for the comments, but it's much faster
11554 : * to pull them all over at once, and on most databases the memory cost
11555 : * isn't high.
11556 : *
11557 : * The table is sorted by classoid/objid/objsubid for speed in lookup.
11558 : */
11559 : static void
11560 370 : collectComments(Archive *fout)
11561 : {
11562 : PGresult *res;
11563 : PQExpBuffer query;
11564 : int i_description;
11565 : int i_classoid;
11566 : int i_objoid;
11567 : int i_objsubid;
11568 : int ntups;
11569 : int i;
11570 : DumpableObject *dobj;
11571 :
11572 370 : query = createPQExpBuffer();
11573 :
11574 370 : appendPQExpBufferStr(query, "SELECT description, classoid, objoid, objsubid "
11575 : "FROM pg_catalog.pg_description "
11576 : "ORDER BY classoid, objoid, objsubid");
11577 :
11578 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
11579 :
11580 : /* Construct lookup table containing OIDs in numeric form */
11581 :
11582 370 : i_description = PQfnumber(res, "description");
11583 370 : i_classoid = PQfnumber(res, "classoid");
11584 370 : i_objoid = PQfnumber(res, "objoid");
11585 370 : i_objsubid = PQfnumber(res, "objsubid");
11586 :
11587 370 : ntups = PQntuples(res);
11588 :
11589 370 : comments = (CommentItem *) pg_malloc(ntups * sizeof(CommentItem));
11590 370 : ncomments = 0;
11591 370 : dobj = NULL;
11592 :
11593 1973728 : for (i = 0; i < ntups; i++)
11594 : {
11595 : CatalogId objId;
11596 : int subid;
11597 :
11598 1973358 : objId.tableoid = atooid(PQgetvalue(res, i, i_classoid));
11599 1973358 : objId.oid = atooid(PQgetvalue(res, i, i_objoid));
11600 1973358 : subid = atoi(PQgetvalue(res, i, i_objsubid));
11601 :
11602 : /* We needn't remember comments that don't match any dumpable object */
11603 1973358 : if (dobj == NULL ||
11604 710426 : dobj->catId.tableoid != objId.tableoid ||
11605 705822 : dobj->catId.oid != objId.oid)
11606 1973154 : dobj = findObjectByCatalogId(objId);
11607 1973358 : if (dobj == NULL)
11608 1262574 : continue;
11609 :
11610 : /*
11611 : * Comments on columns of composite types are linked to the type's
11612 : * pg_class entry, but we need to set the DUMP_COMPONENT_COMMENT flag
11613 : * in the type's own DumpableObject.
11614 : */
11615 710784 : if (subid != 0 && dobj->objType == DO_TABLE &&
11616 436 : ((TableInfo *) dobj)->relkind == RELKIND_COMPOSITE_TYPE)
11617 102 : {
11618 : TypeInfo *cTypeInfo;
11619 :
11620 102 : cTypeInfo = findTypeByOid(((TableInfo *) dobj)->reltype);
11621 102 : if (cTypeInfo)
11622 102 : cTypeInfo->dobj.components |= DUMP_COMPONENT_COMMENT;
11623 : }
11624 : else
11625 710682 : dobj->components |= DUMP_COMPONENT_COMMENT;
11626 :
11627 710784 : comments[ncomments].descr = pg_strdup(PQgetvalue(res, i, i_description));
11628 710784 : comments[ncomments].classoid = objId.tableoid;
11629 710784 : comments[ncomments].objoid = objId.oid;
11630 710784 : comments[ncomments].objsubid = subid;
11631 710784 : ncomments++;
11632 : }
11633 :
11634 370 : PQclear(res);
11635 370 : destroyPQExpBuffer(query);
11636 370 : }
11637 :
11638 : /*
11639 : * dumpDumpableObject
11640 : *
11641 : * This routine and its subsidiaries are responsible for creating
11642 : * ArchiveEntries (TOC objects) for each object to be dumped.
11643 : */
11644 : static void
11645 1380402 : dumpDumpableObject(Archive *fout, DumpableObject *dobj)
11646 : {
11647 : /*
11648 : * Clear any dump-request bits for components that don't exist for this
11649 : * object. (This makes it safe to initially use DUMP_COMPONENT_ALL as the
11650 : * request for every kind of object.)
11651 : */
11652 1380402 : dobj->dump &= dobj->components;
11653 :
11654 : /* Now, short-circuit if there's nothing to be done here. */
11655 1380402 : if (dobj->dump == 0)
11656 1222076 : return;
11657 :
11658 158326 : switch (dobj->objType)
11659 : {
11660 990 : case DO_NAMESPACE:
11661 990 : dumpNamespace(fout, (const NamespaceInfo *) dobj);
11662 990 : break;
11663 48 : case DO_EXTENSION:
11664 48 : dumpExtension(fout, (const ExtensionInfo *) dobj);
11665 48 : break;
11666 1936 : case DO_TYPE:
11667 1936 : dumpType(fout, (const TypeInfo *) dobj);
11668 1936 : break;
11669 158 : case DO_SHELL_TYPE:
11670 158 : dumpShellType(fout, (const ShellTypeInfo *) dobj);
11671 158 : break;
11672 3756 : case DO_FUNC:
11673 3756 : dumpFunc(fout, (const FuncInfo *) dobj);
11674 3756 : break;
11675 596 : case DO_AGG:
11676 596 : dumpAgg(fout, (const AggInfo *) dobj);
11677 596 : break;
11678 5020 : case DO_OPERATOR:
11679 5020 : dumpOpr(fout, (const OprInfo *) dobj);
11680 5020 : break;
11681 184 : case DO_ACCESS_METHOD:
11682 184 : dumpAccessMethod(fout, (const AccessMethodInfo *) dobj);
11683 184 : break;
11684 1356 : case DO_OPCLASS:
11685 1356 : dumpOpclass(fout, (const OpclassInfo *) dobj);
11686 1356 : break;
11687 1122 : case DO_OPFAMILY:
11688 1122 : dumpOpfamily(fout, (const OpfamilyInfo *) dobj);
11689 1122 : break;
11690 5098 : case DO_COLLATION:
11691 5098 : dumpCollation(fout, (const CollInfo *) dobj);
11692 5098 : break;
11693 856 : case DO_CONVERSION:
11694 856 : dumpConversion(fout, (const ConvInfo *) dobj);
11695 856 : break;
11696 62602 : case DO_TABLE:
11697 62602 : dumpTable(fout, (const TableInfo *) dobj);
11698 62602 : break;
11699 2870 : case DO_TABLE_ATTACH:
11700 2870 : dumpTableAttach(fout, (const TableAttachInfo *) dobj);
11701 2870 : break;
11702 2208 : case DO_ATTRDEF:
11703 2208 : dumpAttrDef(fout, (const AttrDefInfo *) dobj);
11704 2208 : break;
11705 5376 : case DO_INDEX:
11706 5376 : dumpIndex(fout, (const IndxInfo *) dobj);
11707 5376 : break;
11708 1224 : case DO_INDEX_ATTACH:
11709 1224 : dumpIndexAttach(fout, (const IndexAttachInfo *) dobj);
11710 1224 : break;
11711 302 : case DO_STATSEXT:
11712 302 : dumpStatisticsExt(fout, (const StatsExtInfo *) dobj);
11713 302 : break;
11714 864 : case DO_REFRESH_MATVIEW:
11715 864 : refreshMatViewData(fout, (const TableDataInfo *) dobj);
11716 864 : break;
11717 2452 : case DO_RULE:
11718 2452 : dumpRule(fout, (const RuleInfo *) dobj);
11719 2452 : break;
11720 1106 : case DO_TRIGGER:
11721 1106 : dumpTrigger(fout, (const TriggerInfo *) dobj);
11722 1106 : break;
11723 96 : case DO_EVENT_TRIGGER:
11724 96 : dumpEventTrigger(fout, (const EventTriggerInfo *) dobj);
11725 96 : break;
11726 4924 : case DO_CONSTRAINT:
11727 4924 : dumpConstraint(fout, (const ConstraintInfo *) dobj);
11728 4924 : break;
11729 366 : case DO_FK_CONSTRAINT:
11730 366 : dumpConstraint(fout, (const ConstraintInfo *) dobj);
11731 366 : break;
11732 188 : case DO_PROCLANG:
11733 188 : dumpProcLang(fout, (const ProcLangInfo *) dobj);
11734 188 : break;
11735 146 : case DO_CAST:
11736 146 : dumpCast(fout, (const CastInfo *) dobj);
11737 146 : break;
11738 96 : case DO_TRANSFORM:
11739 96 : dumpTransform(fout, (const TransformInfo *) dobj);
11740 96 : break;
11741 822 : case DO_SEQUENCE_SET:
11742 822 : dumpSequenceData(fout, (const TableDataInfo *) dobj);
11743 822 : break;
11744 8992 : case DO_TABLE_DATA:
11745 8992 : dumpTableData(fout, (const TableDataInfo *) dobj);
11746 8992 : break;
11747 29870 : case DO_DUMMY_TYPE:
11748 : /* table rowtypes and array types are never dumped separately */
11749 29870 : break;
11750 94 : case DO_TSPARSER:
11751 94 : dumpTSParser(fout, (const TSParserInfo *) dobj);
11752 94 : break;
11753 358 : case DO_TSDICT:
11754 358 : dumpTSDictionary(fout, (const TSDictInfo *) dobj);
11755 358 : break;
11756 118 : case DO_TSTEMPLATE:
11757 118 : dumpTSTemplate(fout, (const TSTemplateInfo *) dobj);
11758 118 : break;
11759 308 : case DO_TSCONFIG:
11760 308 : dumpTSConfig(fout, (const TSConfigInfo *) dobj);
11761 308 : break;
11762 116 : case DO_FDW:
11763 116 : dumpForeignDataWrapper(fout, (const FdwInfo *) dobj);
11764 116 : break;
11765 124 : case DO_FOREIGN_SERVER:
11766 124 : dumpForeignServer(fout, (const ForeignServerInfo *) dobj);
11767 124 : break;
11768 368 : case DO_DEFAULT_ACL:
11769 368 : dumpDefaultACL(fout, (const DefaultACLInfo *) dobj);
11770 368 : break;
11771 178 : case DO_LARGE_OBJECT:
11772 178 : dumpLO(fout, (const LoInfo *) dobj);
11773 178 : break;
11774 190 : case DO_LARGE_OBJECT_DATA:
11775 190 : if (dobj->dump & DUMP_COMPONENT_DATA)
11776 : {
11777 : LoInfo *loinfo;
11778 : TocEntry *te;
11779 :
11780 190 : loinfo = (LoInfo *) findObjectByDumpId(dobj->dependencies[0]);
11781 190 : if (loinfo == NULL)
11782 0 : pg_fatal("missing metadata for large objects \"%s\"",
11783 : dobj->name);
11784 :
11785 190 : te = ArchiveEntry(fout, dobj->catId, dobj->dumpId,
11786 190 : ARCHIVE_OPTS(.tag = dobj->name,
11787 : .owner = loinfo->rolname,
11788 : .description = "BLOBS",
11789 : .section = SECTION_DATA,
11790 : .deps = dobj->dependencies,
11791 : .nDeps = dobj->nDeps,
11792 : .dumpFn = dumpLOs,
11793 : .dumpArg = loinfo));
11794 :
11795 : /*
11796 : * Set the TocEntry's dataLength in case we are doing a
11797 : * parallel dump and want to order dump jobs by table size.
11798 : * (We need some size estimate for every TocEntry with a
11799 : * DataDumper function.) We don't currently have any cheap
11800 : * way to estimate the size of LOs, but fortunately it doesn't
11801 : * matter too much as long as we get large batches of LOs
11802 : * processed reasonably early. Assume 8K per blob.
11803 : */
11804 190 : te->dataLength = loinfo->numlos * (pgoff_t) 8192;
11805 : }
11806 190 : break;
11807 736 : case DO_POLICY:
11808 736 : dumpPolicy(fout, (const PolicyInfo *) dobj);
11809 736 : break;
11810 654 : case DO_PUBLICATION:
11811 654 : dumpPublication(fout, (const PublicationInfo *) dobj);
11812 654 : break;
11813 652 : case DO_PUBLICATION_REL:
11814 652 : dumpPublicationTable(fout, (const PublicationRelInfo *) dobj);
11815 652 : break;
11816 222 : case DO_PUBLICATION_TABLE_IN_SCHEMA:
11817 222 : dumpPublicationNamespace(fout,
11818 : (const PublicationSchemaInfo *) dobj);
11819 222 : break;
11820 256 : case DO_SUBSCRIPTION:
11821 256 : dumpSubscription(fout, (const SubscriptionInfo *) dobj);
11822 256 : break;
11823 4 : case DO_SUBSCRIPTION_REL:
11824 4 : dumpSubscriptionTable(fout, (const SubRelInfo *) dobj);
11825 4 : break;
11826 7584 : case DO_REL_STATS:
11827 7584 : dumpRelationStats(fout, (const RelStatsInfo *) dobj);
11828 7584 : break;
11829 740 : case DO_PRE_DATA_BOUNDARY:
11830 : case DO_POST_DATA_BOUNDARY:
11831 : /* never dumped, nothing to do */
11832 740 : break;
11833 : }
11834 : }
11835 :
11836 : /*
11837 : * dumpNamespace
11838 : * writes out to fout the queries to recreate a user-defined namespace
11839 : */
11840 : static void
11841 990 : dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo)
11842 : {
11843 990 : DumpOptions *dopt = fout->dopt;
11844 : PQExpBuffer q;
11845 : PQExpBuffer delq;
11846 : char *qnspname;
11847 :
11848 : /* Do nothing if not dumping schema */
11849 990 : if (!dopt->dumpSchema)
11850 56 : return;
11851 :
11852 934 : q = createPQExpBuffer();
11853 934 : delq = createPQExpBuffer();
11854 :
11855 934 : qnspname = pg_strdup(fmtId(nspinfo->dobj.name));
11856 :
11857 934 : if (nspinfo->create)
11858 : {
11859 634 : appendPQExpBuffer(delq, "DROP SCHEMA %s;\n", qnspname);
11860 634 : appendPQExpBuffer(q, "CREATE SCHEMA %s;\n", qnspname);
11861 : }
11862 : else
11863 : {
11864 : /* see selectDumpableNamespace() */
11865 300 : appendPQExpBufferStr(delq,
11866 : "-- *not* dropping schema, since initdb creates it\n");
11867 300 : appendPQExpBufferStr(q,
11868 : "-- *not* creating schema, since initdb creates it\n");
11869 : }
11870 :
11871 934 : if (dopt->binary_upgrade)
11872 180 : binary_upgrade_extension_member(q, &nspinfo->dobj,
11873 : "SCHEMA", qnspname, NULL);
11874 :
11875 934 : if (nspinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
11876 406 : ArchiveEntry(fout, nspinfo->dobj.catId, nspinfo->dobj.dumpId,
11877 406 : ARCHIVE_OPTS(.tag = nspinfo->dobj.name,
11878 : .owner = nspinfo->rolname,
11879 : .description = "SCHEMA",
11880 : .section = SECTION_PRE_DATA,
11881 : .createStmt = q->data,
11882 : .dropStmt = delq->data));
11883 :
11884 : /* Dump Schema Comments and Security Labels */
11885 934 : if (nspinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
11886 : {
11887 310 : const char *initdb_comment = NULL;
11888 :
11889 310 : if (!nspinfo->create && strcmp(qnspname, "public") == 0)
11890 232 : initdb_comment = "standard public schema";
11891 310 : dumpCommentExtended(fout, "SCHEMA", qnspname,
11892 310 : NULL, nspinfo->rolname,
11893 310 : nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId,
11894 : initdb_comment);
11895 : }
11896 :
11897 934 : if (nspinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
11898 0 : dumpSecLabel(fout, "SCHEMA", qnspname,
11899 0 : NULL, nspinfo->rolname,
11900 0 : nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId);
11901 :
11902 934 : if (nspinfo->dobj.dump & DUMP_COMPONENT_ACL)
11903 718 : dumpACL(fout, nspinfo->dobj.dumpId, InvalidDumpId, "SCHEMA",
11904 : qnspname, NULL, NULL,
11905 718 : NULL, nspinfo->rolname, &nspinfo->dacl);
11906 :
11907 934 : free(qnspname);
11908 :
11909 934 : destroyPQExpBuffer(q);
11910 934 : destroyPQExpBuffer(delq);
11911 : }
11912 :
11913 : /*
11914 : * dumpExtension
11915 : * writes out to fout the queries to recreate an extension
11916 : */
11917 : static void
11918 48 : dumpExtension(Archive *fout, const ExtensionInfo *extinfo)
11919 : {
11920 48 : DumpOptions *dopt = fout->dopt;
11921 : PQExpBuffer q;
11922 : PQExpBuffer delq;
11923 : char *qextname;
11924 :
11925 : /* Do nothing if not dumping schema */
11926 48 : if (!dopt->dumpSchema)
11927 2 : return;
11928 :
11929 46 : q = createPQExpBuffer();
11930 46 : delq = createPQExpBuffer();
11931 :
11932 46 : qextname = pg_strdup(fmtId(extinfo->dobj.name));
11933 :
11934 46 : appendPQExpBuffer(delq, "DROP EXTENSION %s;\n", qextname);
11935 :
11936 46 : if (!dopt->binary_upgrade)
11937 : {
11938 : /*
11939 : * In a regular dump, we simply create the extension, intentionally
11940 : * not specifying a version, so that the destination installation's
11941 : * default version is used.
11942 : *
11943 : * Use of IF NOT EXISTS here is unlike our behavior for other object
11944 : * types; but there are various scenarios in which it's convenient to
11945 : * manually create the desired extension before restoring, so we
11946 : * prefer to allow it to exist already.
11947 : */
11948 34 : appendPQExpBuffer(q, "CREATE EXTENSION IF NOT EXISTS %s WITH SCHEMA %s;\n",
11949 34 : qextname, fmtId(extinfo->namespace));
11950 : }
11951 : else
11952 : {
11953 : /*
11954 : * In binary-upgrade mode, it's critical to reproduce the state of the
11955 : * database exactly, so our procedure is to create an empty extension,
11956 : * restore all the contained objects normally, and add them to the
11957 : * extension one by one. This function performs just the first of
11958 : * those steps. binary_upgrade_extension_member() takes care of
11959 : * adding member objects as they're created.
11960 : */
11961 : int i;
11962 : int n;
11963 :
11964 12 : appendPQExpBufferStr(q, "-- For binary upgrade, create an empty extension and insert objects into it\n");
11965 :
11966 : /*
11967 : * We unconditionally create the extension, so we must drop it if it
11968 : * exists. This could happen if the user deleted 'plpgsql' and then
11969 : * readded it, causing its oid to be greater than g_last_builtin_oid.
11970 : */
11971 12 : appendPQExpBuffer(q, "DROP EXTENSION IF EXISTS %s;\n", qextname);
11972 :
11973 12 : appendPQExpBufferStr(q,
11974 : "SELECT pg_catalog.binary_upgrade_create_empty_extension(");
11975 12 : appendStringLiteralAH(q, extinfo->dobj.name, fout);
11976 12 : appendPQExpBufferStr(q, ", ");
11977 12 : appendStringLiteralAH(q, extinfo->namespace, fout);
11978 12 : appendPQExpBufferStr(q, ", ");
11979 12 : appendPQExpBuffer(q, "%s, ", extinfo->relocatable ? "true" : "false");
11980 12 : appendStringLiteralAH(q, extinfo->extversion, fout);
11981 12 : appendPQExpBufferStr(q, ", ");
11982 :
11983 : /*
11984 : * Note that we're pushing extconfig (an OID array) back into
11985 : * pg_extension exactly as-is. This is OK because pg_class OIDs are
11986 : * preserved in binary upgrade.
11987 : */
11988 12 : if (strlen(extinfo->extconfig) > 2)
11989 2 : appendStringLiteralAH(q, extinfo->extconfig, fout);
11990 : else
11991 10 : appendPQExpBufferStr(q, "NULL");
11992 12 : appendPQExpBufferStr(q, ", ");
11993 12 : if (strlen(extinfo->extcondition) > 2)
11994 2 : appendStringLiteralAH(q, extinfo->extcondition, fout);
11995 : else
11996 10 : appendPQExpBufferStr(q, "NULL");
11997 12 : appendPQExpBufferStr(q, ", ");
11998 12 : appendPQExpBufferStr(q, "ARRAY[");
11999 12 : n = 0;
12000 24 : for (i = 0; i < extinfo->dobj.nDeps; i++)
12001 : {
12002 : DumpableObject *extobj;
12003 :
12004 12 : extobj = findObjectByDumpId(extinfo->dobj.dependencies[i]);
12005 12 : if (extobj && extobj->objType == DO_EXTENSION)
12006 : {
12007 0 : if (n++ > 0)
12008 0 : appendPQExpBufferChar(q, ',');
12009 0 : appendStringLiteralAH(q, extobj->name, fout);
12010 : }
12011 : }
12012 12 : appendPQExpBufferStr(q, "]::pg_catalog.text[]");
12013 12 : appendPQExpBufferStr(q, ");\n");
12014 : }
12015 :
12016 46 : if (extinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12017 46 : ArchiveEntry(fout, extinfo->dobj.catId, extinfo->dobj.dumpId,
12018 46 : ARCHIVE_OPTS(.tag = extinfo->dobj.name,
12019 : .description = "EXTENSION",
12020 : .section = SECTION_PRE_DATA,
12021 : .createStmt = q->data,
12022 : .dropStmt = delq->data));
12023 :
12024 : /* Dump Extension Comments and Security Labels */
12025 46 : if (extinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12026 46 : dumpComment(fout, "EXTENSION", qextname,
12027 : NULL, "",
12028 46 : extinfo->dobj.catId, 0, extinfo->dobj.dumpId);
12029 :
12030 46 : if (extinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12031 0 : dumpSecLabel(fout, "EXTENSION", qextname,
12032 : NULL, "",
12033 0 : extinfo->dobj.catId, 0, extinfo->dobj.dumpId);
12034 :
12035 46 : free(qextname);
12036 :
12037 46 : destroyPQExpBuffer(q);
12038 46 : destroyPQExpBuffer(delq);
12039 : }
12040 :
12041 : /*
12042 : * dumpType
12043 : * writes out to fout the queries to recreate a user-defined type
12044 : */
12045 : static void
12046 1936 : dumpType(Archive *fout, const TypeInfo *tyinfo)
12047 : {
12048 1936 : DumpOptions *dopt = fout->dopt;
12049 :
12050 : /* Do nothing if not dumping schema */
12051 1936 : if (!dopt->dumpSchema)
12052 98 : return;
12053 :
12054 : /* Dump out in proper style */
12055 1838 : if (tyinfo->typtype == TYPTYPE_BASE)
12056 572 : dumpBaseType(fout, tyinfo);
12057 1266 : else if (tyinfo->typtype == TYPTYPE_DOMAIN)
12058 316 : dumpDomain(fout, tyinfo);
12059 950 : else if (tyinfo->typtype == TYPTYPE_COMPOSITE)
12060 272 : dumpCompositeType(fout, tyinfo);
12061 678 : else if (tyinfo->typtype == TYPTYPE_ENUM)
12062 194 : dumpEnumType(fout, tyinfo);
12063 484 : else if (tyinfo->typtype == TYPTYPE_RANGE)
12064 248 : dumpRangeType(fout, tyinfo);
12065 236 : else if (tyinfo->typtype == TYPTYPE_PSEUDO && !tyinfo->isDefined)
12066 86 : dumpUndefinedType(fout, tyinfo);
12067 : else
12068 150 : pg_log_warning("typtype of data type \"%s\" appears to be invalid",
12069 : tyinfo->dobj.name);
12070 : }
12071 :
12072 : /*
12073 : * dumpEnumType
12074 : * writes out to fout the queries to recreate a user-defined enum type
12075 : */
12076 : static void
12077 194 : dumpEnumType(Archive *fout, const TypeInfo *tyinfo)
12078 : {
12079 194 : DumpOptions *dopt = fout->dopt;
12080 194 : PQExpBuffer q = createPQExpBuffer();
12081 194 : PQExpBuffer delq = createPQExpBuffer();
12082 194 : PQExpBuffer query = createPQExpBuffer();
12083 : PGresult *res;
12084 : int num,
12085 : i;
12086 : Oid enum_oid;
12087 : char *qtypname;
12088 : char *qualtypname;
12089 : char *label;
12090 : int i_enumlabel;
12091 : int i_oid;
12092 :
12093 194 : if (!fout->is_prepared[PREPQUERY_DUMPENUMTYPE])
12094 : {
12095 : /* Set up query for enum-specific details */
12096 92 : appendPQExpBufferStr(query,
12097 : "PREPARE dumpEnumType(pg_catalog.oid) AS\n"
12098 : "SELECT oid, enumlabel "
12099 : "FROM pg_catalog.pg_enum "
12100 : "WHERE enumtypid = $1 "
12101 : "ORDER BY enumsortorder");
12102 :
12103 92 : ExecuteSqlStatement(fout, query->data);
12104 :
12105 92 : fout->is_prepared[PREPQUERY_DUMPENUMTYPE] = true;
12106 : }
12107 :
12108 194 : printfPQExpBuffer(query,
12109 : "EXECUTE dumpEnumType('%u')",
12110 194 : tyinfo->dobj.catId.oid);
12111 :
12112 194 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
12113 :
12114 194 : num = PQntuples(res);
12115 :
12116 194 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12117 194 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
12118 :
12119 : /*
12120 : * CASCADE shouldn't be required here as for normal types since the I/O
12121 : * functions are generic and do not get dropped.
12122 : */
12123 194 : appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12124 :
12125 194 : if (dopt->binary_upgrade)
12126 12 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
12127 12 : tyinfo->dobj.catId.oid,
12128 : false, false);
12129 :
12130 194 : appendPQExpBuffer(q, "CREATE TYPE %s AS ENUM (",
12131 : qualtypname);
12132 :
12133 194 : if (!dopt->binary_upgrade)
12134 : {
12135 182 : i_enumlabel = PQfnumber(res, "enumlabel");
12136 :
12137 : /* Labels with server-assigned oids */
12138 1060 : for (i = 0; i < num; i++)
12139 : {
12140 878 : label = PQgetvalue(res, i, i_enumlabel);
12141 878 : if (i > 0)
12142 696 : appendPQExpBufferChar(q, ',');
12143 878 : appendPQExpBufferStr(q, "\n ");
12144 878 : appendStringLiteralAH(q, label, fout);
12145 : }
12146 : }
12147 :
12148 194 : appendPQExpBufferStr(q, "\n);\n");
12149 :
12150 194 : if (dopt->binary_upgrade)
12151 : {
12152 12 : i_oid = PQfnumber(res, "oid");
12153 12 : i_enumlabel = PQfnumber(res, "enumlabel");
12154 :
12155 : /* Labels with dump-assigned (preserved) oids */
12156 124 : for (i = 0; i < num; i++)
12157 : {
12158 112 : enum_oid = atooid(PQgetvalue(res, i, i_oid));
12159 112 : label = PQgetvalue(res, i, i_enumlabel);
12160 :
12161 112 : if (i == 0)
12162 12 : appendPQExpBufferStr(q, "\n-- For binary upgrade, must preserve pg_enum oids\n");
12163 112 : appendPQExpBuffer(q,
12164 : "SELECT pg_catalog.binary_upgrade_set_next_pg_enum_oid('%u'::pg_catalog.oid);\n",
12165 : enum_oid);
12166 112 : appendPQExpBuffer(q, "ALTER TYPE %s ADD VALUE ", qualtypname);
12167 112 : appendStringLiteralAH(q, label, fout);
12168 112 : appendPQExpBufferStr(q, ";\n\n");
12169 : }
12170 : }
12171 :
12172 194 : if (dopt->binary_upgrade)
12173 12 : binary_upgrade_extension_member(q, &tyinfo->dobj,
12174 : "TYPE", qtypname,
12175 12 : tyinfo->dobj.namespace->dobj.name);
12176 :
12177 194 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12178 194 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12179 194 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12180 : .namespace = tyinfo->dobj.namespace->dobj.name,
12181 : .owner = tyinfo->rolname,
12182 : .description = "TYPE",
12183 : .section = SECTION_PRE_DATA,
12184 : .createStmt = q->data,
12185 : .dropStmt = delq->data));
12186 :
12187 : /* Dump Type Comments and Security Labels */
12188 194 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12189 76 : dumpComment(fout, "TYPE", qtypname,
12190 76 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12191 76 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12192 :
12193 194 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12194 0 : dumpSecLabel(fout, "TYPE", qtypname,
12195 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12196 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12197 :
12198 194 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12199 76 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12200 : qtypname, NULL,
12201 76 : tyinfo->dobj.namespace->dobj.name,
12202 76 : NULL, tyinfo->rolname, &tyinfo->dacl);
12203 :
12204 194 : PQclear(res);
12205 194 : destroyPQExpBuffer(q);
12206 194 : destroyPQExpBuffer(delq);
12207 194 : destroyPQExpBuffer(query);
12208 194 : free(qtypname);
12209 194 : free(qualtypname);
12210 194 : }
12211 :
12212 : /*
12213 : * dumpRangeType
12214 : * writes out to fout the queries to recreate a user-defined range type
12215 : */
12216 : static void
12217 248 : dumpRangeType(Archive *fout, const TypeInfo *tyinfo)
12218 : {
12219 248 : DumpOptions *dopt = fout->dopt;
12220 248 : PQExpBuffer q = createPQExpBuffer();
12221 248 : PQExpBuffer delq = createPQExpBuffer();
12222 248 : PQExpBuffer query = createPQExpBuffer();
12223 : PGresult *res;
12224 : Oid collationOid;
12225 : char *qtypname;
12226 : char *qualtypname;
12227 : char *procname;
12228 :
12229 248 : if (!fout->is_prepared[PREPQUERY_DUMPRANGETYPE])
12230 : {
12231 : /* Set up query for range-specific details */
12232 92 : appendPQExpBufferStr(query,
12233 : "PREPARE dumpRangeType(pg_catalog.oid) AS\n");
12234 :
12235 92 : appendPQExpBufferStr(query,
12236 : "SELECT ");
12237 :
12238 92 : if (fout->remoteVersion >= 140000)
12239 92 : appendPQExpBufferStr(query,
12240 : "pg_catalog.format_type(rngmultitypid, NULL) AS rngmultitype, ");
12241 : else
12242 0 : appendPQExpBufferStr(query,
12243 : "NULL AS rngmultitype, ");
12244 :
12245 92 : appendPQExpBufferStr(query,
12246 : "pg_catalog.format_type(rngsubtype, NULL) AS rngsubtype, "
12247 : "opc.opcname AS opcname, "
12248 : "(SELECT nspname FROM pg_catalog.pg_namespace nsp "
12249 : " WHERE nsp.oid = opc.opcnamespace) AS opcnsp, "
12250 : "opc.opcdefault, "
12251 : "CASE WHEN rngcollation = st.typcollation THEN 0 "
12252 : " ELSE rngcollation END AS collation, "
12253 : "rngcanonical, rngsubdiff "
12254 : "FROM pg_catalog.pg_range r, pg_catalog.pg_type st, "
12255 : " pg_catalog.pg_opclass opc "
12256 : "WHERE st.oid = rngsubtype AND opc.oid = rngsubopc AND "
12257 : "rngtypid = $1");
12258 :
12259 92 : ExecuteSqlStatement(fout, query->data);
12260 :
12261 92 : fout->is_prepared[PREPQUERY_DUMPRANGETYPE] = true;
12262 : }
12263 :
12264 248 : printfPQExpBuffer(query,
12265 : "EXECUTE dumpRangeType('%u')",
12266 248 : tyinfo->dobj.catId.oid);
12267 :
12268 248 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
12269 :
12270 248 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12271 248 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
12272 :
12273 : /*
12274 : * CASCADE shouldn't be required here as for normal types since the I/O
12275 : * functions are generic and do not get dropped.
12276 : */
12277 248 : appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12278 :
12279 248 : if (dopt->binary_upgrade)
12280 16 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
12281 16 : tyinfo->dobj.catId.oid,
12282 : false, true);
12283 :
12284 248 : appendPQExpBuffer(q, "CREATE TYPE %s AS RANGE (",
12285 : qualtypname);
12286 :
12287 248 : appendPQExpBuffer(q, "\n subtype = %s",
12288 : PQgetvalue(res, 0, PQfnumber(res, "rngsubtype")));
12289 :
12290 248 : if (!PQgetisnull(res, 0, PQfnumber(res, "rngmultitype")))
12291 248 : appendPQExpBuffer(q, ",\n multirange_type_name = %s",
12292 : PQgetvalue(res, 0, PQfnumber(res, "rngmultitype")));
12293 :
12294 : /* print subtype_opclass only if not default for subtype */
12295 248 : if (PQgetvalue(res, 0, PQfnumber(res, "opcdefault"))[0] != 't')
12296 : {
12297 76 : char *opcname = PQgetvalue(res, 0, PQfnumber(res, "opcname"));
12298 76 : char *nspname = PQgetvalue(res, 0, PQfnumber(res, "opcnsp"));
12299 :
12300 76 : appendPQExpBuffer(q, ",\n subtype_opclass = %s.",
12301 : fmtId(nspname));
12302 76 : appendPQExpBufferStr(q, fmtId(opcname));
12303 : }
12304 :
12305 248 : collationOid = atooid(PQgetvalue(res, 0, PQfnumber(res, "collation")));
12306 248 : if (OidIsValid(collationOid))
12307 : {
12308 86 : CollInfo *coll = findCollationByOid(collationOid);
12309 :
12310 86 : if (coll)
12311 86 : appendPQExpBuffer(q, ",\n collation = %s",
12312 86 : fmtQualifiedDumpable(coll));
12313 : }
12314 :
12315 248 : procname = PQgetvalue(res, 0, PQfnumber(res, "rngcanonical"));
12316 248 : if (strcmp(procname, "-") != 0)
12317 18 : appendPQExpBuffer(q, ",\n canonical = %s", procname);
12318 :
12319 248 : procname = PQgetvalue(res, 0, PQfnumber(res, "rngsubdiff"));
12320 248 : if (strcmp(procname, "-") != 0)
12321 46 : appendPQExpBuffer(q, ",\n subtype_diff = %s", procname);
12322 :
12323 248 : appendPQExpBufferStr(q, "\n);\n");
12324 :
12325 248 : if (dopt->binary_upgrade)
12326 16 : binary_upgrade_extension_member(q, &tyinfo->dobj,
12327 : "TYPE", qtypname,
12328 16 : tyinfo->dobj.namespace->dobj.name);
12329 :
12330 248 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12331 248 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12332 248 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12333 : .namespace = tyinfo->dobj.namespace->dobj.name,
12334 : .owner = tyinfo->rolname,
12335 : .description = "TYPE",
12336 : .section = SECTION_PRE_DATA,
12337 : .createStmt = q->data,
12338 : .dropStmt = delq->data));
12339 :
12340 : /* Dump Type Comments and Security Labels */
12341 248 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12342 112 : dumpComment(fout, "TYPE", qtypname,
12343 112 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12344 112 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12345 :
12346 248 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12347 0 : dumpSecLabel(fout, "TYPE", qtypname,
12348 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12349 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12350 :
12351 248 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12352 76 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12353 : qtypname, NULL,
12354 76 : tyinfo->dobj.namespace->dobj.name,
12355 76 : NULL, tyinfo->rolname, &tyinfo->dacl);
12356 :
12357 248 : PQclear(res);
12358 248 : destroyPQExpBuffer(q);
12359 248 : destroyPQExpBuffer(delq);
12360 248 : destroyPQExpBuffer(query);
12361 248 : free(qtypname);
12362 248 : free(qualtypname);
12363 248 : }
12364 :
12365 : /*
12366 : * dumpUndefinedType
12367 : * writes out to fout the queries to recreate a !typisdefined type
12368 : *
12369 : * This is a shell type, but we use different terminology to distinguish
12370 : * this case from where we have to emit a shell type definition to break
12371 : * circular dependencies. An undefined type shouldn't ever have anything
12372 : * depending on it.
12373 : */
12374 : static void
12375 86 : dumpUndefinedType(Archive *fout, const TypeInfo *tyinfo)
12376 : {
12377 86 : DumpOptions *dopt = fout->dopt;
12378 86 : PQExpBuffer q = createPQExpBuffer();
12379 86 : PQExpBuffer delq = createPQExpBuffer();
12380 : char *qtypname;
12381 : char *qualtypname;
12382 :
12383 86 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12384 86 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
12385 :
12386 86 : appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12387 :
12388 86 : if (dopt->binary_upgrade)
12389 4 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
12390 4 : tyinfo->dobj.catId.oid,
12391 : false, false);
12392 :
12393 86 : appendPQExpBuffer(q, "CREATE TYPE %s;\n",
12394 : qualtypname);
12395 :
12396 86 : if (dopt->binary_upgrade)
12397 4 : binary_upgrade_extension_member(q, &tyinfo->dobj,
12398 : "TYPE", qtypname,
12399 4 : tyinfo->dobj.namespace->dobj.name);
12400 :
12401 86 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12402 86 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12403 86 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12404 : .namespace = tyinfo->dobj.namespace->dobj.name,
12405 : .owner = tyinfo->rolname,
12406 : .description = "TYPE",
12407 : .section = SECTION_PRE_DATA,
12408 : .createStmt = q->data,
12409 : .dropStmt = delq->data));
12410 :
12411 : /* Dump Type Comments and Security Labels */
12412 86 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12413 76 : dumpComment(fout, "TYPE", qtypname,
12414 76 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12415 76 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12416 :
12417 86 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12418 0 : dumpSecLabel(fout, "TYPE", qtypname,
12419 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12420 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12421 :
12422 86 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12423 0 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12424 : qtypname, NULL,
12425 0 : tyinfo->dobj.namespace->dobj.name,
12426 0 : NULL, tyinfo->rolname, &tyinfo->dacl);
12427 :
12428 86 : destroyPQExpBuffer(q);
12429 86 : destroyPQExpBuffer(delq);
12430 86 : free(qtypname);
12431 86 : free(qualtypname);
12432 86 : }
12433 :
12434 : /*
12435 : * dumpBaseType
12436 : * writes out to fout the queries to recreate a user-defined base type
12437 : */
12438 : static void
12439 572 : dumpBaseType(Archive *fout, const TypeInfo *tyinfo)
12440 : {
12441 572 : DumpOptions *dopt = fout->dopt;
12442 572 : PQExpBuffer q = createPQExpBuffer();
12443 572 : PQExpBuffer delq = createPQExpBuffer();
12444 572 : PQExpBuffer query = createPQExpBuffer();
12445 : PGresult *res;
12446 : char *qtypname;
12447 : char *qualtypname;
12448 : char *typlen;
12449 : char *typinput;
12450 : char *typoutput;
12451 : char *typreceive;
12452 : char *typsend;
12453 : char *typmodin;
12454 : char *typmodout;
12455 : char *typanalyze;
12456 : char *typsubscript;
12457 : Oid typreceiveoid;
12458 : Oid typsendoid;
12459 : Oid typmodinoid;
12460 : Oid typmodoutoid;
12461 : Oid typanalyzeoid;
12462 : Oid typsubscriptoid;
12463 : char *typcategory;
12464 : char *typispreferred;
12465 : char *typdelim;
12466 : char *typbyval;
12467 : char *typalign;
12468 : char *typstorage;
12469 : char *typcollatable;
12470 : char *typdefault;
12471 572 : bool typdefault_is_literal = false;
12472 :
12473 572 : if (!fout->is_prepared[PREPQUERY_DUMPBASETYPE])
12474 : {
12475 : /* Set up query for type-specific details */
12476 92 : appendPQExpBufferStr(query,
12477 : "PREPARE dumpBaseType(pg_catalog.oid) AS\n"
12478 : "SELECT typlen, "
12479 : "typinput, typoutput, typreceive, typsend, "
12480 : "typreceive::pg_catalog.oid AS typreceiveoid, "
12481 : "typsend::pg_catalog.oid AS typsendoid, "
12482 : "typanalyze, "
12483 : "typanalyze::pg_catalog.oid AS typanalyzeoid, "
12484 : "typdelim, typbyval, typalign, typstorage, "
12485 : "typmodin, typmodout, "
12486 : "typmodin::pg_catalog.oid AS typmodinoid, "
12487 : "typmodout::pg_catalog.oid AS typmodoutoid, "
12488 : "typcategory, typispreferred, "
12489 : "(typcollation <> 0) AS typcollatable, "
12490 : "pg_catalog.pg_get_expr(typdefaultbin, 0) AS typdefaultbin, typdefault, ");
12491 :
12492 92 : if (fout->remoteVersion >= 140000)
12493 92 : appendPQExpBufferStr(query,
12494 : "typsubscript, "
12495 : "typsubscript::pg_catalog.oid AS typsubscriptoid ");
12496 : else
12497 0 : appendPQExpBufferStr(query,
12498 : "'-' AS typsubscript, 0 AS typsubscriptoid ");
12499 :
12500 92 : appendPQExpBufferStr(query, "FROM pg_catalog.pg_type "
12501 : "WHERE oid = $1");
12502 :
12503 92 : ExecuteSqlStatement(fout, query->data);
12504 :
12505 92 : fout->is_prepared[PREPQUERY_DUMPBASETYPE] = true;
12506 : }
12507 :
12508 572 : printfPQExpBuffer(query,
12509 : "EXECUTE dumpBaseType('%u')",
12510 572 : tyinfo->dobj.catId.oid);
12511 :
12512 572 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
12513 :
12514 572 : typlen = PQgetvalue(res, 0, PQfnumber(res, "typlen"));
12515 572 : typinput = PQgetvalue(res, 0, PQfnumber(res, "typinput"));
12516 572 : typoutput = PQgetvalue(res, 0, PQfnumber(res, "typoutput"));
12517 572 : typreceive = PQgetvalue(res, 0, PQfnumber(res, "typreceive"));
12518 572 : typsend = PQgetvalue(res, 0, PQfnumber(res, "typsend"));
12519 572 : typmodin = PQgetvalue(res, 0, PQfnumber(res, "typmodin"));
12520 572 : typmodout = PQgetvalue(res, 0, PQfnumber(res, "typmodout"));
12521 572 : typanalyze = PQgetvalue(res, 0, PQfnumber(res, "typanalyze"));
12522 572 : typsubscript = PQgetvalue(res, 0, PQfnumber(res, "typsubscript"));
12523 572 : typreceiveoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typreceiveoid")));
12524 572 : typsendoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typsendoid")));
12525 572 : typmodinoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typmodinoid")));
12526 572 : typmodoutoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typmodoutoid")));
12527 572 : typanalyzeoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typanalyzeoid")));
12528 572 : typsubscriptoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typsubscriptoid")));
12529 572 : typcategory = PQgetvalue(res, 0, PQfnumber(res, "typcategory"));
12530 572 : typispreferred = PQgetvalue(res, 0, PQfnumber(res, "typispreferred"));
12531 572 : typdelim = PQgetvalue(res, 0, PQfnumber(res, "typdelim"));
12532 572 : typbyval = PQgetvalue(res, 0, PQfnumber(res, "typbyval"));
12533 572 : typalign = PQgetvalue(res, 0, PQfnumber(res, "typalign"));
12534 572 : typstorage = PQgetvalue(res, 0, PQfnumber(res, "typstorage"));
12535 572 : typcollatable = PQgetvalue(res, 0, PQfnumber(res, "typcollatable"));
12536 572 : if (!PQgetisnull(res, 0, PQfnumber(res, "typdefaultbin")))
12537 0 : typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefaultbin"));
12538 572 : else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
12539 : {
12540 96 : typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
12541 96 : typdefault_is_literal = true; /* it needs quotes */
12542 : }
12543 : else
12544 476 : typdefault = NULL;
12545 :
12546 572 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12547 572 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
12548 :
12549 : /*
12550 : * The reason we include CASCADE is that the circular dependency between
12551 : * the type and its I/O functions makes it impossible to drop the type any
12552 : * other way.
12553 : */
12554 572 : appendPQExpBuffer(delq, "DROP TYPE %s CASCADE;\n", qualtypname);
12555 :
12556 : /*
12557 : * We might already have a shell type, but setting pg_type_oid is
12558 : * harmless, and in any case we'd better set the array type OID.
12559 : */
12560 572 : if (dopt->binary_upgrade)
12561 16 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
12562 16 : tyinfo->dobj.catId.oid,
12563 : false, false);
12564 :
12565 572 : appendPQExpBuffer(q,
12566 : "CREATE TYPE %s (\n"
12567 : " INTERNALLENGTH = %s",
12568 : qualtypname,
12569 572 : (strcmp(typlen, "-1") == 0) ? "variable" : typlen);
12570 :
12571 : /* regproc result is sufficiently quoted already */
12572 572 : appendPQExpBuffer(q, ",\n INPUT = %s", typinput);
12573 572 : appendPQExpBuffer(q, ",\n OUTPUT = %s", typoutput);
12574 572 : if (OidIsValid(typreceiveoid))
12575 414 : appendPQExpBuffer(q, ",\n RECEIVE = %s", typreceive);
12576 572 : if (OidIsValid(typsendoid))
12577 414 : appendPQExpBuffer(q, ",\n SEND = %s", typsend);
12578 572 : if (OidIsValid(typmodinoid))
12579 70 : appendPQExpBuffer(q, ",\n TYPMOD_IN = %s", typmodin);
12580 572 : if (OidIsValid(typmodoutoid))
12581 70 : appendPQExpBuffer(q, ",\n TYPMOD_OUT = %s", typmodout);
12582 572 : if (OidIsValid(typanalyzeoid))
12583 6 : appendPQExpBuffer(q, ",\n ANALYZE = %s", typanalyze);
12584 :
12585 572 : if (strcmp(typcollatable, "t") == 0)
12586 60 : appendPQExpBufferStr(q, ",\n COLLATABLE = true");
12587 :
12588 572 : if (typdefault != NULL)
12589 : {
12590 96 : appendPQExpBufferStr(q, ",\n DEFAULT = ");
12591 96 : if (typdefault_is_literal)
12592 96 : appendStringLiteralAH(q, typdefault, fout);
12593 : else
12594 0 : appendPQExpBufferStr(q, typdefault);
12595 : }
12596 :
12597 572 : if (OidIsValid(typsubscriptoid))
12598 58 : appendPQExpBuffer(q, ",\n SUBSCRIPT = %s", typsubscript);
12599 :
12600 572 : if (OidIsValid(tyinfo->typelem))
12601 52 : appendPQExpBuffer(q, ",\n ELEMENT = %s",
12602 52 : getFormattedTypeName(fout, tyinfo->typelem,
12603 : zeroIsError));
12604 :
12605 572 : if (strcmp(typcategory, "U") != 0)
12606 : {
12607 316 : appendPQExpBufferStr(q, ",\n CATEGORY = ");
12608 316 : appendStringLiteralAH(q, typcategory, fout);
12609 : }
12610 :
12611 572 : if (strcmp(typispreferred, "t") == 0)
12612 58 : appendPQExpBufferStr(q, ",\n PREFERRED = true");
12613 :
12614 572 : if (typdelim && strcmp(typdelim, ",") != 0)
12615 : {
12616 6 : appendPQExpBufferStr(q, ",\n DELIMITER = ");
12617 6 : appendStringLiteralAH(q, typdelim, fout);
12618 : }
12619 :
12620 572 : if (*typalign == TYPALIGN_CHAR)
12621 24 : appendPQExpBufferStr(q, ",\n ALIGNMENT = char");
12622 548 : else if (*typalign == TYPALIGN_SHORT)
12623 12 : appendPQExpBufferStr(q, ",\n ALIGNMENT = int2");
12624 536 : else if (*typalign == TYPALIGN_INT)
12625 386 : appendPQExpBufferStr(q, ",\n ALIGNMENT = int4");
12626 150 : else if (*typalign == TYPALIGN_DOUBLE)
12627 150 : appendPQExpBufferStr(q, ",\n ALIGNMENT = double");
12628 :
12629 572 : if (*typstorage == TYPSTORAGE_PLAIN)
12630 422 : appendPQExpBufferStr(q, ",\n STORAGE = plain");
12631 150 : else if (*typstorage == TYPSTORAGE_EXTERNAL)
12632 0 : appendPQExpBufferStr(q, ",\n STORAGE = external");
12633 150 : else if (*typstorage == TYPSTORAGE_EXTENDED)
12634 132 : appendPQExpBufferStr(q, ",\n STORAGE = extended");
12635 18 : else if (*typstorage == TYPSTORAGE_MAIN)
12636 18 : appendPQExpBufferStr(q, ",\n STORAGE = main");
12637 :
12638 572 : if (strcmp(typbyval, "t") == 0)
12639 280 : appendPQExpBufferStr(q, ",\n PASSEDBYVALUE");
12640 :
12641 572 : appendPQExpBufferStr(q, "\n);\n");
12642 :
12643 572 : if (dopt->binary_upgrade)
12644 16 : binary_upgrade_extension_member(q, &tyinfo->dobj,
12645 : "TYPE", qtypname,
12646 16 : tyinfo->dobj.namespace->dobj.name);
12647 :
12648 572 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12649 572 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12650 572 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12651 : .namespace = tyinfo->dobj.namespace->dobj.name,
12652 : .owner = tyinfo->rolname,
12653 : .description = "TYPE",
12654 : .section = SECTION_PRE_DATA,
12655 : .createStmt = q->data,
12656 : .dropStmt = delq->data));
12657 :
12658 : /* Dump Type Comments and Security Labels */
12659 572 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12660 502 : dumpComment(fout, "TYPE", qtypname,
12661 502 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12662 502 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12663 :
12664 572 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12665 0 : dumpSecLabel(fout, "TYPE", qtypname,
12666 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12667 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12668 :
12669 572 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12670 76 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12671 : qtypname, NULL,
12672 76 : tyinfo->dobj.namespace->dobj.name,
12673 76 : NULL, tyinfo->rolname, &tyinfo->dacl);
12674 :
12675 572 : PQclear(res);
12676 572 : destroyPQExpBuffer(q);
12677 572 : destroyPQExpBuffer(delq);
12678 572 : destroyPQExpBuffer(query);
12679 572 : free(qtypname);
12680 572 : free(qualtypname);
12681 572 : }
12682 :
12683 : /*
12684 : * dumpDomain
12685 : * writes out to fout the queries to recreate a user-defined domain
12686 : */
12687 : static void
12688 316 : dumpDomain(Archive *fout, const TypeInfo *tyinfo)
12689 : {
12690 316 : DumpOptions *dopt = fout->dopt;
12691 316 : PQExpBuffer q = createPQExpBuffer();
12692 316 : PQExpBuffer delq = createPQExpBuffer();
12693 316 : PQExpBuffer query = createPQExpBuffer();
12694 : PGresult *res;
12695 : int i;
12696 : char *qtypname;
12697 : char *qualtypname;
12698 : char *typnotnull;
12699 : char *typdefn;
12700 : char *typdefault;
12701 : Oid typcollation;
12702 316 : bool typdefault_is_literal = false;
12703 :
12704 316 : if (!fout->is_prepared[PREPQUERY_DUMPDOMAIN])
12705 : {
12706 : /* Set up query for domain-specific details */
12707 86 : appendPQExpBufferStr(query,
12708 : "PREPARE dumpDomain(pg_catalog.oid) AS\n");
12709 :
12710 86 : appendPQExpBufferStr(query, "SELECT t.typnotnull, "
12711 : "pg_catalog.format_type(t.typbasetype, t.typtypmod) AS typdefn, "
12712 : "pg_catalog.pg_get_expr(t.typdefaultbin, 'pg_catalog.pg_type'::pg_catalog.regclass) AS typdefaultbin, "
12713 : "t.typdefault, "
12714 : "CASE WHEN t.typcollation <> u.typcollation "
12715 : "THEN t.typcollation ELSE 0 END AS typcollation "
12716 : "FROM pg_catalog.pg_type t "
12717 : "LEFT JOIN pg_catalog.pg_type u ON (t.typbasetype = u.oid) "
12718 : "WHERE t.oid = $1");
12719 :
12720 86 : ExecuteSqlStatement(fout, query->data);
12721 :
12722 86 : fout->is_prepared[PREPQUERY_DUMPDOMAIN] = true;
12723 : }
12724 :
12725 316 : printfPQExpBuffer(query,
12726 : "EXECUTE dumpDomain('%u')",
12727 316 : tyinfo->dobj.catId.oid);
12728 :
12729 316 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
12730 :
12731 316 : typnotnull = PQgetvalue(res, 0, PQfnumber(res, "typnotnull"));
12732 316 : typdefn = PQgetvalue(res, 0, PQfnumber(res, "typdefn"));
12733 316 : if (!PQgetisnull(res, 0, PQfnumber(res, "typdefaultbin")))
12734 86 : typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefaultbin"));
12735 230 : else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
12736 : {
12737 0 : typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
12738 0 : typdefault_is_literal = true; /* it needs quotes */
12739 : }
12740 : else
12741 230 : typdefault = NULL;
12742 316 : typcollation = atooid(PQgetvalue(res, 0, PQfnumber(res, "typcollation")));
12743 :
12744 316 : if (dopt->binary_upgrade)
12745 50 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
12746 50 : tyinfo->dobj.catId.oid,
12747 : true, /* force array type */
12748 : false); /* force multirange type */
12749 :
12750 316 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12751 316 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
12752 :
12753 316 : appendPQExpBuffer(q,
12754 : "CREATE DOMAIN %s AS %s",
12755 : qualtypname,
12756 : typdefn);
12757 :
12758 : /* Print collation only if different from base type's collation */
12759 316 : if (OidIsValid(typcollation))
12760 : {
12761 : CollInfo *coll;
12762 :
12763 76 : coll = findCollationByOid(typcollation);
12764 76 : if (coll)
12765 76 : appendPQExpBuffer(q, " COLLATE %s", fmtQualifiedDumpable(coll));
12766 : }
12767 :
12768 : /*
12769 : * Print a not-null constraint if there's one. In servers older than 17
12770 : * these don't have names, so just print it unadorned; in newer ones they
12771 : * do, but most of the time it's going to be the standard generated one,
12772 : * so omit the name in that case also.
12773 : */
12774 316 : if (typnotnull[0] == 't')
12775 : {
12776 106 : if (fout->remoteVersion < 170000 || tyinfo->notnull == NULL)
12777 0 : appendPQExpBufferStr(q, " NOT NULL");
12778 : else
12779 : {
12780 106 : ConstraintInfo *notnull = tyinfo->notnull;
12781 :
12782 106 : if (!notnull->separate)
12783 : {
12784 : char *default_name;
12785 :
12786 : /* XXX should match ChooseConstraintName better */
12787 106 : default_name = psprintf("%s_not_null", tyinfo->dobj.name);
12788 :
12789 106 : if (strcmp(default_name, notnull->dobj.name) == 0)
12790 30 : appendPQExpBufferStr(q, " NOT NULL");
12791 : else
12792 76 : appendPQExpBuffer(q, " CONSTRAINT %s %s",
12793 76 : fmtId(notnull->dobj.name), notnull->condef);
12794 106 : free(default_name);
12795 : }
12796 : }
12797 : }
12798 :
12799 316 : if (typdefault != NULL)
12800 : {
12801 86 : appendPQExpBufferStr(q, " DEFAULT ");
12802 86 : if (typdefault_is_literal)
12803 0 : appendStringLiteralAH(q, typdefault, fout);
12804 : else
12805 86 : appendPQExpBufferStr(q, typdefault);
12806 : }
12807 :
12808 316 : PQclear(res);
12809 :
12810 : /*
12811 : * Add any CHECK constraints for the domain
12812 : */
12813 542 : for (i = 0; i < tyinfo->nDomChecks; i++)
12814 : {
12815 226 : ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
12816 :
12817 226 : if (!domcheck->separate && domcheck->contype == 'c')
12818 216 : appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s",
12819 216 : fmtId(domcheck->dobj.name), domcheck->condef);
12820 : }
12821 :
12822 316 : appendPQExpBufferStr(q, ";\n");
12823 :
12824 316 : appendPQExpBuffer(delq, "DROP DOMAIN %s;\n", qualtypname);
12825 :
12826 316 : if (dopt->binary_upgrade)
12827 50 : binary_upgrade_extension_member(q, &tyinfo->dobj,
12828 : "DOMAIN", qtypname,
12829 50 : tyinfo->dobj.namespace->dobj.name);
12830 :
12831 316 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12832 316 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12833 316 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12834 : .namespace = tyinfo->dobj.namespace->dobj.name,
12835 : .owner = tyinfo->rolname,
12836 : .description = "DOMAIN",
12837 : .section = SECTION_PRE_DATA,
12838 : .createStmt = q->data,
12839 : .dropStmt = delq->data));
12840 :
12841 : /* Dump Domain Comments and Security Labels */
12842 316 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12843 0 : dumpComment(fout, "DOMAIN", qtypname,
12844 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12845 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12846 :
12847 316 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12848 0 : dumpSecLabel(fout, "DOMAIN", qtypname,
12849 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12850 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12851 :
12852 316 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12853 76 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12854 : qtypname, NULL,
12855 76 : tyinfo->dobj.namespace->dobj.name,
12856 76 : NULL, tyinfo->rolname, &tyinfo->dacl);
12857 :
12858 : /* Dump any per-constraint comments */
12859 542 : for (i = 0; i < tyinfo->nDomChecks; i++)
12860 : {
12861 226 : ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
12862 : PQExpBuffer conprefix;
12863 :
12864 : /* but only if the constraint itself was dumped here */
12865 226 : if (domcheck->separate)
12866 10 : continue;
12867 :
12868 216 : conprefix = createPQExpBuffer();
12869 216 : appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
12870 216 : fmtId(domcheck->dobj.name));
12871 :
12872 216 : if (domcheck->dobj.dump & DUMP_COMPONENT_COMMENT)
12873 76 : dumpComment(fout, conprefix->data, qtypname,
12874 76 : tyinfo->dobj.namespace->dobj.name,
12875 76 : tyinfo->rolname,
12876 76 : domcheck->dobj.catId, 0, tyinfo->dobj.dumpId);
12877 :
12878 216 : destroyPQExpBuffer(conprefix);
12879 : }
12880 :
12881 : /*
12882 : * And a comment on the not-null constraint, if there's one -- but only if
12883 : * the constraint itself was dumped here
12884 : */
12885 316 : if (tyinfo->notnull != NULL && !tyinfo->notnull->separate)
12886 : {
12887 106 : PQExpBuffer conprefix = createPQExpBuffer();
12888 :
12889 106 : appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
12890 106 : fmtId(tyinfo->notnull->dobj.name));
12891 :
12892 106 : if (tyinfo->notnull->dobj.dump & DUMP_COMPONENT_COMMENT)
12893 76 : dumpComment(fout, conprefix->data, qtypname,
12894 76 : tyinfo->dobj.namespace->dobj.name,
12895 76 : tyinfo->rolname,
12896 76 : tyinfo->notnull->dobj.catId, 0, tyinfo->dobj.dumpId);
12897 106 : destroyPQExpBuffer(conprefix);
12898 : }
12899 :
12900 316 : destroyPQExpBuffer(q);
12901 316 : destroyPQExpBuffer(delq);
12902 316 : destroyPQExpBuffer(query);
12903 316 : free(qtypname);
12904 316 : free(qualtypname);
12905 316 : }
12906 :
12907 : /*
12908 : * dumpCompositeType
12909 : * writes out to fout the queries to recreate a user-defined stand-alone
12910 : * composite type
12911 : */
12912 : static void
12913 272 : dumpCompositeType(Archive *fout, const TypeInfo *tyinfo)
12914 : {
12915 272 : DumpOptions *dopt = fout->dopt;
12916 272 : PQExpBuffer q = createPQExpBuffer();
12917 272 : PQExpBuffer dropped = createPQExpBuffer();
12918 272 : PQExpBuffer delq = createPQExpBuffer();
12919 272 : PQExpBuffer query = createPQExpBuffer();
12920 : PGresult *res;
12921 : char *qtypname;
12922 : char *qualtypname;
12923 : int ntups;
12924 : int i_attname;
12925 : int i_atttypdefn;
12926 : int i_attlen;
12927 : int i_attalign;
12928 : int i_attisdropped;
12929 : int i_attcollation;
12930 : int i;
12931 : int actual_atts;
12932 :
12933 272 : if (!fout->is_prepared[PREPQUERY_DUMPCOMPOSITETYPE])
12934 : {
12935 : /*
12936 : * Set up query for type-specific details.
12937 : *
12938 : * Since we only want to dump COLLATE clauses for attributes whose
12939 : * collation is different from their type's default, we use a CASE
12940 : * here to suppress uninteresting attcollations cheaply. atttypid
12941 : * will be 0 for dropped columns; collation does not matter for those.
12942 : */
12943 122 : appendPQExpBufferStr(query,
12944 : "PREPARE dumpCompositeType(pg_catalog.oid) AS\n"
12945 : "SELECT a.attname, a.attnum, "
12946 : "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
12947 : "a.attlen, a.attalign, a.attisdropped, "
12948 : "CASE WHEN a.attcollation <> at.typcollation "
12949 : "THEN a.attcollation ELSE 0 END AS attcollation "
12950 : "FROM pg_catalog.pg_type ct "
12951 : "JOIN pg_catalog.pg_attribute a ON a.attrelid = ct.typrelid "
12952 : "LEFT JOIN pg_catalog.pg_type at ON at.oid = a.atttypid "
12953 : "WHERE ct.oid = $1 "
12954 : "ORDER BY a.attnum");
12955 :
12956 122 : ExecuteSqlStatement(fout, query->data);
12957 :
12958 122 : fout->is_prepared[PREPQUERY_DUMPCOMPOSITETYPE] = true;
12959 : }
12960 :
12961 272 : printfPQExpBuffer(query,
12962 : "EXECUTE dumpCompositeType('%u')",
12963 272 : tyinfo->dobj.catId.oid);
12964 :
12965 272 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
12966 :
12967 272 : ntups = PQntuples(res);
12968 :
12969 272 : i_attname = PQfnumber(res, "attname");
12970 272 : i_atttypdefn = PQfnumber(res, "atttypdefn");
12971 272 : i_attlen = PQfnumber(res, "attlen");
12972 272 : i_attalign = PQfnumber(res, "attalign");
12973 272 : i_attisdropped = PQfnumber(res, "attisdropped");
12974 272 : i_attcollation = PQfnumber(res, "attcollation");
12975 :
12976 272 : if (dopt->binary_upgrade)
12977 : {
12978 36 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
12979 36 : tyinfo->dobj.catId.oid,
12980 : false, false);
12981 36 : binary_upgrade_set_pg_class_oids(fout, q, tyinfo->typrelid);
12982 : }
12983 :
12984 272 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12985 272 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
12986 :
12987 272 : appendPQExpBuffer(q, "CREATE TYPE %s AS (",
12988 : qualtypname);
12989 :
12990 272 : actual_atts = 0;
12991 860 : for (i = 0; i < ntups; i++)
12992 : {
12993 : char *attname;
12994 : char *atttypdefn;
12995 : char *attlen;
12996 : char *attalign;
12997 : bool attisdropped;
12998 : Oid attcollation;
12999 :
13000 588 : attname = PQgetvalue(res, i, i_attname);
13001 588 : atttypdefn = PQgetvalue(res, i, i_atttypdefn);
13002 588 : attlen = PQgetvalue(res, i, i_attlen);
13003 588 : attalign = PQgetvalue(res, i, i_attalign);
13004 588 : attisdropped = (PQgetvalue(res, i, i_attisdropped)[0] == 't');
13005 588 : attcollation = atooid(PQgetvalue(res, i, i_attcollation));
13006 :
13007 588 : if (attisdropped && !dopt->binary_upgrade)
13008 16 : continue;
13009 :
13010 : /* Format properly if not first attr */
13011 572 : if (actual_atts++ > 0)
13012 300 : appendPQExpBufferChar(q, ',');
13013 572 : appendPQExpBufferStr(q, "\n\t");
13014 :
13015 572 : if (!attisdropped)
13016 : {
13017 568 : appendPQExpBuffer(q, "%s %s", fmtId(attname), atttypdefn);
13018 :
13019 : /* Add collation if not default for the column type */
13020 568 : if (OidIsValid(attcollation))
13021 : {
13022 : CollInfo *coll;
13023 :
13024 0 : coll = findCollationByOid(attcollation);
13025 0 : if (coll)
13026 0 : appendPQExpBuffer(q, " COLLATE %s",
13027 0 : fmtQualifiedDumpable(coll));
13028 : }
13029 : }
13030 : else
13031 : {
13032 : /*
13033 : * This is a dropped attribute and we're in binary_upgrade mode.
13034 : * Insert a placeholder for it in the CREATE TYPE command, and set
13035 : * length and alignment with direct UPDATE to the catalogs
13036 : * afterwards. See similar code in dumpTableSchema().
13037 : */
13038 4 : appendPQExpBuffer(q, "%s INTEGER /* dummy */", fmtId(attname));
13039 :
13040 : /* stash separately for insertion after the CREATE TYPE */
13041 4 : appendPQExpBufferStr(dropped,
13042 : "\n-- For binary upgrade, recreate dropped column.\n");
13043 4 : appendPQExpBuffer(dropped, "UPDATE pg_catalog.pg_attribute\n"
13044 : "SET attlen = %s, "
13045 : "attalign = '%s', attbyval = false\n"
13046 : "WHERE attname = ", attlen, attalign);
13047 4 : appendStringLiteralAH(dropped, attname, fout);
13048 4 : appendPQExpBufferStr(dropped, "\n AND attrelid = ");
13049 4 : appendStringLiteralAH(dropped, qualtypname, fout);
13050 4 : appendPQExpBufferStr(dropped, "::pg_catalog.regclass;\n");
13051 :
13052 4 : appendPQExpBuffer(dropped, "ALTER TYPE %s ",
13053 : qualtypname);
13054 4 : appendPQExpBuffer(dropped, "DROP ATTRIBUTE %s;\n",
13055 : fmtId(attname));
13056 : }
13057 : }
13058 272 : appendPQExpBufferStr(q, "\n);\n");
13059 272 : appendPQExpBufferStr(q, dropped->data);
13060 :
13061 272 : appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
13062 :
13063 272 : if (dopt->binary_upgrade)
13064 36 : binary_upgrade_extension_member(q, &tyinfo->dobj,
13065 : "TYPE", qtypname,
13066 36 : tyinfo->dobj.namespace->dobj.name);
13067 :
13068 272 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13069 238 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
13070 238 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
13071 : .namespace = tyinfo->dobj.namespace->dobj.name,
13072 : .owner = tyinfo->rolname,
13073 : .description = "TYPE",
13074 : .section = SECTION_PRE_DATA,
13075 : .createStmt = q->data,
13076 : .dropStmt = delq->data));
13077 :
13078 :
13079 : /* Dump Type Comments and Security Labels */
13080 272 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13081 76 : dumpComment(fout, "TYPE", qtypname,
13082 76 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
13083 76 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
13084 :
13085 272 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
13086 0 : dumpSecLabel(fout, "TYPE", qtypname,
13087 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
13088 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
13089 :
13090 272 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
13091 36 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
13092 : qtypname, NULL,
13093 36 : tyinfo->dobj.namespace->dobj.name,
13094 36 : NULL, tyinfo->rolname, &tyinfo->dacl);
13095 :
13096 : /* Dump any per-column comments */
13097 272 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13098 76 : dumpCompositeTypeColComments(fout, tyinfo, res);
13099 :
13100 272 : PQclear(res);
13101 272 : destroyPQExpBuffer(q);
13102 272 : destroyPQExpBuffer(dropped);
13103 272 : destroyPQExpBuffer(delq);
13104 272 : destroyPQExpBuffer(query);
13105 272 : free(qtypname);
13106 272 : free(qualtypname);
13107 272 : }
13108 :
13109 : /*
13110 : * dumpCompositeTypeColComments
13111 : * writes out to fout the queries to recreate comments on the columns of
13112 : * a user-defined stand-alone composite type.
13113 : *
13114 : * The caller has already made a query to collect the names and attnums
13115 : * of the type's columns, so we just pass that result into here rather
13116 : * than reading them again.
13117 : */
13118 : static void
13119 76 : dumpCompositeTypeColComments(Archive *fout, const TypeInfo *tyinfo,
13120 : PGresult *res)
13121 : {
13122 : CommentItem *comments;
13123 : int ncomments;
13124 : PQExpBuffer query;
13125 : PQExpBuffer target;
13126 : int i;
13127 : int ntups;
13128 : int i_attname;
13129 : int i_attnum;
13130 : int i_attisdropped;
13131 :
13132 : /* do nothing, if --no-comments is supplied */
13133 76 : if (fout->dopt->no_comments)
13134 0 : return;
13135 :
13136 : /* Search for comments associated with type's pg_class OID */
13137 76 : ncomments = findComments(RelationRelationId, tyinfo->typrelid,
13138 : &comments);
13139 :
13140 : /* If no comments exist, we're done */
13141 76 : if (ncomments <= 0)
13142 0 : return;
13143 :
13144 : /* Build COMMENT ON statements */
13145 76 : query = createPQExpBuffer();
13146 76 : target = createPQExpBuffer();
13147 :
13148 76 : ntups = PQntuples(res);
13149 76 : i_attnum = PQfnumber(res, "attnum");
13150 76 : i_attname = PQfnumber(res, "attname");
13151 76 : i_attisdropped = PQfnumber(res, "attisdropped");
13152 152 : while (ncomments > 0)
13153 : {
13154 : const char *attname;
13155 :
13156 76 : attname = NULL;
13157 76 : for (i = 0; i < ntups; i++)
13158 : {
13159 76 : if (atoi(PQgetvalue(res, i, i_attnum)) == comments->objsubid &&
13160 76 : PQgetvalue(res, i, i_attisdropped)[0] != 't')
13161 : {
13162 76 : attname = PQgetvalue(res, i, i_attname);
13163 76 : break;
13164 : }
13165 : }
13166 76 : if (attname) /* just in case we don't find it */
13167 : {
13168 76 : const char *descr = comments->descr;
13169 :
13170 76 : resetPQExpBuffer(target);
13171 76 : appendPQExpBuffer(target, "COLUMN %s.",
13172 76 : fmtId(tyinfo->dobj.name));
13173 76 : appendPQExpBufferStr(target, fmtId(attname));
13174 :
13175 76 : resetPQExpBuffer(query);
13176 76 : appendPQExpBuffer(query, "COMMENT ON COLUMN %s.",
13177 76 : fmtQualifiedDumpable(tyinfo));
13178 76 : appendPQExpBuffer(query, "%s IS ", fmtId(attname));
13179 76 : appendStringLiteralAH(query, descr, fout);
13180 76 : appendPQExpBufferStr(query, ";\n");
13181 :
13182 76 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
13183 76 : ARCHIVE_OPTS(.tag = target->data,
13184 : .namespace = tyinfo->dobj.namespace->dobj.name,
13185 : .owner = tyinfo->rolname,
13186 : .description = "COMMENT",
13187 : .section = SECTION_NONE,
13188 : .createStmt = query->data,
13189 : .deps = &(tyinfo->dobj.dumpId),
13190 : .nDeps = 1));
13191 : }
13192 :
13193 76 : comments++;
13194 76 : ncomments--;
13195 : }
13196 :
13197 76 : destroyPQExpBuffer(query);
13198 76 : destroyPQExpBuffer(target);
13199 : }
13200 :
13201 : /*
13202 : * dumpShellType
13203 : * writes out to fout the queries to create a shell type
13204 : *
13205 : * We dump a shell definition in advance of the I/O functions for the type.
13206 : */
13207 : static void
13208 158 : dumpShellType(Archive *fout, const ShellTypeInfo *stinfo)
13209 : {
13210 158 : DumpOptions *dopt = fout->dopt;
13211 : PQExpBuffer q;
13212 :
13213 : /* Do nothing if not dumping schema */
13214 158 : if (!dopt->dumpSchema)
13215 12 : return;
13216 :
13217 146 : q = createPQExpBuffer();
13218 :
13219 : /*
13220 : * Note the lack of a DROP command for the shell type; any required DROP
13221 : * is driven off the base type entry, instead. This interacts with
13222 : * _printTocEntry()'s use of the presence of a DROP command to decide
13223 : * whether an entry needs an ALTER OWNER command. We don't want to alter
13224 : * the shell type's owner immediately on creation; that should happen only
13225 : * after it's filled in, otherwise the backend complains.
13226 : */
13227 :
13228 146 : if (dopt->binary_upgrade)
13229 16 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
13230 16 : stinfo->baseType->dobj.catId.oid,
13231 : false, false);
13232 :
13233 146 : appendPQExpBuffer(q, "CREATE TYPE %s;\n",
13234 146 : fmtQualifiedDumpable(stinfo));
13235 :
13236 146 : if (stinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13237 146 : ArchiveEntry(fout, stinfo->dobj.catId, stinfo->dobj.dumpId,
13238 146 : ARCHIVE_OPTS(.tag = stinfo->dobj.name,
13239 : .namespace = stinfo->dobj.namespace->dobj.name,
13240 : .owner = stinfo->baseType->rolname,
13241 : .description = "SHELL TYPE",
13242 : .section = SECTION_PRE_DATA,
13243 : .createStmt = q->data));
13244 :
13245 146 : destroyPQExpBuffer(q);
13246 : }
13247 :
13248 : /*
13249 : * dumpProcLang
13250 : * writes out to fout the queries to recreate a user-defined
13251 : * procedural language
13252 : */
13253 : static void
13254 188 : dumpProcLang(Archive *fout, const ProcLangInfo *plang)
13255 : {
13256 188 : DumpOptions *dopt = fout->dopt;
13257 : PQExpBuffer defqry;
13258 : PQExpBuffer delqry;
13259 : bool useParams;
13260 : char *qlanname;
13261 : FuncInfo *funcInfo;
13262 188 : FuncInfo *inlineInfo = NULL;
13263 188 : FuncInfo *validatorInfo = NULL;
13264 :
13265 : /* Do nothing if not dumping schema */
13266 188 : if (!dopt->dumpSchema)
13267 26 : return;
13268 :
13269 : /*
13270 : * Try to find the support function(s). It is not an error if we don't
13271 : * find them --- if the functions are in the pg_catalog schema, as is
13272 : * standard in 8.1 and up, then we won't have loaded them. (In this case
13273 : * we will emit a parameterless CREATE LANGUAGE command, which will
13274 : * require PL template knowledge in the backend to reload.)
13275 : */
13276 :
13277 162 : funcInfo = findFuncByOid(plang->lanplcallfoid);
13278 162 : if (funcInfo != NULL && !funcInfo->dobj.dump)
13279 4 : funcInfo = NULL; /* treat not-dumped same as not-found */
13280 :
13281 162 : if (OidIsValid(plang->laninline))
13282 : {
13283 88 : inlineInfo = findFuncByOid(plang->laninline);
13284 88 : if (inlineInfo != NULL && !inlineInfo->dobj.dump)
13285 2 : inlineInfo = NULL;
13286 : }
13287 :
13288 162 : if (OidIsValid(plang->lanvalidator))
13289 : {
13290 88 : validatorInfo = findFuncByOid(plang->lanvalidator);
13291 88 : if (validatorInfo != NULL && !validatorInfo->dobj.dump)
13292 2 : validatorInfo = NULL;
13293 : }
13294 :
13295 : /*
13296 : * If the functions are dumpable then emit a complete CREATE LANGUAGE with
13297 : * parameters. Otherwise, we'll write a parameterless command, which will
13298 : * be interpreted as CREATE EXTENSION.
13299 : */
13300 72 : useParams = (funcInfo != NULL &&
13301 306 : (inlineInfo != NULL || !OidIsValid(plang->laninline)) &&
13302 72 : (validatorInfo != NULL || !OidIsValid(plang->lanvalidator)));
13303 :
13304 162 : defqry = createPQExpBuffer();
13305 162 : delqry = createPQExpBuffer();
13306 :
13307 162 : qlanname = pg_strdup(fmtId(plang->dobj.name));
13308 :
13309 162 : appendPQExpBuffer(delqry, "DROP PROCEDURAL LANGUAGE %s;\n",
13310 : qlanname);
13311 :
13312 162 : if (useParams)
13313 : {
13314 72 : appendPQExpBuffer(defqry, "CREATE %sPROCEDURAL LANGUAGE %s",
13315 72 : plang->lanpltrusted ? "TRUSTED " : "",
13316 : qlanname);
13317 72 : appendPQExpBuffer(defqry, " HANDLER %s",
13318 72 : fmtQualifiedDumpable(funcInfo));
13319 72 : if (OidIsValid(plang->laninline))
13320 0 : appendPQExpBuffer(defqry, " INLINE %s",
13321 0 : fmtQualifiedDumpable(inlineInfo));
13322 72 : if (OidIsValid(plang->lanvalidator))
13323 0 : appendPQExpBuffer(defqry, " VALIDATOR %s",
13324 0 : fmtQualifiedDumpable(validatorInfo));
13325 : }
13326 : else
13327 : {
13328 : /*
13329 : * If not dumping parameters, then use CREATE OR REPLACE so that the
13330 : * command will not fail if the language is preinstalled in the target
13331 : * database.
13332 : *
13333 : * Modern servers will interpret this as CREATE EXTENSION IF NOT
13334 : * EXISTS; perhaps we should emit that instead? But it might just add
13335 : * confusion.
13336 : */
13337 90 : appendPQExpBuffer(defqry, "CREATE OR REPLACE PROCEDURAL LANGUAGE %s",
13338 : qlanname);
13339 : }
13340 162 : appendPQExpBufferStr(defqry, ";\n");
13341 :
13342 162 : if (dopt->binary_upgrade)
13343 4 : binary_upgrade_extension_member(defqry, &plang->dobj,
13344 : "LANGUAGE", qlanname, NULL);
13345 :
13346 162 : if (plang->dobj.dump & DUMP_COMPONENT_DEFINITION)
13347 74 : ArchiveEntry(fout, plang->dobj.catId, plang->dobj.dumpId,
13348 74 : ARCHIVE_OPTS(.tag = plang->dobj.name,
13349 : .owner = plang->lanowner,
13350 : .description = "PROCEDURAL LANGUAGE",
13351 : .section = SECTION_PRE_DATA,
13352 : .createStmt = defqry->data,
13353 : .dropStmt = delqry->data,
13354 : ));
13355 :
13356 : /* Dump Proc Lang Comments and Security Labels */
13357 162 : if (plang->dobj.dump & DUMP_COMPONENT_COMMENT)
13358 0 : dumpComment(fout, "LANGUAGE", qlanname,
13359 0 : NULL, plang->lanowner,
13360 0 : plang->dobj.catId, 0, plang->dobj.dumpId);
13361 :
13362 162 : if (plang->dobj.dump & DUMP_COMPONENT_SECLABEL)
13363 0 : dumpSecLabel(fout, "LANGUAGE", qlanname,
13364 0 : NULL, plang->lanowner,
13365 0 : plang->dobj.catId, 0, plang->dobj.dumpId);
13366 :
13367 162 : if (plang->lanpltrusted && plang->dobj.dump & DUMP_COMPONENT_ACL)
13368 88 : dumpACL(fout, plang->dobj.dumpId, InvalidDumpId, "LANGUAGE",
13369 : qlanname, NULL, NULL,
13370 88 : NULL, plang->lanowner, &plang->dacl);
13371 :
13372 162 : free(qlanname);
13373 :
13374 162 : destroyPQExpBuffer(defqry);
13375 162 : destroyPQExpBuffer(delqry);
13376 : }
13377 :
13378 : /*
13379 : * format_function_arguments: generate function name and argument list
13380 : *
13381 : * This is used when we can rely on pg_get_function_arguments to format
13382 : * the argument list. Note, however, that pg_get_function_arguments
13383 : * does not special-case zero-argument aggregates.
13384 : */
13385 : static char *
13386 8428 : format_function_arguments(const FuncInfo *finfo, const char *funcargs, bool is_agg)
13387 : {
13388 : PQExpBufferData fn;
13389 :
13390 8428 : initPQExpBuffer(&fn);
13391 8428 : appendPQExpBufferStr(&fn, fmtId(finfo->dobj.name));
13392 8428 : if (is_agg && finfo->nargs == 0)
13393 160 : appendPQExpBufferStr(&fn, "(*)");
13394 : else
13395 8268 : appendPQExpBuffer(&fn, "(%s)", funcargs);
13396 8428 : return fn.data;
13397 : }
13398 :
13399 : /*
13400 : * format_function_signature: generate function name and argument list
13401 : *
13402 : * Only a minimal list of input argument types is generated; this is
13403 : * sufficient to reference the function, but not to define it.
13404 : *
13405 : * If honor_quotes is false then the function name is never quoted.
13406 : * This is appropriate for use in TOC tags, but not in SQL commands.
13407 : */
13408 : static char *
13409 4466 : format_function_signature(Archive *fout, const FuncInfo *finfo, bool honor_quotes)
13410 : {
13411 : PQExpBufferData fn;
13412 : int j;
13413 :
13414 4466 : initPQExpBuffer(&fn);
13415 4466 : if (honor_quotes)
13416 834 : appendPQExpBuffer(&fn, "%s(", fmtId(finfo->dobj.name));
13417 : else
13418 3632 : appendPQExpBuffer(&fn, "%s(", finfo->dobj.name);
13419 8154 : for (j = 0; j < finfo->nargs; j++)
13420 : {
13421 3688 : if (j > 0)
13422 844 : appendPQExpBufferStr(&fn, ", ");
13423 :
13424 3688 : appendPQExpBufferStr(&fn,
13425 3688 : getFormattedTypeName(fout, finfo->argtypes[j],
13426 : zeroIsError));
13427 : }
13428 4466 : appendPQExpBufferChar(&fn, ')');
13429 4466 : return fn.data;
13430 : }
13431 :
13432 :
13433 : /*
13434 : * dumpFunc:
13435 : * dump out one function
13436 : */
13437 : static void
13438 3756 : dumpFunc(Archive *fout, const FuncInfo *finfo)
13439 : {
13440 3756 : DumpOptions *dopt = fout->dopt;
13441 : PQExpBuffer query;
13442 : PQExpBuffer q;
13443 : PQExpBuffer delqry;
13444 : PQExpBuffer asPart;
13445 : PGresult *res;
13446 : char *funcsig; /* identity signature */
13447 3756 : char *funcfullsig = NULL; /* full signature */
13448 : char *funcsig_tag;
13449 : char *qual_funcsig;
13450 : char *proretset;
13451 : char *prosrc;
13452 : char *probin;
13453 : char *prosqlbody;
13454 : char *funcargs;
13455 : char *funciargs;
13456 : char *funcresult;
13457 : char *protrftypes;
13458 : char *prokind;
13459 : char *provolatile;
13460 : char *proisstrict;
13461 : char *prosecdef;
13462 : char *proleakproof;
13463 : char *proconfig;
13464 : char *procost;
13465 : char *prorows;
13466 : char *prosupport;
13467 : char *proparallel;
13468 : char *lanname;
13469 3756 : char **configitems = NULL;
13470 3756 : int nconfigitems = 0;
13471 : const char *keyword;
13472 :
13473 : /* Do nothing if not dumping schema */
13474 3756 : if (!dopt->dumpSchema)
13475 124 : return;
13476 :
13477 3632 : query = createPQExpBuffer();
13478 3632 : q = createPQExpBuffer();
13479 3632 : delqry = createPQExpBuffer();
13480 3632 : asPart = createPQExpBuffer();
13481 :
13482 3632 : if (!fout->is_prepared[PREPQUERY_DUMPFUNC])
13483 : {
13484 : /* Set up query for function-specific details */
13485 144 : appendPQExpBufferStr(query,
13486 : "PREPARE dumpFunc(pg_catalog.oid) AS\n");
13487 :
13488 144 : appendPQExpBufferStr(query,
13489 : "SELECT\n"
13490 : "proretset,\n"
13491 : "prosrc,\n"
13492 : "probin,\n"
13493 : "provolatile,\n"
13494 : "proisstrict,\n"
13495 : "prosecdef,\n"
13496 : "lanname,\n"
13497 : "proconfig,\n"
13498 : "procost,\n"
13499 : "prorows,\n"
13500 : "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs,\n"
13501 : "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs,\n"
13502 : "pg_catalog.pg_get_function_result(p.oid) AS funcresult,\n"
13503 : "proleakproof,\n");
13504 :
13505 144 : if (fout->remoteVersion >= 90500)
13506 144 : appendPQExpBufferStr(query,
13507 : "array_to_string(protrftypes, ' ') AS protrftypes,\n");
13508 : else
13509 0 : appendPQExpBufferStr(query,
13510 : "NULL AS protrftypes,\n");
13511 :
13512 144 : if (fout->remoteVersion >= 90600)
13513 144 : appendPQExpBufferStr(query,
13514 : "proparallel,\n");
13515 : else
13516 0 : appendPQExpBufferStr(query,
13517 : "'u' AS proparallel,\n");
13518 :
13519 144 : if (fout->remoteVersion >= 110000)
13520 144 : appendPQExpBufferStr(query,
13521 : "prokind,\n");
13522 : else
13523 0 : appendPQExpBufferStr(query,
13524 : "CASE WHEN proiswindow THEN 'w' ELSE 'f' END AS prokind,\n");
13525 :
13526 144 : if (fout->remoteVersion >= 120000)
13527 144 : appendPQExpBufferStr(query,
13528 : "prosupport,\n");
13529 : else
13530 0 : appendPQExpBufferStr(query,
13531 : "'-' AS prosupport,\n");
13532 :
13533 144 : if (fout->remoteVersion >= 140000)
13534 144 : appendPQExpBufferStr(query,
13535 : "pg_get_function_sqlbody(p.oid) AS prosqlbody\n");
13536 : else
13537 0 : appendPQExpBufferStr(query,
13538 : "NULL AS prosqlbody\n");
13539 :
13540 144 : appendPQExpBufferStr(query,
13541 : "FROM pg_catalog.pg_proc p, pg_catalog.pg_language l\n"
13542 : "WHERE p.oid = $1 "
13543 : "AND l.oid = p.prolang");
13544 :
13545 144 : ExecuteSqlStatement(fout, query->data);
13546 :
13547 144 : fout->is_prepared[PREPQUERY_DUMPFUNC] = true;
13548 : }
13549 :
13550 3632 : printfPQExpBuffer(query,
13551 : "EXECUTE dumpFunc('%u')",
13552 3632 : finfo->dobj.catId.oid);
13553 :
13554 3632 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
13555 :
13556 3632 : proretset = PQgetvalue(res, 0, PQfnumber(res, "proretset"));
13557 3632 : if (PQgetisnull(res, 0, PQfnumber(res, "prosqlbody")))
13558 : {
13559 3524 : prosrc = PQgetvalue(res, 0, PQfnumber(res, "prosrc"));
13560 3524 : probin = PQgetvalue(res, 0, PQfnumber(res, "probin"));
13561 3524 : prosqlbody = NULL;
13562 : }
13563 : else
13564 : {
13565 108 : prosrc = NULL;
13566 108 : probin = NULL;
13567 108 : prosqlbody = PQgetvalue(res, 0, PQfnumber(res, "prosqlbody"));
13568 : }
13569 3632 : funcargs = PQgetvalue(res, 0, PQfnumber(res, "funcargs"));
13570 3632 : funciargs = PQgetvalue(res, 0, PQfnumber(res, "funciargs"));
13571 3632 : funcresult = PQgetvalue(res, 0, PQfnumber(res, "funcresult"));
13572 3632 : protrftypes = PQgetvalue(res, 0, PQfnumber(res, "protrftypes"));
13573 3632 : prokind = PQgetvalue(res, 0, PQfnumber(res, "prokind"));
13574 3632 : provolatile = PQgetvalue(res, 0, PQfnumber(res, "provolatile"));
13575 3632 : proisstrict = PQgetvalue(res, 0, PQfnumber(res, "proisstrict"));
13576 3632 : prosecdef = PQgetvalue(res, 0, PQfnumber(res, "prosecdef"));
13577 3632 : proleakproof = PQgetvalue(res, 0, PQfnumber(res, "proleakproof"));
13578 3632 : proconfig = PQgetvalue(res, 0, PQfnumber(res, "proconfig"));
13579 3632 : procost = PQgetvalue(res, 0, PQfnumber(res, "procost"));
13580 3632 : prorows = PQgetvalue(res, 0, PQfnumber(res, "prorows"));
13581 3632 : prosupport = PQgetvalue(res, 0, PQfnumber(res, "prosupport"));
13582 3632 : proparallel = PQgetvalue(res, 0, PQfnumber(res, "proparallel"));
13583 3632 : lanname = PQgetvalue(res, 0, PQfnumber(res, "lanname"));
13584 :
13585 : /*
13586 : * See backend/commands/functioncmds.c for details of how the 'AS' clause
13587 : * is used.
13588 : */
13589 3632 : if (prosqlbody)
13590 : {
13591 108 : appendPQExpBufferStr(asPart, prosqlbody);
13592 : }
13593 3524 : else if (probin[0] != '\0')
13594 : {
13595 286 : appendPQExpBufferStr(asPart, "AS ");
13596 286 : appendStringLiteralAH(asPart, probin, fout);
13597 286 : if (prosrc[0] != '\0')
13598 : {
13599 286 : appendPQExpBufferStr(asPart, ", ");
13600 :
13601 : /*
13602 : * where we have bin, use dollar quoting if allowed and src
13603 : * contains quote or backslash; else use regular quoting.
13604 : */
13605 286 : if (dopt->disable_dollar_quoting ||
13606 286 : (strchr(prosrc, '\'') == NULL && strchr(prosrc, '\\') == NULL))
13607 286 : appendStringLiteralAH(asPart, prosrc, fout);
13608 : else
13609 0 : appendStringLiteralDQ(asPart, prosrc, NULL);
13610 : }
13611 : }
13612 : else
13613 : {
13614 3238 : appendPQExpBufferStr(asPart, "AS ");
13615 : /* with no bin, dollar quote src unconditionally if allowed */
13616 3238 : if (dopt->disable_dollar_quoting)
13617 0 : appendStringLiteralAH(asPart, prosrc, fout);
13618 : else
13619 3238 : appendStringLiteralDQ(asPart, prosrc, NULL);
13620 : }
13621 :
13622 3632 : if (*proconfig)
13623 : {
13624 30 : if (!parsePGArray(proconfig, &configitems, &nconfigitems))
13625 0 : pg_fatal("could not parse %s array", "proconfig");
13626 : }
13627 : else
13628 : {
13629 3602 : configitems = NULL;
13630 3602 : nconfigitems = 0;
13631 : }
13632 :
13633 3632 : funcfullsig = format_function_arguments(finfo, funcargs, false);
13634 3632 : funcsig = format_function_arguments(finfo, funciargs, false);
13635 :
13636 3632 : funcsig_tag = format_function_signature(fout, finfo, false);
13637 :
13638 3632 : qual_funcsig = psprintf("%s.%s",
13639 3632 : fmtId(finfo->dobj.namespace->dobj.name),
13640 : funcsig);
13641 :
13642 3632 : if (prokind[0] == PROKIND_PROCEDURE)
13643 196 : keyword = "PROCEDURE";
13644 : else
13645 3436 : keyword = "FUNCTION"; /* works for window functions too */
13646 :
13647 3632 : appendPQExpBuffer(delqry, "DROP %s %s;\n",
13648 : keyword, qual_funcsig);
13649 :
13650 7264 : appendPQExpBuffer(q, "CREATE %s %s.%s",
13651 : keyword,
13652 3632 : fmtId(finfo->dobj.namespace->dobj.name),
13653 : funcfullsig ? funcfullsig :
13654 : funcsig);
13655 :
13656 3632 : if (prokind[0] == PROKIND_PROCEDURE)
13657 : /* no result type to output */ ;
13658 3436 : else if (funcresult)
13659 3436 : appendPQExpBuffer(q, " RETURNS %s", funcresult);
13660 : else
13661 0 : appendPQExpBuffer(q, " RETURNS %s%s",
13662 0 : (proretset[0] == 't') ? "SETOF " : "",
13663 0 : getFormattedTypeName(fout, finfo->prorettype,
13664 : zeroIsError));
13665 :
13666 3632 : appendPQExpBuffer(q, "\n LANGUAGE %s", fmtId(lanname));
13667 :
13668 3632 : if (*protrftypes)
13669 : {
13670 0 : Oid *typeids = pg_malloc(FUNC_MAX_ARGS * sizeof(Oid));
13671 : int i;
13672 :
13673 0 : appendPQExpBufferStr(q, " TRANSFORM ");
13674 0 : parseOidArray(protrftypes, typeids, FUNC_MAX_ARGS);
13675 0 : for (i = 0; typeids[i]; i++)
13676 : {
13677 0 : if (i != 0)
13678 0 : appendPQExpBufferStr(q, ", ");
13679 0 : appendPQExpBuffer(q, "FOR TYPE %s",
13680 0 : getFormattedTypeName(fout, typeids[i], zeroAsNone));
13681 : }
13682 :
13683 0 : free(typeids);
13684 : }
13685 :
13686 3632 : if (prokind[0] == PROKIND_WINDOW)
13687 10 : appendPQExpBufferStr(q, " WINDOW");
13688 :
13689 3632 : if (provolatile[0] != PROVOLATILE_VOLATILE)
13690 : {
13691 726 : if (provolatile[0] == PROVOLATILE_IMMUTABLE)
13692 684 : appendPQExpBufferStr(q, " IMMUTABLE");
13693 42 : else if (provolatile[0] == PROVOLATILE_STABLE)
13694 42 : appendPQExpBufferStr(q, " STABLE");
13695 0 : else if (provolatile[0] != PROVOLATILE_VOLATILE)
13696 0 : pg_fatal("unrecognized provolatile value for function \"%s\"",
13697 : finfo->dobj.name);
13698 : }
13699 :
13700 3632 : if (proisstrict[0] == 't')
13701 742 : appendPQExpBufferStr(q, " STRICT");
13702 :
13703 3632 : if (prosecdef[0] == 't')
13704 0 : appendPQExpBufferStr(q, " SECURITY DEFINER");
13705 :
13706 3632 : if (proleakproof[0] == 't')
13707 20 : appendPQExpBufferStr(q, " LEAKPROOF");
13708 :
13709 : /*
13710 : * COST and ROWS are emitted only if present and not default, so as not to
13711 : * break backwards-compatibility of the dump without need. Keep this code
13712 : * in sync with the defaults in functioncmds.c.
13713 : */
13714 3632 : if (strcmp(procost, "0") != 0)
13715 : {
13716 3632 : if (strcmp(lanname, "internal") == 0 || strcmp(lanname, "c") == 0)
13717 : {
13718 : /* default cost is 1 */
13719 792 : if (strcmp(procost, "1") != 0)
13720 0 : appendPQExpBuffer(q, " COST %s", procost);
13721 : }
13722 : else
13723 : {
13724 : /* default cost is 100 */
13725 2840 : if (strcmp(procost, "100") != 0)
13726 12 : appendPQExpBuffer(q, " COST %s", procost);
13727 : }
13728 : }
13729 3632 : if (proretset[0] == 't' &&
13730 386 : strcmp(prorows, "0") != 0 && strcmp(prorows, "1000") != 0)
13731 0 : appendPQExpBuffer(q, " ROWS %s", prorows);
13732 :
13733 3632 : if (strcmp(prosupport, "-") != 0)
13734 : {
13735 : /* We rely on regprocout to provide quoting and qualification */
13736 96 : appendPQExpBuffer(q, " SUPPORT %s", prosupport);
13737 : }
13738 :
13739 3632 : if (proparallel[0] != PROPARALLEL_UNSAFE)
13740 : {
13741 256 : if (proparallel[0] == PROPARALLEL_SAFE)
13742 246 : appendPQExpBufferStr(q, " PARALLEL SAFE");
13743 10 : else if (proparallel[0] == PROPARALLEL_RESTRICTED)
13744 10 : appendPQExpBufferStr(q, " PARALLEL RESTRICTED");
13745 0 : else if (proparallel[0] != PROPARALLEL_UNSAFE)
13746 0 : pg_fatal("unrecognized proparallel value for function \"%s\"",
13747 : finfo->dobj.name);
13748 : }
13749 :
13750 3702 : for (int i = 0; i < nconfigitems; i++)
13751 : {
13752 : /* we feel free to scribble on configitems[] here */
13753 70 : char *configitem = configitems[i];
13754 : char *pos;
13755 :
13756 70 : pos = strchr(configitem, '=');
13757 70 : if (pos == NULL)
13758 0 : continue;
13759 70 : *pos++ = '\0';
13760 70 : appendPQExpBuffer(q, "\n SET %s TO ", fmtId(configitem));
13761 :
13762 : /*
13763 : * Variables that are marked GUC_LIST_QUOTE were already fully quoted
13764 : * by flatten_set_variable_args() before they were put into the
13765 : * proconfig array. However, because the quoting rules used there
13766 : * aren't exactly like SQL's, we have to break the list value apart
13767 : * and then quote the elements as string literals. (The elements may
13768 : * be double-quoted as-is, but we can't just feed them to the SQL
13769 : * parser; it would do the wrong thing with elements that are
13770 : * zero-length or longer than NAMEDATALEN.)
13771 : *
13772 : * Variables that are not so marked should just be emitted as simple
13773 : * string literals. If the variable is not known to
13774 : * variable_is_guc_list_quote(), we'll do that; this makes it unsafe
13775 : * to use GUC_LIST_QUOTE for extension variables.
13776 : */
13777 70 : if (variable_is_guc_list_quote(configitem))
13778 : {
13779 : char **namelist;
13780 : char **nameptr;
13781 :
13782 : /* Parse string into list of identifiers */
13783 : /* this shouldn't fail really */
13784 20 : if (SplitGUCList(pos, ',', &namelist))
13785 : {
13786 70 : for (nameptr = namelist; *nameptr; nameptr++)
13787 : {
13788 50 : if (nameptr != namelist)
13789 30 : appendPQExpBufferStr(q, ", ");
13790 50 : appendStringLiteralAH(q, *nameptr, fout);
13791 : }
13792 : }
13793 20 : pg_free(namelist);
13794 : }
13795 : else
13796 50 : appendStringLiteralAH(q, pos, fout);
13797 : }
13798 :
13799 3632 : appendPQExpBuffer(q, "\n %s;\n", asPart->data);
13800 :
13801 3632 : append_depends_on_extension(fout, q, &finfo->dobj,
13802 : "pg_catalog.pg_proc", keyword,
13803 : qual_funcsig);
13804 :
13805 3632 : if (dopt->binary_upgrade)
13806 586 : binary_upgrade_extension_member(q, &finfo->dobj,
13807 : keyword, funcsig,
13808 586 : finfo->dobj.namespace->dobj.name);
13809 :
13810 3632 : if (finfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13811 3416 : ArchiveEntry(fout, finfo->dobj.catId, finfo->dobj.dumpId,
13812 3416 : ARCHIVE_OPTS(.tag = funcsig_tag,
13813 : .namespace = finfo->dobj.namespace->dobj.name,
13814 : .owner = finfo->rolname,
13815 : .description = keyword,
13816 : .section = finfo->postponed_def ?
13817 : SECTION_POST_DATA : SECTION_PRE_DATA,
13818 : .createStmt = q->data,
13819 : .dropStmt = delqry->data));
13820 :
13821 : /* Dump Function Comments and Security Labels */
13822 3632 : if (finfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13823 18 : dumpComment(fout, keyword, funcsig,
13824 18 : finfo->dobj.namespace->dobj.name, finfo->rolname,
13825 18 : finfo->dobj.catId, 0, finfo->dobj.dumpId);
13826 :
13827 3632 : if (finfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
13828 0 : dumpSecLabel(fout, keyword, funcsig,
13829 0 : finfo->dobj.namespace->dobj.name, finfo->rolname,
13830 0 : finfo->dobj.catId, 0, finfo->dobj.dumpId);
13831 :
13832 3632 : if (finfo->dobj.dump & DUMP_COMPONENT_ACL)
13833 224 : dumpACL(fout, finfo->dobj.dumpId, InvalidDumpId, keyword,
13834 : funcsig, NULL,
13835 224 : finfo->dobj.namespace->dobj.name,
13836 224 : NULL, finfo->rolname, &finfo->dacl);
13837 :
13838 3632 : PQclear(res);
13839 :
13840 3632 : destroyPQExpBuffer(query);
13841 3632 : destroyPQExpBuffer(q);
13842 3632 : destroyPQExpBuffer(delqry);
13843 3632 : destroyPQExpBuffer(asPart);
13844 3632 : free(funcsig);
13845 3632 : free(funcfullsig);
13846 3632 : free(funcsig_tag);
13847 3632 : free(qual_funcsig);
13848 3632 : free(configitems);
13849 : }
13850 :
13851 :
13852 : /*
13853 : * Dump a user-defined cast
13854 : */
13855 : static void
13856 146 : dumpCast(Archive *fout, const CastInfo *cast)
13857 : {
13858 146 : DumpOptions *dopt = fout->dopt;
13859 : PQExpBuffer defqry;
13860 : PQExpBuffer delqry;
13861 : PQExpBuffer labelq;
13862 : PQExpBuffer castargs;
13863 146 : FuncInfo *funcInfo = NULL;
13864 : const char *sourceType;
13865 : const char *targetType;
13866 :
13867 : /* Do nothing if not dumping schema */
13868 146 : if (!dopt->dumpSchema)
13869 12 : return;
13870 :
13871 : /* Cannot dump if we don't have the cast function's info */
13872 134 : if (OidIsValid(cast->castfunc))
13873 : {
13874 84 : funcInfo = findFuncByOid(cast->castfunc);
13875 84 : if (funcInfo == NULL)
13876 0 : pg_fatal("could not find function definition for function with OID %u",
13877 : cast->castfunc);
13878 : }
13879 :
13880 134 : defqry = createPQExpBuffer();
13881 134 : delqry = createPQExpBuffer();
13882 134 : labelq = createPQExpBuffer();
13883 134 : castargs = createPQExpBuffer();
13884 :
13885 134 : sourceType = getFormattedTypeName(fout, cast->castsource, zeroAsNone);
13886 134 : targetType = getFormattedTypeName(fout, cast->casttarget, zeroAsNone);
13887 134 : appendPQExpBuffer(delqry, "DROP CAST (%s AS %s);\n",
13888 : sourceType, targetType);
13889 :
13890 134 : appendPQExpBuffer(defqry, "CREATE CAST (%s AS %s) ",
13891 : sourceType, targetType);
13892 :
13893 134 : switch (cast->castmethod)
13894 : {
13895 50 : case COERCION_METHOD_BINARY:
13896 50 : appendPQExpBufferStr(defqry, "WITHOUT FUNCTION");
13897 50 : break;
13898 0 : case COERCION_METHOD_INOUT:
13899 0 : appendPQExpBufferStr(defqry, "WITH INOUT");
13900 0 : break;
13901 84 : case COERCION_METHOD_FUNCTION:
13902 84 : if (funcInfo)
13903 : {
13904 84 : char *fsig = format_function_signature(fout, funcInfo, true);
13905 :
13906 : /*
13907 : * Always qualify the function name (format_function_signature
13908 : * won't qualify it).
13909 : */
13910 84 : appendPQExpBuffer(defqry, "WITH FUNCTION %s.%s",
13911 84 : fmtId(funcInfo->dobj.namespace->dobj.name), fsig);
13912 84 : free(fsig);
13913 : }
13914 : else
13915 0 : pg_log_warning("bogus value in pg_cast.castfunc or pg_cast.castmethod field");
13916 84 : break;
13917 0 : default:
13918 0 : pg_log_warning("bogus value in pg_cast.castmethod field");
13919 : }
13920 :
13921 134 : if (cast->castcontext == 'a')
13922 74 : appendPQExpBufferStr(defqry, " AS ASSIGNMENT");
13923 60 : else if (cast->castcontext == 'i')
13924 20 : appendPQExpBufferStr(defqry, " AS IMPLICIT");
13925 134 : appendPQExpBufferStr(defqry, ";\n");
13926 :
13927 134 : appendPQExpBuffer(labelq, "CAST (%s AS %s)",
13928 : sourceType, targetType);
13929 :
13930 134 : appendPQExpBuffer(castargs, "(%s AS %s)",
13931 : sourceType, targetType);
13932 :
13933 134 : if (dopt->binary_upgrade)
13934 14 : binary_upgrade_extension_member(defqry, &cast->dobj,
13935 14 : "CAST", castargs->data, NULL);
13936 :
13937 134 : if (cast->dobj.dump & DUMP_COMPONENT_DEFINITION)
13938 134 : ArchiveEntry(fout, cast->dobj.catId, cast->dobj.dumpId,
13939 134 : ARCHIVE_OPTS(.tag = labelq->data,
13940 : .description = "CAST",
13941 : .section = SECTION_PRE_DATA,
13942 : .createStmt = defqry->data,
13943 : .dropStmt = delqry->data));
13944 :
13945 : /* Dump Cast Comments */
13946 134 : if (cast->dobj.dump & DUMP_COMPONENT_COMMENT)
13947 0 : dumpComment(fout, "CAST", castargs->data,
13948 : NULL, "",
13949 0 : cast->dobj.catId, 0, cast->dobj.dumpId);
13950 :
13951 134 : destroyPQExpBuffer(defqry);
13952 134 : destroyPQExpBuffer(delqry);
13953 134 : destroyPQExpBuffer(labelq);
13954 134 : destroyPQExpBuffer(castargs);
13955 : }
13956 :
13957 : /*
13958 : * Dump a transform
13959 : */
13960 : static void
13961 96 : dumpTransform(Archive *fout, const TransformInfo *transform)
13962 : {
13963 96 : DumpOptions *dopt = fout->dopt;
13964 : PQExpBuffer defqry;
13965 : PQExpBuffer delqry;
13966 : PQExpBuffer labelq;
13967 : PQExpBuffer transformargs;
13968 96 : FuncInfo *fromsqlFuncInfo = NULL;
13969 96 : FuncInfo *tosqlFuncInfo = NULL;
13970 : char *lanname;
13971 : const char *transformType;
13972 :
13973 : /* Do nothing if not dumping schema */
13974 96 : if (!dopt->dumpSchema)
13975 12 : return;
13976 :
13977 : /* Cannot dump if we don't have the transform functions' info */
13978 84 : if (OidIsValid(transform->trffromsql))
13979 : {
13980 84 : fromsqlFuncInfo = findFuncByOid(transform->trffromsql);
13981 84 : if (fromsqlFuncInfo == NULL)
13982 0 : pg_fatal("could not find function definition for function with OID %u",
13983 : transform->trffromsql);
13984 : }
13985 84 : if (OidIsValid(transform->trftosql))
13986 : {
13987 84 : tosqlFuncInfo = findFuncByOid(transform->trftosql);
13988 84 : if (tosqlFuncInfo == NULL)
13989 0 : pg_fatal("could not find function definition for function with OID %u",
13990 : transform->trftosql);
13991 : }
13992 :
13993 84 : defqry = createPQExpBuffer();
13994 84 : delqry = createPQExpBuffer();
13995 84 : labelq = createPQExpBuffer();
13996 84 : transformargs = createPQExpBuffer();
13997 :
13998 84 : lanname = get_language_name(fout, transform->trflang);
13999 84 : transformType = getFormattedTypeName(fout, transform->trftype, zeroAsNone);
14000 :
14001 84 : appendPQExpBuffer(delqry, "DROP TRANSFORM FOR %s LANGUAGE %s;\n",
14002 : transformType, lanname);
14003 :
14004 84 : appendPQExpBuffer(defqry, "CREATE TRANSFORM FOR %s LANGUAGE %s (",
14005 : transformType, lanname);
14006 :
14007 84 : if (!transform->trffromsql && !transform->trftosql)
14008 0 : pg_log_warning("bogus transform definition, at least one of trffromsql and trftosql should be nonzero");
14009 :
14010 84 : if (transform->trffromsql)
14011 : {
14012 84 : if (fromsqlFuncInfo)
14013 : {
14014 84 : char *fsig = format_function_signature(fout, fromsqlFuncInfo, true);
14015 :
14016 : /*
14017 : * Always qualify the function name (format_function_signature
14018 : * won't qualify it).
14019 : */
14020 84 : appendPQExpBuffer(defqry, "FROM SQL WITH FUNCTION %s.%s",
14021 84 : fmtId(fromsqlFuncInfo->dobj.namespace->dobj.name), fsig);
14022 84 : free(fsig);
14023 : }
14024 : else
14025 0 : pg_log_warning("bogus value in pg_transform.trffromsql field");
14026 : }
14027 :
14028 84 : if (transform->trftosql)
14029 : {
14030 84 : if (transform->trffromsql)
14031 84 : appendPQExpBufferStr(defqry, ", ");
14032 :
14033 84 : if (tosqlFuncInfo)
14034 : {
14035 84 : char *fsig = format_function_signature(fout, tosqlFuncInfo, true);
14036 :
14037 : /*
14038 : * Always qualify the function name (format_function_signature
14039 : * won't qualify it).
14040 : */
14041 84 : appendPQExpBuffer(defqry, "TO SQL WITH FUNCTION %s.%s",
14042 84 : fmtId(tosqlFuncInfo->dobj.namespace->dobj.name), fsig);
14043 84 : free(fsig);
14044 : }
14045 : else
14046 0 : pg_log_warning("bogus value in pg_transform.trftosql field");
14047 : }
14048 :
14049 84 : appendPQExpBufferStr(defqry, ");\n");
14050 :
14051 84 : appendPQExpBuffer(labelq, "TRANSFORM FOR %s LANGUAGE %s",
14052 : transformType, lanname);
14053 :
14054 84 : appendPQExpBuffer(transformargs, "FOR %s LANGUAGE %s",
14055 : transformType, lanname);
14056 :
14057 84 : if (dopt->binary_upgrade)
14058 4 : binary_upgrade_extension_member(defqry, &transform->dobj,
14059 4 : "TRANSFORM", transformargs->data, NULL);
14060 :
14061 84 : if (transform->dobj.dump & DUMP_COMPONENT_DEFINITION)
14062 84 : ArchiveEntry(fout, transform->dobj.catId, transform->dobj.dumpId,
14063 84 : ARCHIVE_OPTS(.tag = labelq->data,
14064 : .description = "TRANSFORM",
14065 : .section = SECTION_PRE_DATA,
14066 : .createStmt = defqry->data,
14067 : .dropStmt = delqry->data,
14068 : .deps = transform->dobj.dependencies,
14069 : .nDeps = transform->dobj.nDeps));
14070 :
14071 : /* Dump Transform Comments */
14072 84 : if (transform->dobj.dump & DUMP_COMPONENT_COMMENT)
14073 0 : dumpComment(fout, "TRANSFORM", transformargs->data,
14074 : NULL, "",
14075 0 : transform->dobj.catId, 0, transform->dobj.dumpId);
14076 :
14077 84 : free(lanname);
14078 84 : destroyPQExpBuffer(defqry);
14079 84 : destroyPQExpBuffer(delqry);
14080 84 : destroyPQExpBuffer(labelq);
14081 84 : destroyPQExpBuffer(transformargs);
14082 : }
14083 :
14084 :
14085 : /*
14086 : * dumpOpr
14087 : * write out a single operator definition
14088 : */
14089 : static void
14090 5020 : dumpOpr(Archive *fout, const OprInfo *oprinfo)
14091 : {
14092 5020 : DumpOptions *dopt = fout->dopt;
14093 : PQExpBuffer query;
14094 : PQExpBuffer q;
14095 : PQExpBuffer delq;
14096 : PQExpBuffer oprid;
14097 : PQExpBuffer details;
14098 : PGresult *res;
14099 : int i_oprkind;
14100 : int i_oprcode;
14101 : int i_oprleft;
14102 : int i_oprright;
14103 : int i_oprcom;
14104 : int i_oprnegate;
14105 : int i_oprrest;
14106 : int i_oprjoin;
14107 : int i_oprcanmerge;
14108 : int i_oprcanhash;
14109 : char *oprkind;
14110 : char *oprcode;
14111 : char *oprleft;
14112 : char *oprright;
14113 : char *oprcom;
14114 : char *oprnegate;
14115 : char *oprrest;
14116 : char *oprjoin;
14117 : char *oprcanmerge;
14118 : char *oprcanhash;
14119 : char *oprregproc;
14120 : char *oprref;
14121 :
14122 : /* Do nothing if not dumping schema */
14123 5020 : if (!dopt->dumpSchema)
14124 12 : return;
14125 :
14126 : /*
14127 : * some operators are invalid because they were the result of user
14128 : * defining operators before commutators exist
14129 : */
14130 5008 : if (!OidIsValid(oprinfo->oprcode))
14131 28 : return;
14132 :
14133 4980 : query = createPQExpBuffer();
14134 4980 : q = createPQExpBuffer();
14135 4980 : delq = createPQExpBuffer();
14136 4980 : oprid = createPQExpBuffer();
14137 4980 : details = createPQExpBuffer();
14138 :
14139 4980 : if (!fout->is_prepared[PREPQUERY_DUMPOPR])
14140 : {
14141 : /* Set up query for operator-specific details */
14142 92 : appendPQExpBufferStr(query,
14143 : "PREPARE dumpOpr(pg_catalog.oid) AS\n"
14144 : "SELECT oprkind, "
14145 : "oprcode::pg_catalog.regprocedure, "
14146 : "oprleft::pg_catalog.regtype, "
14147 : "oprright::pg_catalog.regtype, "
14148 : "oprcom, "
14149 : "oprnegate, "
14150 : "oprrest::pg_catalog.regprocedure, "
14151 : "oprjoin::pg_catalog.regprocedure, "
14152 : "oprcanmerge, oprcanhash "
14153 : "FROM pg_catalog.pg_operator "
14154 : "WHERE oid = $1");
14155 :
14156 92 : ExecuteSqlStatement(fout, query->data);
14157 :
14158 92 : fout->is_prepared[PREPQUERY_DUMPOPR] = true;
14159 : }
14160 :
14161 4980 : printfPQExpBuffer(query,
14162 : "EXECUTE dumpOpr('%u')",
14163 4980 : oprinfo->dobj.catId.oid);
14164 :
14165 4980 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
14166 :
14167 4980 : i_oprkind = PQfnumber(res, "oprkind");
14168 4980 : i_oprcode = PQfnumber(res, "oprcode");
14169 4980 : i_oprleft = PQfnumber(res, "oprleft");
14170 4980 : i_oprright = PQfnumber(res, "oprright");
14171 4980 : i_oprcom = PQfnumber(res, "oprcom");
14172 4980 : i_oprnegate = PQfnumber(res, "oprnegate");
14173 4980 : i_oprrest = PQfnumber(res, "oprrest");
14174 4980 : i_oprjoin = PQfnumber(res, "oprjoin");
14175 4980 : i_oprcanmerge = PQfnumber(res, "oprcanmerge");
14176 4980 : i_oprcanhash = PQfnumber(res, "oprcanhash");
14177 :
14178 4980 : oprkind = PQgetvalue(res, 0, i_oprkind);
14179 4980 : oprcode = PQgetvalue(res, 0, i_oprcode);
14180 4980 : oprleft = PQgetvalue(res, 0, i_oprleft);
14181 4980 : oprright = PQgetvalue(res, 0, i_oprright);
14182 4980 : oprcom = PQgetvalue(res, 0, i_oprcom);
14183 4980 : oprnegate = PQgetvalue(res, 0, i_oprnegate);
14184 4980 : oprrest = PQgetvalue(res, 0, i_oprrest);
14185 4980 : oprjoin = PQgetvalue(res, 0, i_oprjoin);
14186 4980 : oprcanmerge = PQgetvalue(res, 0, i_oprcanmerge);
14187 4980 : oprcanhash = PQgetvalue(res, 0, i_oprcanhash);
14188 :
14189 : /* In PG14 upwards postfix operator support does not exist anymore. */
14190 4980 : if (strcmp(oprkind, "r") == 0)
14191 0 : pg_log_warning("postfix operators are not supported anymore (operator \"%s\")",
14192 : oprcode);
14193 :
14194 4980 : oprregproc = convertRegProcReference(oprcode);
14195 4980 : if (oprregproc)
14196 : {
14197 4980 : appendPQExpBuffer(details, " FUNCTION = %s", oprregproc);
14198 4980 : free(oprregproc);
14199 : }
14200 :
14201 4980 : appendPQExpBuffer(oprid, "%s (",
14202 4980 : oprinfo->dobj.name);
14203 :
14204 : /*
14205 : * right unary means there's a left arg and left unary means there's a
14206 : * right arg. (Although the "r" case is dead code for PG14 and later,
14207 : * continue to support it in case we're dumping from an old server.)
14208 : */
14209 4980 : if (strcmp(oprkind, "r") == 0 ||
14210 4980 : strcmp(oprkind, "b") == 0)
14211 : {
14212 4694 : appendPQExpBuffer(details, ",\n LEFTARG = %s", oprleft);
14213 4694 : appendPQExpBufferStr(oprid, oprleft);
14214 : }
14215 : else
14216 286 : appendPQExpBufferStr(oprid, "NONE");
14217 :
14218 4980 : if (strcmp(oprkind, "l") == 0 ||
14219 4694 : strcmp(oprkind, "b") == 0)
14220 : {
14221 4980 : appendPQExpBuffer(details, ",\n RIGHTARG = %s", oprright);
14222 4980 : appendPQExpBuffer(oprid, ", %s)", oprright);
14223 : }
14224 : else
14225 0 : appendPQExpBufferStr(oprid, ", NONE)");
14226 :
14227 4980 : oprref = getFormattedOperatorName(oprcom);
14228 4980 : if (oprref)
14229 : {
14230 3322 : appendPQExpBuffer(details, ",\n COMMUTATOR = %s", oprref);
14231 3322 : free(oprref);
14232 : }
14233 :
14234 4980 : oprref = getFormattedOperatorName(oprnegate);
14235 4980 : if (oprref)
14236 : {
14237 2326 : appendPQExpBuffer(details, ",\n NEGATOR = %s", oprref);
14238 2326 : free(oprref);
14239 : }
14240 :
14241 4980 : if (strcmp(oprcanmerge, "t") == 0)
14242 370 : appendPQExpBufferStr(details, ",\n MERGES");
14243 :
14244 4980 : if (strcmp(oprcanhash, "t") == 0)
14245 276 : appendPQExpBufferStr(details, ",\n HASHES");
14246 :
14247 4980 : oprregproc = convertRegProcReference(oprrest);
14248 4980 : if (oprregproc)
14249 : {
14250 3028 : appendPQExpBuffer(details, ",\n RESTRICT = %s", oprregproc);
14251 3028 : free(oprregproc);
14252 : }
14253 :
14254 4980 : oprregproc = convertRegProcReference(oprjoin);
14255 4980 : if (oprregproc)
14256 : {
14257 3028 : appendPQExpBuffer(details, ",\n JOIN = %s", oprregproc);
14258 3028 : free(oprregproc);
14259 : }
14260 :
14261 4980 : appendPQExpBuffer(delq, "DROP OPERATOR %s.%s;\n",
14262 4980 : fmtId(oprinfo->dobj.namespace->dobj.name),
14263 : oprid->data);
14264 :
14265 4980 : appendPQExpBuffer(q, "CREATE OPERATOR %s.%s (\n%s\n);\n",
14266 4980 : fmtId(oprinfo->dobj.namespace->dobj.name),
14267 4980 : oprinfo->dobj.name, details->data);
14268 :
14269 4980 : if (dopt->binary_upgrade)
14270 24 : binary_upgrade_extension_member(q, &oprinfo->dobj,
14271 24 : "OPERATOR", oprid->data,
14272 24 : oprinfo->dobj.namespace->dobj.name);
14273 :
14274 4980 : if (oprinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14275 4980 : ArchiveEntry(fout, oprinfo->dobj.catId, oprinfo->dobj.dumpId,
14276 4980 : ARCHIVE_OPTS(.tag = oprinfo->dobj.name,
14277 : .namespace = oprinfo->dobj.namespace->dobj.name,
14278 : .owner = oprinfo->rolname,
14279 : .description = "OPERATOR",
14280 : .section = SECTION_PRE_DATA,
14281 : .createStmt = q->data,
14282 : .dropStmt = delq->data));
14283 :
14284 : /* Dump Operator Comments */
14285 4980 : if (oprinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14286 4794 : dumpComment(fout, "OPERATOR", oprid->data,
14287 4794 : oprinfo->dobj.namespace->dobj.name, oprinfo->rolname,
14288 4794 : oprinfo->dobj.catId, 0, oprinfo->dobj.dumpId);
14289 :
14290 4980 : PQclear(res);
14291 :
14292 4980 : destroyPQExpBuffer(query);
14293 4980 : destroyPQExpBuffer(q);
14294 4980 : destroyPQExpBuffer(delq);
14295 4980 : destroyPQExpBuffer(oprid);
14296 4980 : destroyPQExpBuffer(details);
14297 : }
14298 :
14299 : /*
14300 : * Convert a function reference obtained from pg_operator
14301 : *
14302 : * Returns allocated string of what to print, or NULL if function references
14303 : * is InvalidOid. Returned string is expected to be free'd by the caller.
14304 : *
14305 : * The input is a REGPROCEDURE display; we have to strip the argument-types
14306 : * part.
14307 : */
14308 : static char *
14309 14940 : convertRegProcReference(const char *proc)
14310 : {
14311 : char *name;
14312 : char *paren;
14313 : bool inquote;
14314 :
14315 : /* In all cases "-" means a null reference */
14316 14940 : if (strcmp(proc, "-") == 0)
14317 3904 : return NULL;
14318 :
14319 11036 : name = pg_strdup(proc);
14320 : /* find non-double-quoted left paren */
14321 11036 : inquote = false;
14322 132948 : for (paren = name; *paren; paren++)
14323 : {
14324 132948 : if (*paren == '(' && !inquote)
14325 : {
14326 11036 : *paren = '\0';
14327 11036 : break;
14328 : }
14329 121912 : if (*paren == '"')
14330 100 : inquote = !inquote;
14331 : }
14332 11036 : return name;
14333 : }
14334 :
14335 : /*
14336 : * getFormattedOperatorName - retrieve the operator name for the
14337 : * given operator OID (presented in string form).
14338 : *
14339 : * Returns an allocated string, or NULL if the given OID is invalid.
14340 : * Caller is responsible for free'ing result string.
14341 : *
14342 : * What we produce has the format "OPERATOR(schema.oprname)". This is only
14343 : * useful in commands where the operator's argument types can be inferred from
14344 : * context. We always schema-qualify the name, though. The predecessor to
14345 : * this code tried to skip the schema qualification if possible, but that led
14346 : * to wrong results in corner cases, such as if an operator and its negator
14347 : * are in different schemas.
14348 : */
14349 : static char *
14350 10542 : getFormattedOperatorName(const char *oproid)
14351 : {
14352 : OprInfo *oprInfo;
14353 :
14354 : /* In all cases "0" means a null reference */
14355 10542 : if (strcmp(oproid, "0") == 0)
14356 4894 : return NULL;
14357 :
14358 5648 : oprInfo = findOprByOid(atooid(oproid));
14359 5648 : if (oprInfo == NULL)
14360 : {
14361 0 : pg_log_warning("could not find operator with OID %s",
14362 : oproid);
14363 0 : return NULL;
14364 : }
14365 :
14366 5648 : return psprintf("OPERATOR(%s.%s)",
14367 5648 : fmtId(oprInfo->dobj.namespace->dobj.name),
14368 : oprInfo->dobj.name);
14369 : }
14370 :
14371 : /*
14372 : * Convert a function OID obtained from pg_ts_parser or pg_ts_template
14373 : *
14374 : * It is sufficient to use REGPROC rather than REGPROCEDURE, since the
14375 : * argument lists of these functions are predetermined. Note that the
14376 : * caller should ensure we are in the proper schema, because the results
14377 : * are search path dependent!
14378 : */
14379 : static char *
14380 470 : convertTSFunction(Archive *fout, Oid funcOid)
14381 : {
14382 : char *result;
14383 : char query[128];
14384 : PGresult *res;
14385 :
14386 470 : snprintf(query, sizeof(query),
14387 : "SELECT '%u'::pg_catalog.regproc", funcOid);
14388 470 : res = ExecuteSqlQueryForSingleRow(fout, query);
14389 :
14390 470 : result = pg_strdup(PQgetvalue(res, 0, 0));
14391 :
14392 470 : PQclear(res);
14393 :
14394 470 : return result;
14395 : }
14396 :
14397 : /*
14398 : * dumpAccessMethod
14399 : * write out a single access method definition
14400 : */
14401 : static void
14402 184 : dumpAccessMethod(Archive *fout, const AccessMethodInfo *aminfo)
14403 : {
14404 184 : DumpOptions *dopt = fout->dopt;
14405 : PQExpBuffer q;
14406 : PQExpBuffer delq;
14407 : char *qamname;
14408 :
14409 : /* Do nothing if not dumping schema */
14410 184 : if (!dopt->dumpSchema)
14411 24 : return;
14412 :
14413 160 : q = createPQExpBuffer();
14414 160 : delq = createPQExpBuffer();
14415 :
14416 160 : qamname = pg_strdup(fmtId(aminfo->dobj.name));
14417 :
14418 160 : appendPQExpBuffer(q, "CREATE ACCESS METHOD %s ", qamname);
14419 :
14420 160 : switch (aminfo->amtype)
14421 : {
14422 76 : case AMTYPE_INDEX:
14423 76 : appendPQExpBufferStr(q, "TYPE INDEX ");
14424 76 : break;
14425 84 : case AMTYPE_TABLE:
14426 84 : appendPQExpBufferStr(q, "TYPE TABLE ");
14427 84 : break;
14428 0 : default:
14429 0 : pg_log_warning("invalid type \"%c\" of access method \"%s\"",
14430 : aminfo->amtype, qamname);
14431 0 : destroyPQExpBuffer(q);
14432 0 : destroyPQExpBuffer(delq);
14433 0 : free(qamname);
14434 0 : return;
14435 : }
14436 :
14437 160 : appendPQExpBuffer(q, "HANDLER %s;\n", aminfo->amhandler);
14438 :
14439 160 : appendPQExpBuffer(delq, "DROP ACCESS METHOD %s;\n",
14440 : qamname);
14441 :
14442 160 : if (dopt->binary_upgrade)
14443 8 : binary_upgrade_extension_member(q, &aminfo->dobj,
14444 : "ACCESS METHOD", qamname, NULL);
14445 :
14446 160 : if (aminfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14447 160 : ArchiveEntry(fout, aminfo->dobj.catId, aminfo->dobj.dumpId,
14448 160 : ARCHIVE_OPTS(.tag = aminfo->dobj.name,
14449 : .description = "ACCESS METHOD",
14450 : .section = SECTION_PRE_DATA,
14451 : .createStmt = q->data,
14452 : .dropStmt = delq->data));
14453 :
14454 : /* Dump Access Method Comments */
14455 160 : if (aminfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14456 0 : dumpComment(fout, "ACCESS METHOD", qamname,
14457 : NULL, "",
14458 0 : aminfo->dobj.catId, 0, aminfo->dobj.dumpId);
14459 :
14460 160 : destroyPQExpBuffer(q);
14461 160 : destroyPQExpBuffer(delq);
14462 160 : free(qamname);
14463 : }
14464 :
14465 : /*
14466 : * dumpOpclass
14467 : * write out a single operator class definition
14468 : */
14469 : static void
14470 1356 : dumpOpclass(Archive *fout, const OpclassInfo *opcinfo)
14471 : {
14472 1356 : DumpOptions *dopt = fout->dopt;
14473 : PQExpBuffer query;
14474 : PQExpBuffer q;
14475 : PQExpBuffer delq;
14476 : PQExpBuffer nameusing;
14477 : PGresult *res;
14478 : int ntups;
14479 : int i_opcintype;
14480 : int i_opckeytype;
14481 : int i_opcdefault;
14482 : int i_opcfamily;
14483 : int i_opcfamilyname;
14484 : int i_opcfamilynsp;
14485 : int i_amname;
14486 : int i_amopstrategy;
14487 : int i_amopopr;
14488 : int i_sortfamily;
14489 : int i_sortfamilynsp;
14490 : int i_amprocnum;
14491 : int i_amproc;
14492 : int i_amproclefttype;
14493 : int i_amprocrighttype;
14494 : char *opcintype;
14495 : char *opckeytype;
14496 : char *opcdefault;
14497 : char *opcfamily;
14498 : char *opcfamilyname;
14499 : char *opcfamilynsp;
14500 : char *amname;
14501 : char *amopstrategy;
14502 : char *amopopr;
14503 : char *sortfamily;
14504 : char *sortfamilynsp;
14505 : char *amprocnum;
14506 : char *amproc;
14507 : char *amproclefttype;
14508 : char *amprocrighttype;
14509 : bool needComma;
14510 : int i;
14511 :
14512 : /* Do nothing if not dumping schema */
14513 1356 : if (!dopt->dumpSchema)
14514 36 : return;
14515 :
14516 1320 : query = createPQExpBuffer();
14517 1320 : q = createPQExpBuffer();
14518 1320 : delq = createPQExpBuffer();
14519 1320 : nameusing = createPQExpBuffer();
14520 :
14521 : /* Get additional fields from the pg_opclass row */
14522 1320 : appendPQExpBuffer(query, "SELECT opcintype::pg_catalog.regtype, "
14523 : "opckeytype::pg_catalog.regtype, "
14524 : "opcdefault, opcfamily, "
14525 : "opfname AS opcfamilyname, "
14526 : "nspname AS opcfamilynsp, "
14527 : "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcmethod) AS amname "
14528 : "FROM pg_catalog.pg_opclass c "
14529 : "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = opcfamily "
14530 : "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14531 : "WHERE c.oid = '%u'::pg_catalog.oid",
14532 1320 : opcinfo->dobj.catId.oid);
14533 :
14534 1320 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
14535 :
14536 1320 : i_opcintype = PQfnumber(res, "opcintype");
14537 1320 : i_opckeytype = PQfnumber(res, "opckeytype");
14538 1320 : i_opcdefault = PQfnumber(res, "opcdefault");
14539 1320 : i_opcfamily = PQfnumber(res, "opcfamily");
14540 1320 : i_opcfamilyname = PQfnumber(res, "opcfamilyname");
14541 1320 : i_opcfamilynsp = PQfnumber(res, "opcfamilynsp");
14542 1320 : i_amname = PQfnumber(res, "amname");
14543 :
14544 : /* opcintype may still be needed after we PQclear res */
14545 1320 : opcintype = pg_strdup(PQgetvalue(res, 0, i_opcintype));
14546 1320 : opckeytype = PQgetvalue(res, 0, i_opckeytype);
14547 1320 : opcdefault = PQgetvalue(res, 0, i_opcdefault);
14548 : /* opcfamily will still be needed after we PQclear res */
14549 1320 : opcfamily = pg_strdup(PQgetvalue(res, 0, i_opcfamily));
14550 1320 : opcfamilyname = PQgetvalue(res, 0, i_opcfamilyname);
14551 1320 : opcfamilynsp = PQgetvalue(res, 0, i_opcfamilynsp);
14552 : /* amname will still be needed after we PQclear res */
14553 1320 : amname = pg_strdup(PQgetvalue(res, 0, i_amname));
14554 :
14555 1320 : appendPQExpBuffer(delq, "DROP OPERATOR CLASS %s",
14556 1320 : fmtQualifiedDumpable(opcinfo));
14557 1320 : appendPQExpBuffer(delq, " USING %s;\n",
14558 : fmtId(amname));
14559 :
14560 : /* Build the fixed portion of the CREATE command */
14561 1320 : appendPQExpBuffer(q, "CREATE OPERATOR CLASS %s\n ",
14562 1320 : fmtQualifiedDumpable(opcinfo));
14563 1320 : if (strcmp(opcdefault, "t") == 0)
14564 714 : appendPQExpBufferStr(q, "DEFAULT ");
14565 1320 : appendPQExpBuffer(q, "FOR TYPE %s USING %s",
14566 : opcintype,
14567 : fmtId(amname));
14568 1320 : if (strlen(opcfamilyname) > 0)
14569 : {
14570 1320 : appendPQExpBufferStr(q, " FAMILY ");
14571 1320 : appendPQExpBuffer(q, "%s.", fmtId(opcfamilynsp));
14572 1320 : appendPQExpBufferStr(q, fmtId(opcfamilyname));
14573 : }
14574 1320 : appendPQExpBufferStr(q, " AS\n ");
14575 :
14576 1320 : needComma = false;
14577 :
14578 1320 : if (strcmp(opckeytype, "-") != 0)
14579 : {
14580 504 : appendPQExpBuffer(q, "STORAGE %s",
14581 : opckeytype);
14582 504 : needComma = true;
14583 : }
14584 :
14585 1320 : PQclear(res);
14586 :
14587 : /*
14588 : * Now fetch and print the OPERATOR entries (pg_amop rows).
14589 : *
14590 : * Print only those opfamily members that are tied to the opclass by
14591 : * pg_depend entries.
14592 : */
14593 1320 : resetPQExpBuffer(query);
14594 1320 : appendPQExpBuffer(query, "SELECT amopstrategy, "
14595 : "amopopr::pg_catalog.regoperator, "
14596 : "opfname AS sortfamily, "
14597 : "nspname AS sortfamilynsp "
14598 : "FROM pg_catalog.pg_amop ao JOIN pg_catalog.pg_depend ON "
14599 : "(classid = 'pg_catalog.pg_amop'::pg_catalog.regclass AND objid = ao.oid) "
14600 : "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = amopsortfamily "
14601 : "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14602 : "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
14603 : "AND refobjid = '%u'::pg_catalog.oid "
14604 : "AND amopfamily = '%s'::pg_catalog.oid "
14605 : "ORDER BY amopstrategy",
14606 1320 : opcinfo->dobj.catId.oid,
14607 : opcfamily);
14608 :
14609 1320 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14610 :
14611 1320 : ntups = PQntuples(res);
14612 :
14613 1320 : i_amopstrategy = PQfnumber(res, "amopstrategy");
14614 1320 : i_amopopr = PQfnumber(res, "amopopr");
14615 1320 : i_sortfamily = PQfnumber(res, "sortfamily");
14616 1320 : i_sortfamilynsp = PQfnumber(res, "sortfamilynsp");
14617 :
14618 1796 : for (i = 0; i < ntups; i++)
14619 : {
14620 476 : amopstrategy = PQgetvalue(res, i, i_amopstrategy);
14621 476 : amopopr = PQgetvalue(res, i, i_amopopr);
14622 476 : sortfamily = PQgetvalue(res, i, i_sortfamily);
14623 476 : sortfamilynsp = PQgetvalue(res, i, i_sortfamilynsp);
14624 :
14625 476 : if (needComma)
14626 304 : appendPQExpBufferStr(q, " ,\n ");
14627 :
14628 476 : appendPQExpBuffer(q, "OPERATOR %s %s",
14629 : amopstrategy, amopopr);
14630 :
14631 476 : if (strlen(sortfamily) > 0)
14632 : {
14633 0 : appendPQExpBufferStr(q, " FOR ORDER BY ");
14634 0 : appendPQExpBuffer(q, "%s.", fmtId(sortfamilynsp));
14635 0 : appendPQExpBufferStr(q, fmtId(sortfamily));
14636 : }
14637 :
14638 476 : needComma = true;
14639 : }
14640 :
14641 1320 : PQclear(res);
14642 :
14643 : /*
14644 : * Now fetch and print the FUNCTION entries (pg_amproc rows).
14645 : *
14646 : * Print only those opfamily members that are tied to the opclass by
14647 : * pg_depend entries.
14648 : *
14649 : * We print the amproclefttype/amprocrighttype even though in most cases
14650 : * the backend could deduce the right values, because of the corner case
14651 : * of a btree sort support function for a cross-type comparison.
14652 : */
14653 1320 : resetPQExpBuffer(query);
14654 :
14655 1320 : appendPQExpBuffer(query, "SELECT amprocnum, "
14656 : "amproc::pg_catalog.regprocedure, "
14657 : "amproclefttype::pg_catalog.regtype, "
14658 : "amprocrighttype::pg_catalog.regtype "
14659 : "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
14660 : "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
14661 : "AND refobjid = '%u'::pg_catalog.oid "
14662 : "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
14663 : "AND objid = ap.oid "
14664 : "ORDER BY amprocnum",
14665 1320 : opcinfo->dobj.catId.oid);
14666 :
14667 1320 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14668 :
14669 1320 : ntups = PQntuples(res);
14670 :
14671 1320 : i_amprocnum = PQfnumber(res, "amprocnum");
14672 1320 : i_amproc = PQfnumber(res, "amproc");
14673 1320 : i_amproclefttype = PQfnumber(res, "amproclefttype");
14674 1320 : i_amprocrighttype = PQfnumber(res, "amprocrighttype");
14675 :
14676 1396 : for (i = 0; i < ntups; i++)
14677 : {
14678 76 : amprocnum = PQgetvalue(res, i, i_amprocnum);
14679 76 : amproc = PQgetvalue(res, i, i_amproc);
14680 76 : amproclefttype = PQgetvalue(res, i, i_amproclefttype);
14681 76 : amprocrighttype = PQgetvalue(res, i, i_amprocrighttype);
14682 :
14683 76 : if (needComma)
14684 76 : appendPQExpBufferStr(q, " ,\n ");
14685 :
14686 76 : appendPQExpBuffer(q, "FUNCTION %s", amprocnum);
14687 :
14688 76 : if (*amproclefttype && *amprocrighttype)
14689 76 : appendPQExpBuffer(q, " (%s, %s)", amproclefttype, amprocrighttype);
14690 :
14691 76 : appendPQExpBuffer(q, " %s", amproc);
14692 :
14693 76 : needComma = true;
14694 : }
14695 :
14696 1320 : PQclear(res);
14697 :
14698 : /*
14699 : * If needComma is still false it means we haven't added anything after
14700 : * the AS keyword. To avoid printing broken SQL, append a dummy STORAGE
14701 : * clause with the same datatype. This isn't sanctioned by the
14702 : * documentation, but actually DefineOpClass will treat it as a no-op.
14703 : */
14704 1320 : if (!needComma)
14705 644 : appendPQExpBuffer(q, "STORAGE %s", opcintype);
14706 :
14707 1320 : appendPQExpBufferStr(q, ";\n");
14708 :
14709 1320 : appendPQExpBufferStr(nameusing, fmtId(opcinfo->dobj.name));
14710 1320 : appendPQExpBuffer(nameusing, " USING %s",
14711 : fmtId(amname));
14712 :
14713 1320 : if (dopt->binary_upgrade)
14714 12 : binary_upgrade_extension_member(q, &opcinfo->dobj,
14715 12 : "OPERATOR CLASS", nameusing->data,
14716 12 : opcinfo->dobj.namespace->dobj.name);
14717 :
14718 1320 : if (opcinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14719 1320 : ArchiveEntry(fout, opcinfo->dobj.catId, opcinfo->dobj.dumpId,
14720 1320 : ARCHIVE_OPTS(.tag = opcinfo->dobj.name,
14721 : .namespace = opcinfo->dobj.namespace->dobj.name,
14722 : .owner = opcinfo->rolname,
14723 : .description = "OPERATOR CLASS",
14724 : .section = SECTION_PRE_DATA,
14725 : .createStmt = q->data,
14726 : .dropStmt = delq->data));
14727 :
14728 : /* Dump Operator Class Comments */
14729 1320 : if (opcinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14730 0 : dumpComment(fout, "OPERATOR CLASS", nameusing->data,
14731 0 : opcinfo->dobj.namespace->dobj.name, opcinfo->rolname,
14732 0 : opcinfo->dobj.catId, 0, opcinfo->dobj.dumpId);
14733 :
14734 1320 : free(opcintype);
14735 1320 : free(opcfamily);
14736 1320 : free(amname);
14737 1320 : destroyPQExpBuffer(query);
14738 1320 : destroyPQExpBuffer(q);
14739 1320 : destroyPQExpBuffer(delq);
14740 1320 : destroyPQExpBuffer(nameusing);
14741 : }
14742 :
14743 : /*
14744 : * dumpOpfamily
14745 : * write out a single operator family definition
14746 : *
14747 : * Note: this also dumps any "loose" operator members that aren't bound to a
14748 : * specific opclass within the opfamily.
14749 : */
14750 : static void
14751 1122 : dumpOpfamily(Archive *fout, const OpfamilyInfo *opfinfo)
14752 : {
14753 1122 : DumpOptions *dopt = fout->dopt;
14754 : PQExpBuffer query;
14755 : PQExpBuffer q;
14756 : PQExpBuffer delq;
14757 : PQExpBuffer nameusing;
14758 : PGresult *res;
14759 : PGresult *res_ops;
14760 : PGresult *res_procs;
14761 : int ntups;
14762 : int i_amname;
14763 : int i_amopstrategy;
14764 : int i_amopopr;
14765 : int i_sortfamily;
14766 : int i_sortfamilynsp;
14767 : int i_amprocnum;
14768 : int i_amproc;
14769 : int i_amproclefttype;
14770 : int i_amprocrighttype;
14771 : char *amname;
14772 : char *amopstrategy;
14773 : char *amopopr;
14774 : char *sortfamily;
14775 : char *sortfamilynsp;
14776 : char *amprocnum;
14777 : char *amproc;
14778 : char *amproclefttype;
14779 : char *amprocrighttype;
14780 : bool needComma;
14781 : int i;
14782 :
14783 : /* Do nothing if not dumping schema */
14784 1122 : if (!dopt->dumpSchema)
14785 24 : return;
14786 :
14787 1098 : query = createPQExpBuffer();
14788 1098 : q = createPQExpBuffer();
14789 1098 : delq = createPQExpBuffer();
14790 1098 : nameusing = createPQExpBuffer();
14791 :
14792 : /*
14793 : * Fetch only those opfamily members that are tied directly to the
14794 : * opfamily by pg_depend entries.
14795 : */
14796 1098 : appendPQExpBuffer(query, "SELECT amopstrategy, "
14797 : "amopopr::pg_catalog.regoperator, "
14798 : "opfname AS sortfamily, "
14799 : "nspname AS sortfamilynsp "
14800 : "FROM pg_catalog.pg_amop ao JOIN pg_catalog.pg_depend ON "
14801 : "(classid = 'pg_catalog.pg_amop'::pg_catalog.regclass AND objid = ao.oid) "
14802 : "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = amopsortfamily "
14803 : "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14804 : "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
14805 : "AND refobjid = '%u'::pg_catalog.oid "
14806 : "AND amopfamily = '%u'::pg_catalog.oid "
14807 : "ORDER BY amopstrategy",
14808 1098 : opfinfo->dobj.catId.oid,
14809 1098 : opfinfo->dobj.catId.oid);
14810 :
14811 1098 : res_ops = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14812 :
14813 1098 : resetPQExpBuffer(query);
14814 :
14815 1098 : appendPQExpBuffer(query, "SELECT amprocnum, "
14816 : "amproc::pg_catalog.regprocedure, "
14817 : "amproclefttype::pg_catalog.regtype, "
14818 : "amprocrighttype::pg_catalog.regtype "
14819 : "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
14820 : "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
14821 : "AND refobjid = '%u'::pg_catalog.oid "
14822 : "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
14823 : "AND objid = ap.oid "
14824 : "ORDER BY amprocnum",
14825 1098 : opfinfo->dobj.catId.oid);
14826 :
14827 1098 : res_procs = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14828 :
14829 : /* Get additional fields from the pg_opfamily row */
14830 1098 : resetPQExpBuffer(query);
14831 :
14832 1098 : appendPQExpBuffer(query, "SELECT "
14833 : "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opfmethod) AS amname "
14834 : "FROM pg_catalog.pg_opfamily "
14835 : "WHERE oid = '%u'::pg_catalog.oid",
14836 1098 : opfinfo->dobj.catId.oid);
14837 :
14838 1098 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
14839 :
14840 1098 : i_amname = PQfnumber(res, "amname");
14841 :
14842 : /* amname will still be needed after we PQclear res */
14843 1098 : amname = pg_strdup(PQgetvalue(res, 0, i_amname));
14844 :
14845 1098 : appendPQExpBuffer(delq, "DROP OPERATOR FAMILY %s",
14846 1098 : fmtQualifiedDumpable(opfinfo));
14847 1098 : appendPQExpBuffer(delq, " USING %s;\n",
14848 : fmtId(amname));
14849 :
14850 : /* Build the fixed portion of the CREATE command */
14851 1098 : appendPQExpBuffer(q, "CREATE OPERATOR FAMILY %s",
14852 1098 : fmtQualifiedDumpable(opfinfo));
14853 1098 : appendPQExpBuffer(q, " USING %s;\n",
14854 : fmtId(amname));
14855 :
14856 1098 : PQclear(res);
14857 :
14858 : /* Do we need an ALTER to add loose members? */
14859 1098 : if (PQntuples(res_ops) > 0 || PQntuples(res_procs) > 0)
14860 : {
14861 106 : appendPQExpBuffer(q, "ALTER OPERATOR FAMILY %s",
14862 106 : fmtQualifiedDumpable(opfinfo));
14863 106 : appendPQExpBuffer(q, " USING %s ADD\n ",
14864 : fmtId(amname));
14865 :
14866 106 : needComma = false;
14867 :
14868 : /*
14869 : * Now fetch and print the OPERATOR entries (pg_amop rows).
14870 : */
14871 106 : ntups = PQntuples(res_ops);
14872 :
14873 106 : i_amopstrategy = PQfnumber(res_ops, "amopstrategy");
14874 106 : i_amopopr = PQfnumber(res_ops, "amopopr");
14875 106 : i_sortfamily = PQfnumber(res_ops, "sortfamily");
14876 106 : i_sortfamilynsp = PQfnumber(res_ops, "sortfamilynsp");
14877 :
14878 486 : for (i = 0; i < ntups; i++)
14879 : {
14880 380 : amopstrategy = PQgetvalue(res_ops, i, i_amopstrategy);
14881 380 : amopopr = PQgetvalue(res_ops, i, i_amopopr);
14882 380 : sortfamily = PQgetvalue(res_ops, i, i_sortfamily);
14883 380 : sortfamilynsp = PQgetvalue(res_ops, i, i_sortfamilynsp);
14884 :
14885 380 : if (needComma)
14886 304 : appendPQExpBufferStr(q, " ,\n ");
14887 :
14888 380 : appendPQExpBuffer(q, "OPERATOR %s %s",
14889 : amopstrategy, amopopr);
14890 :
14891 380 : if (strlen(sortfamily) > 0)
14892 : {
14893 0 : appendPQExpBufferStr(q, " FOR ORDER BY ");
14894 0 : appendPQExpBuffer(q, "%s.", fmtId(sortfamilynsp));
14895 0 : appendPQExpBufferStr(q, fmtId(sortfamily));
14896 : }
14897 :
14898 380 : needComma = true;
14899 : }
14900 :
14901 : /*
14902 : * Now fetch and print the FUNCTION entries (pg_amproc rows).
14903 : */
14904 106 : ntups = PQntuples(res_procs);
14905 :
14906 106 : i_amprocnum = PQfnumber(res_procs, "amprocnum");
14907 106 : i_amproc = PQfnumber(res_procs, "amproc");
14908 106 : i_amproclefttype = PQfnumber(res_procs, "amproclefttype");
14909 106 : i_amprocrighttype = PQfnumber(res_procs, "amprocrighttype");
14910 :
14911 516 : for (i = 0; i < ntups; i++)
14912 : {
14913 410 : amprocnum = PQgetvalue(res_procs, i, i_amprocnum);
14914 410 : amproc = PQgetvalue(res_procs, i, i_amproc);
14915 410 : amproclefttype = PQgetvalue(res_procs, i, i_amproclefttype);
14916 410 : amprocrighttype = PQgetvalue(res_procs, i, i_amprocrighttype);
14917 :
14918 410 : if (needComma)
14919 380 : appendPQExpBufferStr(q, " ,\n ");
14920 :
14921 410 : appendPQExpBuffer(q, "FUNCTION %s (%s, %s) %s",
14922 : amprocnum, amproclefttype, amprocrighttype,
14923 : amproc);
14924 :
14925 410 : needComma = true;
14926 : }
14927 :
14928 106 : appendPQExpBufferStr(q, ";\n");
14929 : }
14930 :
14931 1098 : appendPQExpBufferStr(nameusing, fmtId(opfinfo->dobj.name));
14932 1098 : appendPQExpBuffer(nameusing, " USING %s",
14933 : fmtId(amname));
14934 :
14935 1098 : if (dopt->binary_upgrade)
14936 18 : binary_upgrade_extension_member(q, &opfinfo->dobj,
14937 18 : "OPERATOR FAMILY", nameusing->data,
14938 18 : opfinfo->dobj.namespace->dobj.name);
14939 :
14940 1098 : if (opfinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14941 1098 : ArchiveEntry(fout, opfinfo->dobj.catId, opfinfo->dobj.dumpId,
14942 1098 : ARCHIVE_OPTS(.tag = opfinfo->dobj.name,
14943 : .namespace = opfinfo->dobj.namespace->dobj.name,
14944 : .owner = opfinfo->rolname,
14945 : .description = "OPERATOR FAMILY",
14946 : .section = SECTION_PRE_DATA,
14947 : .createStmt = q->data,
14948 : .dropStmt = delq->data));
14949 :
14950 : /* Dump Operator Family Comments */
14951 1098 : if (opfinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14952 0 : dumpComment(fout, "OPERATOR FAMILY", nameusing->data,
14953 0 : opfinfo->dobj.namespace->dobj.name, opfinfo->rolname,
14954 0 : opfinfo->dobj.catId, 0, opfinfo->dobj.dumpId);
14955 :
14956 1098 : free(amname);
14957 1098 : PQclear(res_ops);
14958 1098 : PQclear(res_procs);
14959 1098 : destroyPQExpBuffer(query);
14960 1098 : destroyPQExpBuffer(q);
14961 1098 : destroyPQExpBuffer(delq);
14962 1098 : destroyPQExpBuffer(nameusing);
14963 : }
14964 :
14965 : /*
14966 : * dumpCollation
14967 : * write out a single collation definition
14968 : */
14969 : static void
14970 5098 : dumpCollation(Archive *fout, const CollInfo *collinfo)
14971 : {
14972 5098 : DumpOptions *dopt = fout->dopt;
14973 : PQExpBuffer query;
14974 : PQExpBuffer q;
14975 : PQExpBuffer delq;
14976 : char *qcollname;
14977 : PGresult *res;
14978 : int i_collprovider;
14979 : int i_collisdeterministic;
14980 : int i_collcollate;
14981 : int i_collctype;
14982 : int i_colllocale;
14983 : int i_collicurules;
14984 : const char *collprovider;
14985 : const char *collcollate;
14986 : const char *collctype;
14987 : const char *colllocale;
14988 : const char *collicurules;
14989 :
14990 : /* Do nothing if not dumping schema */
14991 5098 : if (!dopt->dumpSchema)
14992 24 : return;
14993 :
14994 5074 : query = createPQExpBuffer();
14995 5074 : q = createPQExpBuffer();
14996 5074 : delq = createPQExpBuffer();
14997 :
14998 5074 : qcollname = pg_strdup(fmtId(collinfo->dobj.name));
14999 :
15000 : /* Get collation-specific details */
15001 5074 : appendPQExpBufferStr(query, "SELECT ");
15002 :
15003 5074 : if (fout->remoteVersion >= 100000)
15004 5074 : appendPQExpBufferStr(query,
15005 : "collprovider, "
15006 : "collversion, ");
15007 : else
15008 0 : appendPQExpBufferStr(query,
15009 : "'c' AS collprovider, "
15010 : "NULL AS collversion, ");
15011 :
15012 5074 : if (fout->remoteVersion >= 120000)
15013 5074 : appendPQExpBufferStr(query,
15014 : "collisdeterministic, ");
15015 : else
15016 0 : appendPQExpBufferStr(query,
15017 : "true AS collisdeterministic, ");
15018 :
15019 5074 : if (fout->remoteVersion >= 170000)
15020 5074 : appendPQExpBufferStr(query,
15021 : "colllocale, ");
15022 0 : else if (fout->remoteVersion >= 150000)
15023 0 : appendPQExpBufferStr(query,
15024 : "colliculocale AS colllocale, ");
15025 : else
15026 0 : appendPQExpBufferStr(query,
15027 : "NULL AS colllocale, ");
15028 :
15029 5074 : if (fout->remoteVersion >= 160000)
15030 5074 : appendPQExpBufferStr(query,
15031 : "collicurules, ");
15032 : else
15033 0 : appendPQExpBufferStr(query,
15034 : "NULL AS collicurules, ");
15035 :
15036 5074 : appendPQExpBuffer(query,
15037 : "collcollate, "
15038 : "collctype "
15039 : "FROM pg_catalog.pg_collation c "
15040 : "WHERE c.oid = '%u'::pg_catalog.oid",
15041 5074 : collinfo->dobj.catId.oid);
15042 :
15043 5074 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
15044 :
15045 5074 : i_collprovider = PQfnumber(res, "collprovider");
15046 5074 : i_collisdeterministic = PQfnumber(res, "collisdeterministic");
15047 5074 : i_collcollate = PQfnumber(res, "collcollate");
15048 5074 : i_collctype = PQfnumber(res, "collctype");
15049 5074 : i_colllocale = PQfnumber(res, "colllocale");
15050 5074 : i_collicurules = PQfnumber(res, "collicurules");
15051 :
15052 5074 : collprovider = PQgetvalue(res, 0, i_collprovider);
15053 :
15054 5074 : if (!PQgetisnull(res, 0, i_collcollate))
15055 104 : collcollate = PQgetvalue(res, 0, i_collcollate);
15056 : else
15057 4970 : collcollate = NULL;
15058 :
15059 5074 : if (!PQgetisnull(res, 0, i_collctype))
15060 104 : collctype = PQgetvalue(res, 0, i_collctype);
15061 : else
15062 4970 : collctype = NULL;
15063 :
15064 : /*
15065 : * Before version 15, collcollate and collctype were of type NAME and
15066 : * non-nullable. Treat empty strings as NULL for consistency.
15067 : */
15068 5074 : if (fout->remoteVersion < 150000)
15069 : {
15070 0 : if (collcollate[0] == '\0')
15071 0 : collcollate = NULL;
15072 0 : if (collctype[0] == '\0')
15073 0 : collctype = NULL;
15074 : }
15075 :
15076 5074 : if (!PQgetisnull(res, 0, i_colllocale))
15077 4964 : colllocale = PQgetvalue(res, 0, i_colllocale);
15078 : else
15079 110 : colllocale = NULL;
15080 :
15081 5074 : if (!PQgetisnull(res, 0, i_collicurules))
15082 0 : collicurules = PQgetvalue(res, 0, i_collicurules);
15083 : else
15084 5074 : collicurules = NULL;
15085 :
15086 5074 : appendPQExpBuffer(delq, "DROP COLLATION %s;\n",
15087 5074 : fmtQualifiedDumpable(collinfo));
15088 :
15089 5074 : appendPQExpBuffer(q, "CREATE COLLATION %s (",
15090 5074 : fmtQualifiedDumpable(collinfo));
15091 :
15092 5074 : appendPQExpBufferStr(q, "provider = ");
15093 5074 : if (collprovider[0] == 'b')
15094 38 : appendPQExpBufferStr(q, "builtin");
15095 5036 : else if (collprovider[0] == 'c')
15096 104 : appendPQExpBufferStr(q, "libc");
15097 4932 : else if (collprovider[0] == 'i')
15098 4926 : appendPQExpBufferStr(q, "icu");
15099 6 : else if (collprovider[0] == 'd')
15100 : /* to allow dumping pg_catalog; not accepted on input */
15101 6 : appendPQExpBufferStr(q, "default");
15102 : else
15103 0 : pg_fatal("unrecognized collation provider: %s",
15104 : collprovider);
15105 :
15106 5074 : if (strcmp(PQgetvalue(res, 0, i_collisdeterministic), "f") == 0)
15107 0 : appendPQExpBufferStr(q, ", deterministic = false");
15108 :
15109 5074 : if (collprovider[0] == 'd')
15110 : {
15111 6 : if (collcollate || collctype || colllocale || collicurules)
15112 0 : pg_log_warning("invalid collation \"%s\"", qcollname);
15113 :
15114 : /* no locale -- the default collation cannot be reloaded anyway */
15115 : }
15116 5068 : else if (collprovider[0] == 'b')
15117 : {
15118 38 : if (collcollate || collctype || !colllocale || collicurules)
15119 0 : pg_log_warning("invalid collation \"%s\"", qcollname);
15120 :
15121 38 : appendPQExpBufferStr(q, ", locale = ");
15122 38 : appendStringLiteralAH(q, colllocale ? colllocale : "",
15123 : fout);
15124 : }
15125 5030 : else if (collprovider[0] == 'i')
15126 : {
15127 4926 : if (fout->remoteVersion >= 150000)
15128 : {
15129 4926 : if (collcollate || collctype || !colllocale)
15130 0 : pg_log_warning("invalid collation \"%s\"", qcollname);
15131 :
15132 4926 : appendPQExpBufferStr(q, ", locale = ");
15133 4926 : appendStringLiteralAH(q, colllocale ? colllocale : "",
15134 : fout);
15135 : }
15136 : else
15137 : {
15138 0 : if (!collcollate || !collctype || colllocale ||
15139 0 : strcmp(collcollate, collctype) != 0)
15140 0 : pg_log_warning("invalid collation \"%s\"", qcollname);
15141 :
15142 0 : appendPQExpBufferStr(q, ", locale = ");
15143 0 : appendStringLiteralAH(q, collcollate ? collcollate : "", fout);
15144 : }
15145 :
15146 4926 : if (collicurules)
15147 : {
15148 0 : appendPQExpBufferStr(q, ", rules = ");
15149 0 : appendStringLiteralAH(q, collicurules ? collicurules : "", fout);
15150 : }
15151 : }
15152 104 : else if (collprovider[0] == 'c')
15153 : {
15154 104 : if (colllocale || collicurules || !collcollate || !collctype)
15155 0 : pg_log_warning("invalid collation \"%s\"", qcollname);
15156 :
15157 104 : if (collcollate && collctype && strcmp(collcollate, collctype) == 0)
15158 : {
15159 104 : appendPQExpBufferStr(q, ", locale = ");
15160 104 : appendStringLiteralAH(q, collcollate ? collcollate : "", fout);
15161 : }
15162 : else
15163 : {
15164 0 : appendPQExpBufferStr(q, ", lc_collate = ");
15165 0 : appendStringLiteralAH(q, collcollate ? collcollate : "", fout);
15166 0 : appendPQExpBufferStr(q, ", lc_ctype = ");
15167 0 : appendStringLiteralAH(q, collctype ? collctype : "", fout);
15168 : }
15169 : }
15170 : else
15171 0 : pg_fatal("unrecognized collation provider: %s", collprovider);
15172 :
15173 : /*
15174 : * For binary upgrade, carry over the collation version. For normal
15175 : * dump/restore, omit the version, so that it is computed upon restore.
15176 : */
15177 5074 : if (dopt->binary_upgrade)
15178 : {
15179 : int i_collversion;
15180 :
15181 10 : i_collversion = PQfnumber(res, "collversion");
15182 10 : if (!PQgetisnull(res, 0, i_collversion))
15183 : {
15184 8 : appendPQExpBufferStr(q, ", version = ");
15185 8 : appendStringLiteralAH(q,
15186 : PQgetvalue(res, 0, i_collversion),
15187 : fout);
15188 : }
15189 : }
15190 :
15191 5074 : appendPQExpBufferStr(q, ");\n");
15192 :
15193 5074 : if (dopt->binary_upgrade)
15194 10 : binary_upgrade_extension_member(q, &collinfo->dobj,
15195 : "COLLATION", qcollname,
15196 10 : collinfo->dobj.namespace->dobj.name);
15197 :
15198 5074 : if (collinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15199 5074 : ArchiveEntry(fout, collinfo->dobj.catId, collinfo->dobj.dumpId,
15200 5074 : ARCHIVE_OPTS(.tag = collinfo->dobj.name,
15201 : .namespace = collinfo->dobj.namespace->dobj.name,
15202 : .owner = collinfo->rolname,
15203 : .description = "COLLATION",
15204 : .section = SECTION_PRE_DATA,
15205 : .createStmt = q->data,
15206 : .dropStmt = delq->data));
15207 :
15208 : /* Dump Collation Comments */
15209 5074 : if (collinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15210 4874 : dumpComment(fout, "COLLATION", qcollname,
15211 4874 : collinfo->dobj.namespace->dobj.name, collinfo->rolname,
15212 4874 : collinfo->dobj.catId, 0, collinfo->dobj.dumpId);
15213 :
15214 5074 : PQclear(res);
15215 :
15216 5074 : destroyPQExpBuffer(query);
15217 5074 : destroyPQExpBuffer(q);
15218 5074 : destroyPQExpBuffer(delq);
15219 5074 : free(qcollname);
15220 : }
15221 :
15222 : /*
15223 : * dumpConversion
15224 : * write out a single conversion definition
15225 : */
15226 : static void
15227 856 : dumpConversion(Archive *fout, const ConvInfo *convinfo)
15228 : {
15229 856 : DumpOptions *dopt = fout->dopt;
15230 : PQExpBuffer query;
15231 : PQExpBuffer q;
15232 : PQExpBuffer delq;
15233 : char *qconvname;
15234 : PGresult *res;
15235 : int i_conforencoding;
15236 : int i_contoencoding;
15237 : int i_conproc;
15238 : int i_condefault;
15239 : const char *conforencoding;
15240 : const char *contoencoding;
15241 : const char *conproc;
15242 : bool condefault;
15243 :
15244 : /* Do nothing if not dumping schema */
15245 856 : if (!dopt->dumpSchema)
15246 12 : return;
15247 :
15248 844 : query = createPQExpBuffer();
15249 844 : q = createPQExpBuffer();
15250 844 : delq = createPQExpBuffer();
15251 :
15252 844 : qconvname = pg_strdup(fmtId(convinfo->dobj.name));
15253 :
15254 : /* Get conversion-specific details */
15255 844 : appendPQExpBuffer(query, "SELECT "
15256 : "pg_catalog.pg_encoding_to_char(conforencoding) AS conforencoding, "
15257 : "pg_catalog.pg_encoding_to_char(contoencoding) AS contoencoding, "
15258 : "conproc, condefault "
15259 : "FROM pg_catalog.pg_conversion c "
15260 : "WHERE c.oid = '%u'::pg_catalog.oid",
15261 844 : convinfo->dobj.catId.oid);
15262 :
15263 844 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
15264 :
15265 844 : i_conforencoding = PQfnumber(res, "conforencoding");
15266 844 : i_contoencoding = PQfnumber(res, "contoencoding");
15267 844 : i_conproc = PQfnumber(res, "conproc");
15268 844 : i_condefault = PQfnumber(res, "condefault");
15269 :
15270 844 : conforencoding = PQgetvalue(res, 0, i_conforencoding);
15271 844 : contoencoding = PQgetvalue(res, 0, i_contoencoding);
15272 844 : conproc = PQgetvalue(res, 0, i_conproc);
15273 844 : condefault = (PQgetvalue(res, 0, i_condefault)[0] == 't');
15274 :
15275 844 : appendPQExpBuffer(delq, "DROP CONVERSION %s;\n",
15276 844 : fmtQualifiedDumpable(convinfo));
15277 :
15278 844 : appendPQExpBuffer(q, "CREATE %sCONVERSION %s FOR ",
15279 : (condefault) ? "DEFAULT " : "",
15280 844 : fmtQualifiedDumpable(convinfo));
15281 844 : appendStringLiteralAH(q, conforencoding, fout);
15282 844 : appendPQExpBufferStr(q, " TO ");
15283 844 : appendStringLiteralAH(q, contoencoding, fout);
15284 : /* regproc output is already sufficiently quoted */
15285 844 : appendPQExpBuffer(q, " FROM %s;\n", conproc);
15286 :
15287 844 : if (dopt->binary_upgrade)
15288 2 : binary_upgrade_extension_member(q, &convinfo->dobj,
15289 : "CONVERSION", qconvname,
15290 2 : convinfo->dobj.namespace->dobj.name);
15291 :
15292 844 : if (convinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15293 844 : ArchiveEntry(fout, convinfo->dobj.catId, convinfo->dobj.dumpId,
15294 844 : ARCHIVE_OPTS(.tag = convinfo->dobj.name,
15295 : .namespace = convinfo->dobj.namespace->dobj.name,
15296 : .owner = convinfo->rolname,
15297 : .description = "CONVERSION",
15298 : .section = SECTION_PRE_DATA,
15299 : .createStmt = q->data,
15300 : .dropStmt = delq->data));
15301 :
15302 : /* Dump Conversion Comments */
15303 844 : if (convinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15304 844 : dumpComment(fout, "CONVERSION", qconvname,
15305 844 : convinfo->dobj.namespace->dobj.name, convinfo->rolname,
15306 844 : convinfo->dobj.catId, 0, convinfo->dobj.dumpId);
15307 :
15308 844 : PQclear(res);
15309 :
15310 844 : destroyPQExpBuffer(query);
15311 844 : destroyPQExpBuffer(q);
15312 844 : destroyPQExpBuffer(delq);
15313 844 : free(qconvname);
15314 : }
15315 :
15316 : /*
15317 : * format_aggregate_signature: generate aggregate name and argument list
15318 : *
15319 : * The argument type names are qualified if needed. The aggregate name
15320 : * is never qualified.
15321 : */
15322 : static char *
15323 582 : format_aggregate_signature(const AggInfo *agginfo, Archive *fout, bool honor_quotes)
15324 : {
15325 : PQExpBufferData buf;
15326 : int j;
15327 :
15328 582 : initPQExpBuffer(&buf);
15329 582 : if (honor_quotes)
15330 0 : appendPQExpBufferStr(&buf, fmtId(agginfo->aggfn.dobj.name));
15331 : else
15332 582 : appendPQExpBufferStr(&buf, agginfo->aggfn.dobj.name);
15333 :
15334 582 : if (agginfo->aggfn.nargs == 0)
15335 80 : appendPQExpBufferStr(&buf, "(*)");
15336 : else
15337 : {
15338 502 : appendPQExpBufferChar(&buf, '(');
15339 1094 : for (j = 0; j < agginfo->aggfn.nargs; j++)
15340 592 : appendPQExpBuffer(&buf, "%s%s",
15341 : (j > 0) ? ", " : "",
15342 : getFormattedTypeName(fout,
15343 592 : agginfo->aggfn.argtypes[j],
15344 : zeroIsError));
15345 502 : appendPQExpBufferChar(&buf, ')');
15346 : }
15347 582 : return buf.data;
15348 : }
15349 :
15350 : /*
15351 : * dumpAgg
15352 : * write out a single aggregate definition
15353 : */
15354 : static void
15355 596 : dumpAgg(Archive *fout, const AggInfo *agginfo)
15356 : {
15357 596 : DumpOptions *dopt = fout->dopt;
15358 : PQExpBuffer query;
15359 : PQExpBuffer q;
15360 : PQExpBuffer delq;
15361 : PQExpBuffer details;
15362 : char *aggsig; /* identity signature */
15363 596 : char *aggfullsig = NULL; /* full signature */
15364 : char *aggsig_tag;
15365 : PGresult *res;
15366 : int i_agginitval;
15367 : int i_aggminitval;
15368 : const char *aggtransfn;
15369 : const char *aggfinalfn;
15370 : const char *aggcombinefn;
15371 : const char *aggserialfn;
15372 : const char *aggdeserialfn;
15373 : const char *aggmtransfn;
15374 : const char *aggminvtransfn;
15375 : const char *aggmfinalfn;
15376 : bool aggfinalextra;
15377 : bool aggmfinalextra;
15378 : char aggfinalmodify;
15379 : char aggmfinalmodify;
15380 : const char *aggsortop;
15381 : char *aggsortconvop;
15382 : char aggkind;
15383 : const char *aggtranstype;
15384 : const char *aggtransspace;
15385 : const char *aggmtranstype;
15386 : const char *aggmtransspace;
15387 : const char *agginitval;
15388 : const char *aggminitval;
15389 : const char *proparallel;
15390 : char defaultfinalmodify;
15391 :
15392 : /* Do nothing if not dumping schema */
15393 596 : if (!dopt->dumpSchema)
15394 14 : return;
15395 :
15396 582 : query = createPQExpBuffer();
15397 582 : q = createPQExpBuffer();
15398 582 : delq = createPQExpBuffer();
15399 582 : details = createPQExpBuffer();
15400 :
15401 582 : if (!fout->is_prepared[PREPQUERY_DUMPAGG])
15402 : {
15403 : /* Set up query for aggregate-specific details */
15404 122 : appendPQExpBufferStr(query,
15405 : "PREPARE dumpAgg(pg_catalog.oid) AS\n");
15406 :
15407 122 : appendPQExpBufferStr(query,
15408 : "SELECT "
15409 : "aggtransfn,\n"
15410 : "aggfinalfn,\n"
15411 : "aggtranstype::pg_catalog.regtype,\n"
15412 : "agginitval,\n"
15413 : "aggsortop,\n"
15414 : "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs,\n"
15415 : "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs,\n");
15416 :
15417 122 : if (fout->remoteVersion >= 90400)
15418 122 : appendPQExpBufferStr(query,
15419 : "aggkind,\n"
15420 : "aggmtransfn,\n"
15421 : "aggminvtransfn,\n"
15422 : "aggmfinalfn,\n"
15423 : "aggmtranstype::pg_catalog.regtype,\n"
15424 : "aggfinalextra,\n"
15425 : "aggmfinalextra,\n"
15426 : "aggtransspace,\n"
15427 : "aggmtransspace,\n"
15428 : "aggminitval,\n");
15429 : else
15430 0 : appendPQExpBufferStr(query,
15431 : "'n' AS aggkind,\n"
15432 : "'-' AS aggmtransfn,\n"
15433 : "'-' AS aggminvtransfn,\n"
15434 : "'-' AS aggmfinalfn,\n"
15435 : "0 AS aggmtranstype,\n"
15436 : "false AS aggfinalextra,\n"
15437 : "false AS aggmfinalextra,\n"
15438 : "0 AS aggtransspace,\n"
15439 : "0 AS aggmtransspace,\n"
15440 : "NULL AS aggminitval,\n");
15441 :
15442 122 : if (fout->remoteVersion >= 90600)
15443 122 : appendPQExpBufferStr(query,
15444 : "aggcombinefn,\n"
15445 : "aggserialfn,\n"
15446 : "aggdeserialfn,\n"
15447 : "proparallel,\n");
15448 : else
15449 0 : appendPQExpBufferStr(query,
15450 : "'-' AS aggcombinefn,\n"
15451 : "'-' AS aggserialfn,\n"
15452 : "'-' AS aggdeserialfn,\n"
15453 : "'u' AS proparallel,\n");
15454 :
15455 122 : if (fout->remoteVersion >= 110000)
15456 122 : appendPQExpBufferStr(query,
15457 : "aggfinalmodify,\n"
15458 : "aggmfinalmodify\n");
15459 : else
15460 0 : appendPQExpBufferStr(query,
15461 : "'0' AS aggfinalmodify,\n"
15462 : "'0' AS aggmfinalmodify\n");
15463 :
15464 122 : appendPQExpBufferStr(query,
15465 : "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
15466 : "WHERE a.aggfnoid = p.oid "
15467 : "AND p.oid = $1");
15468 :
15469 122 : ExecuteSqlStatement(fout, query->data);
15470 :
15471 122 : fout->is_prepared[PREPQUERY_DUMPAGG] = true;
15472 : }
15473 :
15474 582 : printfPQExpBuffer(query,
15475 : "EXECUTE dumpAgg('%u')",
15476 582 : agginfo->aggfn.dobj.catId.oid);
15477 :
15478 582 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
15479 :
15480 582 : i_agginitval = PQfnumber(res, "agginitval");
15481 582 : i_aggminitval = PQfnumber(res, "aggminitval");
15482 :
15483 582 : aggtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggtransfn"));
15484 582 : aggfinalfn = PQgetvalue(res, 0, PQfnumber(res, "aggfinalfn"));
15485 582 : aggcombinefn = PQgetvalue(res, 0, PQfnumber(res, "aggcombinefn"));
15486 582 : aggserialfn = PQgetvalue(res, 0, PQfnumber(res, "aggserialfn"));
15487 582 : aggdeserialfn = PQgetvalue(res, 0, PQfnumber(res, "aggdeserialfn"));
15488 582 : aggmtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggmtransfn"));
15489 582 : aggminvtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggminvtransfn"));
15490 582 : aggmfinalfn = PQgetvalue(res, 0, PQfnumber(res, "aggmfinalfn"));
15491 582 : aggfinalextra = (PQgetvalue(res, 0, PQfnumber(res, "aggfinalextra"))[0] == 't');
15492 582 : aggmfinalextra = (PQgetvalue(res, 0, PQfnumber(res, "aggmfinalextra"))[0] == 't');
15493 582 : aggfinalmodify = PQgetvalue(res, 0, PQfnumber(res, "aggfinalmodify"))[0];
15494 582 : aggmfinalmodify = PQgetvalue(res, 0, PQfnumber(res, "aggmfinalmodify"))[0];
15495 582 : aggsortop = PQgetvalue(res, 0, PQfnumber(res, "aggsortop"));
15496 582 : aggkind = PQgetvalue(res, 0, PQfnumber(res, "aggkind"))[0];
15497 582 : aggtranstype = PQgetvalue(res, 0, PQfnumber(res, "aggtranstype"));
15498 582 : aggtransspace = PQgetvalue(res, 0, PQfnumber(res, "aggtransspace"));
15499 582 : aggmtranstype = PQgetvalue(res, 0, PQfnumber(res, "aggmtranstype"));
15500 582 : aggmtransspace = PQgetvalue(res, 0, PQfnumber(res, "aggmtransspace"));
15501 582 : agginitval = PQgetvalue(res, 0, i_agginitval);
15502 582 : aggminitval = PQgetvalue(res, 0, i_aggminitval);
15503 582 : proparallel = PQgetvalue(res, 0, PQfnumber(res, "proparallel"));
15504 :
15505 : {
15506 : char *funcargs;
15507 : char *funciargs;
15508 :
15509 582 : funcargs = PQgetvalue(res, 0, PQfnumber(res, "funcargs"));
15510 582 : funciargs = PQgetvalue(res, 0, PQfnumber(res, "funciargs"));
15511 582 : aggfullsig = format_function_arguments(&agginfo->aggfn, funcargs, true);
15512 582 : aggsig = format_function_arguments(&agginfo->aggfn, funciargs, true);
15513 : }
15514 :
15515 582 : aggsig_tag = format_aggregate_signature(agginfo, fout, false);
15516 :
15517 : /* identify default modify flag for aggkind (must match DefineAggregate) */
15518 582 : defaultfinalmodify = (aggkind == AGGKIND_NORMAL) ? AGGMODIFY_READ_ONLY : AGGMODIFY_READ_WRITE;
15519 : /* replace omitted flags for old versions */
15520 582 : if (aggfinalmodify == '0')
15521 0 : aggfinalmodify = defaultfinalmodify;
15522 582 : if (aggmfinalmodify == '0')
15523 0 : aggmfinalmodify = defaultfinalmodify;
15524 :
15525 : /* regproc and regtype output is already sufficiently quoted */
15526 582 : appendPQExpBuffer(details, " SFUNC = %s,\n STYPE = %s",
15527 : aggtransfn, aggtranstype);
15528 :
15529 582 : if (strcmp(aggtransspace, "0") != 0)
15530 : {
15531 10 : appendPQExpBuffer(details, ",\n SSPACE = %s",
15532 : aggtransspace);
15533 : }
15534 :
15535 582 : if (!PQgetisnull(res, 0, i_agginitval))
15536 : {
15537 426 : appendPQExpBufferStr(details, ",\n INITCOND = ");
15538 426 : appendStringLiteralAH(details, agginitval, fout);
15539 : }
15540 :
15541 582 : if (strcmp(aggfinalfn, "-") != 0)
15542 : {
15543 276 : appendPQExpBuffer(details, ",\n FINALFUNC = %s",
15544 : aggfinalfn);
15545 276 : if (aggfinalextra)
15546 20 : appendPQExpBufferStr(details, ",\n FINALFUNC_EXTRA");
15547 276 : if (aggfinalmodify != defaultfinalmodify)
15548 : {
15549 76 : switch (aggfinalmodify)
15550 : {
15551 0 : case AGGMODIFY_READ_ONLY:
15552 0 : appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = READ_ONLY");
15553 0 : break;
15554 76 : case AGGMODIFY_SHAREABLE:
15555 76 : appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = SHAREABLE");
15556 76 : break;
15557 0 : case AGGMODIFY_READ_WRITE:
15558 0 : appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = READ_WRITE");
15559 0 : break;
15560 0 : default:
15561 0 : pg_fatal("unrecognized aggfinalmodify value for aggregate \"%s\"",
15562 : agginfo->aggfn.dobj.name);
15563 : break;
15564 : }
15565 : }
15566 : }
15567 :
15568 582 : if (strcmp(aggcombinefn, "-") != 0)
15569 0 : appendPQExpBuffer(details, ",\n COMBINEFUNC = %s", aggcombinefn);
15570 :
15571 582 : if (strcmp(aggserialfn, "-") != 0)
15572 0 : appendPQExpBuffer(details, ",\n SERIALFUNC = %s", aggserialfn);
15573 :
15574 582 : if (strcmp(aggdeserialfn, "-") != 0)
15575 0 : appendPQExpBuffer(details, ",\n DESERIALFUNC = %s", aggdeserialfn);
15576 :
15577 582 : if (strcmp(aggmtransfn, "-") != 0)
15578 : {
15579 60 : appendPQExpBuffer(details, ",\n MSFUNC = %s,\n MINVFUNC = %s,\n MSTYPE = %s",
15580 : aggmtransfn,
15581 : aggminvtransfn,
15582 : aggmtranstype);
15583 : }
15584 :
15585 582 : if (strcmp(aggmtransspace, "0") != 0)
15586 : {
15587 0 : appendPQExpBuffer(details, ",\n MSSPACE = %s",
15588 : aggmtransspace);
15589 : }
15590 :
15591 582 : if (!PQgetisnull(res, 0, i_aggminitval))
15592 : {
15593 20 : appendPQExpBufferStr(details, ",\n MINITCOND = ");
15594 20 : appendStringLiteralAH(details, aggminitval, fout);
15595 : }
15596 :
15597 582 : if (strcmp(aggmfinalfn, "-") != 0)
15598 : {
15599 0 : appendPQExpBuffer(details, ",\n MFINALFUNC = %s",
15600 : aggmfinalfn);
15601 0 : if (aggmfinalextra)
15602 0 : appendPQExpBufferStr(details, ",\n MFINALFUNC_EXTRA");
15603 0 : if (aggmfinalmodify != defaultfinalmodify)
15604 : {
15605 0 : switch (aggmfinalmodify)
15606 : {
15607 0 : case AGGMODIFY_READ_ONLY:
15608 0 : appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = READ_ONLY");
15609 0 : break;
15610 0 : case AGGMODIFY_SHAREABLE:
15611 0 : appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = SHAREABLE");
15612 0 : break;
15613 0 : case AGGMODIFY_READ_WRITE:
15614 0 : appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = READ_WRITE");
15615 0 : break;
15616 0 : default:
15617 0 : pg_fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"",
15618 : agginfo->aggfn.dobj.name);
15619 : break;
15620 : }
15621 : }
15622 : }
15623 :
15624 582 : aggsortconvop = getFormattedOperatorName(aggsortop);
15625 582 : if (aggsortconvop)
15626 : {
15627 0 : appendPQExpBuffer(details, ",\n SORTOP = %s",
15628 : aggsortconvop);
15629 0 : free(aggsortconvop);
15630 : }
15631 :
15632 582 : if (aggkind == AGGKIND_HYPOTHETICAL)
15633 10 : appendPQExpBufferStr(details, ",\n HYPOTHETICAL");
15634 :
15635 582 : if (proparallel[0] != PROPARALLEL_UNSAFE)
15636 : {
15637 10 : if (proparallel[0] == PROPARALLEL_SAFE)
15638 10 : appendPQExpBufferStr(details, ",\n PARALLEL = safe");
15639 0 : else if (proparallel[0] == PROPARALLEL_RESTRICTED)
15640 0 : appendPQExpBufferStr(details, ",\n PARALLEL = restricted");
15641 0 : else if (proparallel[0] != PROPARALLEL_UNSAFE)
15642 0 : pg_fatal("unrecognized proparallel value for function \"%s\"",
15643 : agginfo->aggfn.dobj.name);
15644 : }
15645 :
15646 582 : appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n",
15647 582 : fmtId(agginfo->aggfn.dobj.namespace->dobj.name),
15648 : aggsig);
15649 :
15650 1164 : appendPQExpBuffer(q, "CREATE AGGREGATE %s.%s (\n%s\n);\n",
15651 582 : fmtId(agginfo->aggfn.dobj.namespace->dobj.name),
15652 : aggfullsig ? aggfullsig : aggsig, details->data);
15653 :
15654 582 : if (dopt->binary_upgrade)
15655 98 : binary_upgrade_extension_member(q, &agginfo->aggfn.dobj,
15656 : "AGGREGATE", aggsig,
15657 98 : agginfo->aggfn.dobj.namespace->dobj.name);
15658 :
15659 582 : if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_DEFINITION)
15660 548 : ArchiveEntry(fout, agginfo->aggfn.dobj.catId,
15661 548 : agginfo->aggfn.dobj.dumpId,
15662 548 : ARCHIVE_OPTS(.tag = aggsig_tag,
15663 : .namespace = agginfo->aggfn.dobj.namespace->dobj.name,
15664 : .owner = agginfo->aggfn.rolname,
15665 : .description = "AGGREGATE",
15666 : .section = SECTION_PRE_DATA,
15667 : .createStmt = q->data,
15668 : .dropStmt = delq->data));
15669 :
15670 : /* Dump Aggregate Comments */
15671 582 : if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_COMMENT)
15672 20 : dumpComment(fout, "AGGREGATE", aggsig,
15673 20 : agginfo->aggfn.dobj.namespace->dobj.name,
15674 20 : agginfo->aggfn.rolname,
15675 20 : agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
15676 :
15677 582 : if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_SECLABEL)
15678 0 : dumpSecLabel(fout, "AGGREGATE", aggsig,
15679 0 : agginfo->aggfn.dobj.namespace->dobj.name,
15680 0 : agginfo->aggfn.rolname,
15681 0 : agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
15682 :
15683 : /*
15684 : * Since there is no GRANT ON AGGREGATE syntax, we have to make the ACL
15685 : * command look like a function's GRANT; in particular this affects the
15686 : * syntax for zero-argument aggregates and ordered-set aggregates.
15687 : */
15688 582 : free(aggsig);
15689 :
15690 582 : aggsig = format_function_signature(fout, &agginfo->aggfn, true);
15691 :
15692 582 : if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_ACL)
15693 36 : dumpACL(fout, agginfo->aggfn.dobj.dumpId, InvalidDumpId,
15694 : "FUNCTION", aggsig, NULL,
15695 36 : agginfo->aggfn.dobj.namespace->dobj.name,
15696 36 : NULL, agginfo->aggfn.rolname, &agginfo->aggfn.dacl);
15697 :
15698 582 : free(aggsig);
15699 582 : free(aggfullsig);
15700 582 : free(aggsig_tag);
15701 :
15702 582 : PQclear(res);
15703 :
15704 582 : destroyPQExpBuffer(query);
15705 582 : destroyPQExpBuffer(q);
15706 582 : destroyPQExpBuffer(delq);
15707 582 : destroyPQExpBuffer(details);
15708 : }
15709 :
15710 : /*
15711 : * dumpTSParser
15712 : * write out a single text search parser
15713 : */
15714 : static void
15715 94 : dumpTSParser(Archive *fout, const TSParserInfo *prsinfo)
15716 : {
15717 94 : DumpOptions *dopt = fout->dopt;
15718 : PQExpBuffer q;
15719 : PQExpBuffer delq;
15720 : char *qprsname;
15721 :
15722 : /* Do nothing if not dumping schema */
15723 94 : if (!dopt->dumpSchema)
15724 12 : return;
15725 :
15726 82 : q = createPQExpBuffer();
15727 82 : delq = createPQExpBuffer();
15728 :
15729 82 : qprsname = pg_strdup(fmtId(prsinfo->dobj.name));
15730 :
15731 82 : appendPQExpBuffer(q, "CREATE TEXT SEARCH PARSER %s (\n",
15732 82 : fmtQualifiedDumpable(prsinfo));
15733 :
15734 82 : appendPQExpBuffer(q, " START = %s,\n",
15735 82 : convertTSFunction(fout, prsinfo->prsstart));
15736 82 : appendPQExpBuffer(q, " GETTOKEN = %s,\n",
15737 82 : convertTSFunction(fout, prsinfo->prstoken));
15738 82 : appendPQExpBuffer(q, " END = %s,\n",
15739 82 : convertTSFunction(fout, prsinfo->prsend));
15740 82 : if (prsinfo->prsheadline != InvalidOid)
15741 6 : appendPQExpBuffer(q, " HEADLINE = %s,\n",
15742 6 : convertTSFunction(fout, prsinfo->prsheadline));
15743 82 : appendPQExpBuffer(q, " LEXTYPES = %s );\n",
15744 82 : convertTSFunction(fout, prsinfo->prslextype));
15745 :
15746 82 : appendPQExpBuffer(delq, "DROP TEXT SEARCH PARSER %s;\n",
15747 82 : fmtQualifiedDumpable(prsinfo));
15748 :
15749 82 : if (dopt->binary_upgrade)
15750 2 : binary_upgrade_extension_member(q, &prsinfo->dobj,
15751 : "TEXT SEARCH PARSER", qprsname,
15752 2 : prsinfo->dobj.namespace->dobj.name);
15753 :
15754 82 : if (prsinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15755 82 : ArchiveEntry(fout, prsinfo->dobj.catId, prsinfo->dobj.dumpId,
15756 82 : ARCHIVE_OPTS(.tag = prsinfo->dobj.name,
15757 : .namespace = prsinfo->dobj.namespace->dobj.name,
15758 : .description = "TEXT SEARCH PARSER",
15759 : .section = SECTION_PRE_DATA,
15760 : .createStmt = q->data,
15761 : .dropStmt = delq->data));
15762 :
15763 : /* Dump Parser Comments */
15764 82 : if (prsinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15765 82 : dumpComment(fout, "TEXT SEARCH PARSER", qprsname,
15766 82 : prsinfo->dobj.namespace->dobj.name, "",
15767 82 : prsinfo->dobj.catId, 0, prsinfo->dobj.dumpId);
15768 :
15769 82 : destroyPQExpBuffer(q);
15770 82 : destroyPQExpBuffer(delq);
15771 82 : free(qprsname);
15772 : }
15773 :
15774 : /*
15775 : * dumpTSDictionary
15776 : * write out a single text search dictionary
15777 : */
15778 : static void
15779 358 : dumpTSDictionary(Archive *fout, const TSDictInfo *dictinfo)
15780 : {
15781 358 : DumpOptions *dopt = fout->dopt;
15782 : PQExpBuffer q;
15783 : PQExpBuffer delq;
15784 : PQExpBuffer query;
15785 : char *qdictname;
15786 : PGresult *res;
15787 : char *nspname;
15788 : char *tmplname;
15789 :
15790 : /* Do nothing if not dumping schema */
15791 358 : if (!dopt->dumpSchema)
15792 12 : return;
15793 :
15794 346 : q = createPQExpBuffer();
15795 346 : delq = createPQExpBuffer();
15796 346 : query = createPQExpBuffer();
15797 :
15798 346 : qdictname = pg_strdup(fmtId(dictinfo->dobj.name));
15799 :
15800 : /* Fetch name and namespace of the dictionary's template */
15801 346 : appendPQExpBuffer(query, "SELECT nspname, tmplname "
15802 : "FROM pg_ts_template p, pg_namespace n "
15803 : "WHERE p.oid = '%u' AND n.oid = tmplnamespace",
15804 346 : dictinfo->dicttemplate);
15805 346 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
15806 346 : nspname = PQgetvalue(res, 0, 0);
15807 346 : tmplname = PQgetvalue(res, 0, 1);
15808 :
15809 346 : appendPQExpBuffer(q, "CREATE TEXT SEARCH DICTIONARY %s (\n",
15810 346 : fmtQualifiedDumpable(dictinfo));
15811 :
15812 346 : appendPQExpBufferStr(q, " TEMPLATE = ");
15813 346 : appendPQExpBuffer(q, "%s.", fmtId(nspname));
15814 346 : appendPQExpBufferStr(q, fmtId(tmplname));
15815 :
15816 346 : PQclear(res);
15817 :
15818 : /* the dictinitoption can be dumped straight into the command */
15819 346 : if (dictinfo->dictinitoption)
15820 264 : appendPQExpBuffer(q, ",\n %s", dictinfo->dictinitoption);
15821 :
15822 346 : appendPQExpBufferStr(q, " );\n");
15823 :
15824 346 : appendPQExpBuffer(delq, "DROP TEXT SEARCH DICTIONARY %s;\n",
15825 346 : fmtQualifiedDumpable(dictinfo));
15826 :
15827 346 : if (dopt->binary_upgrade)
15828 20 : binary_upgrade_extension_member(q, &dictinfo->dobj,
15829 : "TEXT SEARCH DICTIONARY", qdictname,
15830 20 : dictinfo->dobj.namespace->dobj.name);
15831 :
15832 346 : if (dictinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15833 346 : ArchiveEntry(fout, dictinfo->dobj.catId, dictinfo->dobj.dumpId,
15834 346 : ARCHIVE_OPTS(.tag = dictinfo->dobj.name,
15835 : .namespace = dictinfo->dobj.namespace->dobj.name,
15836 : .owner = dictinfo->rolname,
15837 : .description = "TEXT SEARCH DICTIONARY",
15838 : .section = SECTION_PRE_DATA,
15839 : .createStmt = q->data,
15840 : .dropStmt = delq->data));
15841 :
15842 : /* Dump Dictionary Comments */
15843 346 : if (dictinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15844 256 : dumpComment(fout, "TEXT SEARCH DICTIONARY", qdictname,
15845 256 : dictinfo->dobj.namespace->dobj.name, dictinfo->rolname,
15846 256 : dictinfo->dobj.catId, 0, dictinfo->dobj.dumpId);
15847 :
15848 346 : destroyPQExpBuffer(q);
15849 346 : destroyPQExpBuffer(delq);
15850 346 : destroyPQExpBuffer(query);
15851 346 : free(qdictname);
15852 : }
15853 :
15854 : /*
15855 : * dumpTSTemplate
15856 : * write out a single text search template
15857 : */
15858 : static void
15859 118 : dumpTSTemplate(Archive *fout, const TSTemplateInfo *tmplinfo)
15860 : {
15861 118 : DumpOptions *dopt = fout->dopt;
15862 : PQExpBuffer q;
15863 : PQExpBuffer delq;
15864 : char *qtmplname;
15865 :
15866 : /* Do nothing if not dumping schema */
15867 118 : if (!dopt->dumpSchema)
15868 12 : return;
15869 :
15870 106 : q = createPQExpBuffer();
15871 106 : delq = createPQExpBuffer();
15872 :
15873 106 : qtmplname = pg_strdup(fmtId(tmplinfo->dobj.name));
15874 :
15875 106 : appendPQExpBuffer(q, "CREATE TEXT SEARCH TEMPLATE %s (\n",
15876 106 : fmtQualifiedDumpable(tmplinfo));
15877 :
15878 106 : if (tmplinfo->tmplinit != InvalidOid)
15879 30 : appendPQExpBuffer(q, " INIT = %s,\n",
15880 30 : convertTSFunction(fout, tmplinfo->tmplinit));
15881 106 : appendPQExpBuffer(q, " LEXIZE = %s );\n",
15882 106 : convertTSFunction(fout, tmplinfo->tmpllexize));
15883 :
15884 106 : appendPQExpBuffer(delq, "DROP TEXT SEARCH TEMPLATE %s;\n",
15885 106 : fmtQualifiedDumpable(tmplinfo));
15886 :
15887 106 : if (dopt->binary_upgrade)
15888 2 : binary_upgrade_extension_member(q, &tmplinfo->dobj,
15889 : "TEXT SEARCH TEMPLATE", qtmplname,
15890 2 : tmplinfo->dobj.namespace->dobj.name);
15891 :
15892 106 : if (tmplinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15893 106 : ArchiveEntry(fout, tmplinfo->dobj.catId, tmplinfo->dobj.dumpId,
15894 106 : ARCHIVE_OPTS(.tag = tmplinfo->dobj.name,
15895 : .namespace = tmplinfo->dobj.namespace->dobj.name,
15896 : .description = "TEXT SEARCH TEMPLATE",
15897 : .section = SECTION_PRE_DATA,
15898 : .createStmt = q->data,
15899 : .dropStmt = delq->data));
15900 :
15901 : /* Dump Template Comments */
15902 106 : if (tmplinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15903 106 : dumpComment(fout, "TEXT SEARCH TEMPLATE", qtmplname,
15904 106 : tmplinfo->dobj.namespace->dobj.name, "",
15905 106 : tmplinfo->dobj.catId, 0, tmplinfo->dobj.dumpId);
15906 :
15907 106 : destroyPQExpBuffer(q);
15908 106 : destroyPQExpBuffer(delq);
15909 106 : free(qtmplname);
15910 : }
15911 :
15912 : /*
15913 : * dumpTSConfig
15914 : * write out a single text search configuration
15915 : */
15916 : static void
15917 308 : dumpTSConfig(Archive *fout, const TSConfigInfo *cfginfo)
15918 : {
15919 308 : DumpOptions *dopt = fout->dopt;
15920 : PQExpBuffer q;
15921 : PQExpBuffer delq;
15922 : PQExpBuffer query;
15923 : char *qcfgname;
15924 : PGresult *res;
15925 : char *nspname;
15926 : char *prsname;
15927 : int ntups,
15928 : i;
15929 : int i_tokenname;
15930 : int i_dictname;
15931 :
15932 : /* Do nothing if not dumping schema */
15933 308 : if (!dopt->dumpSchema)
15934 12 : return;
15935 :
15936 296 : q = createPQExpBuffer();
15937 296 : delq = createPQExpBuffer();
15938 296 : query = createPQExpBuffer();
15939 :
15940 296 : qcfgname = pg_strdup(fmtId(cfginfo->dobj.name));
15941 :
15942 : /* Fetch name and namespace of the config's parser */
15943 296 : appendPQExpBuffer(query, "SELECT nspname, prsname "
15944 : "FROM pg_ts_parser p, pg_namespace n "
15945 : "WHERE p.oid = '%u' AND n.oid = prsnamespace",
15946 296 : cfginfo->cfgparser);
15947 296 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
15948 296 : nspname = PQgetvalue(res, 0, 0);
15949 296 : prsname = PQgetvalue(res, 0, 1);
15950 :
15951 296 : appendPQExpBuffer(q, "CREATE TEXT SEARCH CONFIGURATION %s (\n",
15952 296 : fmtQualifiedDumpable(cfginfo));
15953 :
15954 296 : appendPQExpBuffer(q, " PARSER = %s.", fmtId(nspname));
15955 296 : appendPQExpBuffer(q, "%s );\n", fmtId(prsname));
15956 :
15957 296 : PQclear(res);
15958 :
15959 296 : resetPQExpBuffer(query);
15960 296 : appendPQExpBuffer(query,
15961 : "SELECT\n"
15962 : " ( SELECT alias FROM pg_catalog.ts_token_type('%u'::pg_catalog.oid) AS t\n"
15963 : " WHERE t.tokid = m.maptokentype ) AS tokenname,\n"
15964 : " m.mapdict::pg_catalog.regdictionary AS dictname\n"
15965 : "FROM pg_catalog.pg_ts_config_map AS m\n"
15966 : "WHERE m.mapcfg = '%u'\n"
15967 : "ORDER BY m.mapcfg, m.maptokentype, m.mapseqno",
15968 296 : cfginfo->cfgparser, cfginfo->dobj.catId.oid);
15969 :
15970 296 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
15971 296 : ntups = PQntuples(res);
15972 :
15973 296 : i_tokenname = PQfnumber(res, "tokenname");
15974 296 : i_dictname = PQfnumber(res, "dictname");
15975 :
15976 6190 : for (i = 0; i < ntups; i++)
15977 : {
15978 5894 : char *tokenname = PQgetvalue(res, i, i_tokenname);
15979 5894 : char *dictname = PQgetvalue(res, i, i_dictname);
15980 :
15981 5894 : if (i == 0 ||
15982 5598 : strcmp(tokenname, PQgetvalue(res, i - 1, i_tokenname)) != 0)
15983 : {
15984 : /* starting a new token type, so start a new command */
15985 5624 : if (i > 0)
15986 5328 : appendPQExpBufferStr(q, ";\n");
15987 5624 : appendPQExpBuffer(q, "\nALTER TEXT SEARCH CONFIGURATION %s\n",
15988 5624 : fmtQualifiedDumpable(cfginfo));
15989 : /* tokenname needs quoting, dictname does NOT */
15990 5624 : appendPQExpBuffer(q, " ADD MAPPING FOR %s WITH %s",
15991 : fmtId(tokenname), dictname);
15992 : }
15993 : else
15994 270 : appendPQExpBuffer(q, ", %s", dictname);
15995 : }
15996 :
15997 296 : if (ntups > 0)
15998 296 : appendPQExpBufferStr(q, ";\n");
15999 :
16000 296 : PQclear(res);
16001 :
16002 296 : appendPQExpBuffer(delq, "DROP TEXT SEARCH CONFIGURATION %s;\n",
16003 296 : fmtQualifiedDumpable(cfginfo));
16004 :
16005 296 : if (dopt->binary_upgrade)
16006 10 : binary_upgrade_extension_member(q, &cfginfo->dobj,
16007 : "TEXT SEARCH CONFIGURATION", qcfgname,
16008 10 : cfginfo->dobj.namespace->dobj.name);
16009 :
16010 296 : if (cfginfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16011 296 : ArchiveEntry(fout, cfginfo->dobj.catId, cfginfo->dobj.dumpId,
16012 296 : ARCHIVE_OPTS(.tag = cfginfo->dobj.name,
16013 : .namespace = cfginfo->dobj.namespace->dobj.name,
16014 : .owner = cfginfo->rolname,
16015 : .description = "TEXT SEARCH CONFIGURATION",
16016 : .section = SECTION_PRE_DATA,
16017 : .createStmt = q->data,
16018 : .dropStmt = delq->data));
16019 :
16020 : /* Dump Configuration Comments */
16021 296 : if (cfginfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16022 256 : dumpComment(fout, "TEXT SEARCH CONFIGURATION", qcfgname,
16023 256 : cfginfo->dobj.namespace->dobj.name, cfginfo->rolname,
16024 256 : cfginfo->dobj.catId, 0, cfginfo->dobj.dumpId);
16025 :
16026 296 : destroyPQExpBuffer(q);
16027 296 : destroyPQExpBuffer(delq);
16028 296 : destroyPQExpBuffer(query);
16029 296 : free(qcfgname);
16030 : }
16031 :
16032 : /*
16033 : * dumpForeignDataWrapper
16034 : * write out a single foreign-data wrapper definition
16035 : */
16036 : static void
16037 116 : dumpForeignDataWrapper(Archive *fout, const FdwInfo *fdwinfo)
16038 : {
16039 116 : DumpOptions *dopt = fout->dopt;
16040 : PQExpBuffer q;
16041 : PQExpBuffer delq;
16042 : char *qfdwname;
16043 :
16044 : /* Do nothing if not dumping schema */
16045 116 : if (!dopt->dumpSchema)
16046 14 : return;
16047 :
16048 102 : q = createPQExpBuffer();
16049 102 : delq = createPQExpBuffer();
16050 :
16051 102 : qfdwname = pg_strdup(fmtId(fdwinfo->dobj.name));
16052 :
16053 102 : appendPQExpBuffer(q, "CREATE FOREIGN DATA WRAPPER %s",
16054 : qfdwname);
16055 :
16056 102 : if (strcmp(fdwinfo->fdwhandler, "-") != 0)
16057 0 : appendPQExpBuffer(q, " HANDLER %s", fdwinfo->fdwhandler);
16058 :
16059 102 : if (strcmp(fdwinfo->fdwvalidator, "-") != 0)
16060 0 : appendPQExpBuffer(q, " VALIDATOR %s", fdwinfo->fdwvalidator);
16061 :
16062 102 : if (strlen(fdwinfo->fdwoptions) > 0)
16063 0 : appendPQExpBuffer(q, " OPTIONS (\n %s\n)", fdwinfo->fdwoptions);
16064 :
16065 102 : appendPQExpBufferStr(q, ";\n");
16066 :
16067 102 : appendPQExpBuffer(delq, "DROP FOREIGN DATA WRAPPER %s;\n",
16068 : qfdwname);
16069 :
16070 102 : if (dopt->binary_upgrade)
16071 4 : binary_upgrade_extension_member(q, &fdwinfo->dobj,
16072 : "FOREIGN DATA WRAPPER", qfdwname,
16073 : NULL);
16074 :
16075 102 : if (fdwinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16076 102 : ArchiveEntry(fout, fdwinfo->dobj.catId, fdwinfo->dobj.dumpId,
16077 102 : ARCHIVE_OPTS(.tag = fdwinfo->dobj.name,
16078 : .owner = fdwinfo->rolname,
16079 : .description = "FOREIGN DATA WRAPPER",
16080 : .section = SECTION_PRE_DATA,
16081 : .createStmt = q->data,
16082 : .dropStmt = delq->data));
16083 :
16084 : /* Dump Foreign Data Wrapper Comments */
16085 102 : if (fdwinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16086 0 : dumpComment(fout, "FOREIGN DATA WRAPPER", qfdwname,
16087 0 : NULL, fdwinfo->rolname,
16088 0 : fdwinfo->dobj.catId, 0, fdwinfo->dobj.dumpId);
16089 :
16090 : /* Handle the ACL */
16091 102 : if (fdwinfo->dobj.dump & DUMP_COMPONENT_ACL)
16092 74 : dumpACL(fout, fdwinfo->dobj.dumpId, InvalidDumpId,
16093 : "FOREIGN DATA WRAPPER", qfdwname, NULL, NULL,
16094 74 : NULL, fdwinfo->rolname, &fdwinfo->dacl);
16095 :
16096 102 : free(qfdwname);
16097 :
16098 102 : destroyPQExpBuffer(q);
16099 102 : destroyPQExpBuffer(delq);
16100 : }
16101 :
16102 : /*
16103 : * dumpForeignServer
16104 : * write out a foreign server definition
16105 : */
16106 : static void
16107 124 : dumpForeignServer(Archive *fout, const ForeignServerInfo *srvinfo)
16108 : {
16109 124 : DumpOptions *dopt = fout->dopt;
16110 : PQExpBuffer q;
16111 : PQExpBuffer delq;
16112 : PQExpBuffer query;
16113 : PGresult *res;
16114 : char *qsrvname;
16115 : char *fdwname;
16116 :
16117 : /* Do nothing if not dumping schema */
16118 124 : if (!dopt->dumpSchema)
16119 18 : return;
16120 :
16121 106 : q = createPQExpBuffer();
16122 106 : delq = createPQExpBuffer();
16123 106 : query = createPQExpBuffer();
16124 :
16125 106 : qsrvname = pg_strdup(fmtId(srvinfo->dobj.name));
16126 :
16127 : /* look up the foreign-data wrapper */
16128 106 : appendPQExpBuffer(query, "SELECT fdwname "
16129 : "FROM pg_foreign_data_wrapper w "
16130 : "WHERE w.oid = '%u'",
16131 106 : srvinfo->srvfdw);
16132 106 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
16133 106 : fdwname = PQgetvalue(res, 0, 0);
16134 :
16135 106 : appendPQExpBuffer(q, "CREATE SERVER %s", qsrvname);
16136 106 : if (srvinfo->srvtype && strlen(srvinfo->srvtype) > 0)
16137 : {
16138 0 : appendPQExpBufferStr(q, " TYPE ");
16139 0 : appendStringLiteralAH(q, srvinfo->srvtype, fout);
16140 : }
16141 106 : if (srvinfo->srvversion && strlen(srvinfo->srvversion) > 0)
16142 : {
16143 0 : appendPQExpBufferStr(q, " VERSION ");
16144 0 : appendStringLiteralAH(q, srvinfo->srvversion, fout);
16145 : }
16146 :
16147 106 : appendPQExpBufferStr(q, " FOREIGN DATA WRAPPER ");
16148 106 : appendPQExpBufferStr(q, fmtId(fdwname));
16149 :
16150 106 : if (srvinfo->srvoptions && strlen(srvinfo->srvoptions) > 0)
16151 0 : appendPQExpBuffer(q, " OPTIONS (\n %s\n)", srvinfo->srvoptions);
16152 :
16153 106 : appendPQExpBufferStr(q, ";\n");
16154 :
16155 106 : appendPQExpBuffer(delq, "DROP SERVER %s;\n",
16156 : qsrvname);
16157 :
16158 106 : if (dopt->binary_upgrade)
16159 4 : binary_upgrade_extension_member(q, &srvinfo->dobj,
16160 : "SERVER", qsrvname, NULL);
16161 :
16162 106 : if (srvinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16163 106 : ArchiveEntry(fout, srvinfo->dobj.catId, srvinfo->dobj.dumpId,
16164 106 : ARCHIVE_OPTS(.tag = srvinfo->dobj.name,
16165 : .owner = srvinfo->rolname,
16166 : .description = "SERVER",
16167 : .section = SECTION_PRE_DATA,
16168 : .createStmt = q->data,
16169 : .dropStmt = delq->data));
16170 :
16171 : /* Dump Foreign Server Comments */
16172 106 : if (srvinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16173 0 : dumpComment(fout, "SERVER", qsrvname,
16174 0 : NULL, srvinfo->rolname,
16175 0 : srvinfo->dobj.catId, 0, srvinfo->dobj.dumpId);
16176 :
16177 : /* Handle the ACL */
16178 106 : if (srvinfo->dobj.dump & DUMP_COMPONENT_ACL)
16179 74 : dumpACL(fout, srvinfo->dobj.dumpId, InvalidDumpId,
16180 : "FOREIGN SERVER", qsrvname, NULL, NULL,
16181 74 : NULL, srvinfo->rolname, &srvinfo->dacl);
16182 :
16183 : /* Dump user mappings */
16184 106 : if (srvinfo->dobj.dump & DUMP_COMPONENT_USERMAP)
16185 106 : dumpUserMappings(fout,
16186 106 : srvinfo->dobj.name, NULL,
16187 106 : srvinfo->rolname,
16188 106 : srvinfo->dobj.catId, srvinfo->dobj.dumpId);
16189 :
16190 106 : PQclear(res);
16191 :
16192 106 : free(qsrvname);
16193 :
16194 106 : destroyPQExpBuffer(q);
16195 106 : destroyPQExpBuffer(delq);
16196 106 : destroyPQExpBuffer(query);
16197 : }
16198 :
16199 : /*
16200 : * dumpUserMappings
16201 : *
16202 : * This routine is used to dump any user mappings associated with the
16203 : * server handed to this routine. Should be called after ArchiveEntry()
16204 : * for the server.
16205 : */
16206 : static void
16207 106 : dumpUserMappings(Archive *fout,
16208 : const char *servername, const char *namespace,
16209 : const char *owner,
16210 : CatalogId catalogId, DumpId dumpId)
16211 : {
16212 : PQExpBuffer q;
16213 : PQExpBuffer delq;
16214 : PQExpBuffer query;
16215 : PQExpBuffer tag;
16216 : PGresult *res;
16217 : int ntups;
16218 : int i_usename;
16219 : int i_umoptions;
16220 : int i;
16221 :
16222 106 : q = createPQExpBuffer();
16223 106 : tag = createPQExpBuffer();
16224 106 : delq = createPQExpBuffer();
16225 106 : query = createPQExpBuffer();
16226 :
16227 : /*
16228 : * We read from the publicly accessible view pg_user_mappings, so as not
16229 : * to fail if run by a non-superuser. Note that the view will show
16230 : * umoptions as null if the user hasn't got privileges for the associated
16231 : * server; this means that pg_dump will dump such a mapping, but with no
16232 : * OPTIONS clause. A possible alternative is to skip such mappings
16233 : * altogether, but it's not clear that that's an improvement.
16234 : */
16235 106 : appendPQExpBuffer(query,
16236 : "SELECT usename, "
16237 : "array_to_string(ARRAY("
16238 : "SELECT quote_ident(option_name) || ' ' || "
16239 : "quote_literal(option_value) "
16240 : "FROM pg_options_to_table(umoptions) "
16241 : "ORDER BY option_name"
16242 : "), E',\n ') AS umoptions "
16243 : "FROM pg_user_mappings "
16244 : "WHERE srvid = '%u' "
16245 : "ORDER BY usename",
16246 : catalogId.oid);
16247 :
16248 106 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16249 :
16250 106 : ntups = PQntuples(res);
16251 106 : i_usename = PQfnumber(res, "usename");
16252 106 : i_umoptions = PQfnumber(res, "umoptions");
16253 :
16254 180 : for (i = 0; i < ntups; i++)
16255 : {
16256 : char *usename;
16257 : char *umoptions;
16258 :
16259 74 : usename = PQgetvalue(res, i, i_usename);
16260 74 : umoptions = PQgetvalue(res, i, i_umoptions);
16261 :
16262 74 : resetPQExpBuffer(q);
16263 74 : appendPQExpBuffer(q, "CREATE USER MAPPING FOR %s", fmtId(usename));
16264 74 : appendPQExpBuffer(q, " SERVER %s", fmtId(servername));
16265 :
16266 74 : if (umoptions && strlen(umoptions) > 0)
16267 0 : appendPQExpBuffer(q, " OPTIONS (\n %s\n)", umoptions);
16268 :
16269 74 : appendPQExpBufferStr(q, ";\n");
16270 :
16271 74 : resetPQExpBuffer(delq);
16272 74 : appendPQExpBuffer(delq, "DROP USER MAPPING FOR %s", fmtId(usename));
16273 74 : appendPQExpBuffer(delq, " SERVER %s;\n", fmtId(servername));
16274 :
16275 74 : resetPQExpBuffer(tag);
16276 74 : appendPQExpBuffer(tag, "USER MAPPING %s SERVER %s",
16277 : usename, servername);
16278 :
16279 74 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
16280 74 : ARCHIVE_OPTS(.tag = tag->data,
16281 : .namespace = namespace,
16282 : .owner = owner,
16283 : .description = "USER MAPPING",
16284 : .section = SECTION_PRE_DATA,
16285 : .createStmt = q->data,
16286 : .dropStmt = delq->data));
16287 : }
16288 :
16289 106 : PQclear(res);
16290 :
16291 106 : destroyPQExpBuffer(query);
16292 106 : destroyPQExpBuffer(delq);
16293 106 : destroyPQExpBuffer(tag);
16294 106 : destroyPQExpBuffer(q);
16295 106 : }
16296 :
16297 : /*
16298 : * Write out default privileges information
16299 : */
16300 : static void
16301 368 : dumpDefaultACL(Archive *fout, const DefaultACLInfo *daclinfo)
16302 : {
16303 368 : DumpOptions *dopt = fout->dopt;
16304 : PQExpBuffer q;
16305 : PQExpBuffer tag;
16306 : const char *type;
16307 :
16308 : /* Do nothing if not dumping schema, or if we're skipping ACLs */
16309 368 : if (!dopt->dumpSchema || dopt->aclsSkip)
16310 56 : return;
16311 :
16312 312 : q = createPQExpBuffer();
16313 312 : tag = createPQExpBuffer();
16314 :
16315 312 : switch (daclinfo->defaclobjtype)
16316 : {
16317 146 : case DEFACLOBJ_RELATION:
16318 146 : type = "TABLES";
16319 146 : break;
16320 0 : case DEFACLOBJ_SEQUENCE:
16321 0 : type = "SEQUENCES";
16322 0 : break;
16323 146 : case DEFACLOBJ_FUNCTION:
16324 146 : type = "FUNCTIONS";
16325 146 : break;
16326 20 : case DEFACLOBJ_TYPE:
16327 20 : type = "TYPES";
16328 20 : break;
16329 0 : case DEFACLOBJ_NAMESPACE:
16330 0 : type = "SCHEMAS";
16331 0 : break;
16332 0 : case DEFACLOBJ_LARGEOBJECT:
16333 0 : type = "LARGE OBJECTS";
16334 0 : break;
16335 0 : default:
16336 : /* shouldn't get here */
16337 0 : pg_fatal("unrecognized object type in default privileges: %d",
16338 : (int) daclinfo->defaclobjtype);
16339 : type = ""; /* keep compiler quiet */
16340 : }
16341 :
16342 312 : appendPQExpBuffer(tag, "DEFAULT PRIVILEGES FOR %s", type);
16343 :
16344 : /* build the actual command(s) for this tuple */
16345 312 : if (!buildDefaultACLCommands(type,
16346 312 : daclinfo->dobj.namespace != NULL ?
16347 148 : daclinfo->dobj.namespace->dobj.name : NULL,
16348 312 : daclinfo->dacl.acl,
16349 312 : daclinfo->dacl.acldefault,
16350 312 : daclinfo->defaclrole,
16351 : fout->remoteVersion,
16352 : q))
16353 0 : pg_fatal("could not parse default ACL list (%s)",
16354 : daclinfo->dacl.acl);
16355 :
16356 312 : if (daclinfo->dobj.dump & DUMP_COMPONENT_ACL)
16357 312 : ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId,
16358 312 : ARCHIVE_OPTS(.tag = tag->data,
16359 : .namespace = daclinfo->dobj.namespace ?
16360 : daclinfo->dobj.namespace->dobj.name : NULL,
16361 : .owner = daclinfo->defaclrole,
16362 : .description = "DEFAULT ACL",
16363 : .section = SECTION_POST_DATA,
16364 : .createStmt = q->data));
16365 :
16366 312 : destroyPQExpBuffer(tag);
16367 312 : destroyPQExpBuffer(q);
16368 : }
16369 :
16370 : /*----------
16371 : * Write out grant/revoke information
16372 : *
16373 : * 'objDumpId' is the dump ID of the underlying object.
16374 : * 'altDumpId' can be a second dumpId that the ACL entry must also depend on,
16375 : * or InvalidDumpId if there is no need for a second dependency.
16376 : * 'type' must be one of
16377 : * TABLE, SEQUENCE, FUNCTION, LANGUAGE, SCHEMA, DATABASE, TABLESPACE,
16378 : * FOREIGN DATA WRAPPER, SERVER, or LARGE OBJECT.
16379 : * 'name' is the formatted name of the object. Must be quoted etc. already.
16380 : * 'subname' is the formatted name of the sub-object, if any. Must be quoted.
16381 : * (Currently we assume that subname is only provided for table columns.)
16382 : * 'nspname' is the namespace the object is in (NULL if none).
16383 : * 'tag' is the tag to use for the ACL TOC entry; typically, this is NULL
16384 : * to use the default for the object type.
16385 : * 'owner' is the owner, NULL if there is no owner (for languages).
16386 : * 'dacl' is the DumpableAcl struct for the object.
16387 : *
16388 : * Returns the dump ID assigned to the ACL TocEntry, or InvalidDumpId if
16389 : * no ACL entry was created.
16390 : *----------
16391 : */
16392 : static DumpId
16393 57610 : dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
16394 : const char *type, const char *name, const char *subname,
16395 : const char *nspname, const char *tag, const char *owner,
16396 : const DumpableAcl *dacl)
16397 : {
16398 57610 : DumpId aclDumpId = InvalidDumpId;
16399 57610 : DumpOptions *dopt = fout->dopt;
16400 57610 : const char *acls = dacl->acl;
16401 57610 : const char *acldefault = dacl->acldefault;
16402 57610 : char privtype = dacl->privtype;
16403 57610 : const char *initprivs = dacl->initprivs;
16404 : const char *baseacls;
16405 : PQExpBuffer sql;
16406 :
16407 : /* Do nothing if ACL dump is not enabled */
16408 57610 : if (dopt->aclsSkip)
16409 652 : return InvalidDumpId;
16410 :
16411 : /* --data-only skips ACLs *except* large object ACLs */
16412 56958 : if (!dopt->dumpSchema && strcmp(type, "LARGE OBJECT") != 0)
16413 0 : return InvalidDumpId;
16414 :
16415 56958 : sql = createPQExpBuffer();
16416 :
16417 : /*
16418 : * In binary upgrade mode, we don't run an extension's script but instead
16419 : * dump out the objects independently and then recreate them. To preserve
16420 : * any initial privileges which were set on extension objects, we need to
16421 : * compute the set of GRANT and REVOKE commands necessary to get from the
16422 : * default privileges of an object to its initial privileges as recorded
16423 : * in pg_init_privs.
16424 : *
16425 : * At restore time, we apply these commands after having called
16426 : * binary_upgrade_set_record_init_privs(true). That tells the backend to
16427 : * copy the results into pg_init_privs. This is how we preserve the
16428 : * contents of that catalog across binary upgrades.
16429 : */
16430 56958 : if (dopt->binary_upgrade && privtype == 'e' &&
16431 26 : initprivs && *initprivs != '\0')
16432 : {
16433 26 : appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\n");
16434 26 : if (!buildACLCommands(name, subname, nspname, type,
16435 : initprivs, acldefault, owner,
16436 : "", fout->remoteVersion, sql))
16437 0 : pg_fatal("could not parse initial ACL list (%s) or default (%s) for object \"%s\" (%s)",
16438 : initprivs, acldefault, name, type);
16439 26 : appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
16440 : }
16441 :
16442 : /*
16443 : * Now figure the GRANT and REVOKE commands needed to get to the object's
16444 : * actual current ACL, starting from the initprivs if given, else from the
16445 : * object-type-specific default. Also, while buildACLCommands will assume
16446 : * that a NULL/empty acls string means it needn't do anything, what that
16447 : * actually represents is the object-type-specific default; so we need to
16448 : * substitute the acldefault string to get the right results in that case.
16449 : */
16450 56958 : if (initprivs && *initprivs != '\0')
16451 : {
16452 52920 : baseacls = initprivs;
16453 52920 : if (acls == NULL || *acls == '\0')
16454 34 : acls = acldefault;
16455 : }
16456 : else
16457 4038 : baseacls = acldefault;
16458 :
16459 56958 : if (!buildACLCommands(name, subname, nspname, type,
16460 : acls, baseacls, owner,
16461 : "", fout->remoteVersion, sql))
16462 0 : pg_fatal("could not parse ACL list (%s) or default (%s) for object \"%s\" (%s)",
16463 : acls, baseacls, name, type);
16464 :
16465 56958 : if (sql->len > 0)
16466 : {
16467 4180 : PQExpBuffer tagbuf = createPQExpBuffer();
16468 : DumpId aclDeps[2];
16469 4180 : int nDeps = 0;
16470 :
16471 4180 : if (tag)
16472 0 : appendPQExpBufferStr(tagbuf, tag);
16473 4180 : else if (subname)
16474 2478 : appendPQExpBuffer(tagbuf, "COLUMN %s.%s", name, subname);
16475 : else
16476 1702 : appendPQExpBuffer(tagbuf, "%s %s", type, name);
16477 :
16478 4180 : aclDeps[nDeps++] = objDumpId;
16479 4180 : if (altDumpId != InvalidDumpId)
16480 2298 : aclDeps[nDeps++] = altDumpId;
16481 :
16482 4180 : aclDumpId = createDumpId();
16483 :
16484 4180 : ArchiveEntry(fout, nilCatalogId, aclDumpId,
16485 4180 : ARCHIVE_OPTS(.tag = tagbuf->data,
16486 : .namespace = nspname,
16487 : .owner = owner,
16488 : .description = "ACL",
16489 : .section = SECTION_NONE,
16490 : .createStmt = sql->data,
16491 : .deps = aclDeps,
16492 : .nDeps = nDeps));
16493 :
16494 4180 : destroyPQExpBuffer(tagbuf);
16495 : }
16496 :
16497 56958 : destroyPQExpBuffer(sql);
16498 :
16499 56958 : return aclDumpId;
16500 : }
16501 :
16502 : /*
16503 : * dumpSecLabel
16504 : *
16505 : * This routine is used to dump any security labels associated with the
16506 : * object handed to this routine. The routine takes the object type
16507 : * and object name (ready to print, except for schema decoration), plus
16508 : * the namespace and owner of the object (for labeling the ArchiveEntry),
16509 : * plus catalog ID and subid which are the lookup key for pg_seclabel,
16510 : * plus the dump ID for the object (for setting a dependency).
16511 : * If a matching pg_seclabel entry is found, it is dumped.
16512 : *
16513 : * Note: although this routine takes a dumpId for dependency purposes,
16514 : * that purpose is just to mark the dependency in the emitted dump file
16515 : * for possible future use by pg_restore. We do NOT use it for determining
16516 : * ordering of the label in the dump file, because this routine is called
16517 : * after dependency sorting occurs. This routine should be called just after
16518 : * calling ArchiveEntry() for the specified object.
16519 : */
16520 : static void
16521 20 : dumpSecLabel(Archive *fout, const char *type, const char *name,
16522 : const char *namespace, const char *owner,
16523 : CatalogId catalogId, int subid, DumpId dumpId)
16524 : {
16525 20 : DumpOptions *dopt = fout->dopt;
16526 : SecLabelItem *labels;
16527 : int nlabels;
16528 : int i;
16529 : PQExpBuffer query;
16530 :
16531 : /* do nothing, if --no-security-labels is supplied */
16532 20 : if (dopt->no_security_labels)
16533 0 : return;
16534 :
16535 : /*
16536 : * Security labels are schema not data ... except large object labels are
16537 : * data
16538 : */
16539 20 : if (strcmp(type, "LARGE OBJECT") != 0)
16540 : {
16541 0 : if (!dopt->dumpSchema)
16542 0 : return;
16543 : }
16544 : else
16545 : {
16546 : /* We do dump large object security labels in binary-upgrade mode */
16547 20 : if (!dopt->dumpData && !dopt->binary_upgrade)
16548 0 : return;
16549 : }
16550 :
16551 : /* Search for security labels associated with catalogId, using table */
16552 20 : nlabels = findSecLabels(catalogId.tableoid, catalogId.oid, &labels);
16553 :
16554 20 : query = createPQExpBuffer();
16555 :
16556 30 : for (i = 0; i < nlabels; i++)
16557 : {
16558 : /*
16559 : * Ignore label entries for which the subid doesn't match.
16560 : */
16561 10 : if (labels[i].objsubid != subid)
16562 0 : continue;
16563 :
16564 10 : appendPQExpBuffer(query,
16565 : "SECURITY LABEL FOR %s ON %s ",
16566 10 : fmtId(labels[i].provider), type);
16567 10 : if (namespace && *namespace)
16568 0 : appendPQExpBuffer(query, "%s.", fmtId(namespace));
16569 10 : appendPQExpBuffer(query, "%s IS ", name);
16570 10 : appendStringLiteralAH(query, labels[i].label, fout);
16571 10 : appendPQExpBufferStr(query, ";\n");
16572 : }
16573 :
16574 20 : if (query->len > 0)
16575 : {
16576 10 : PQExpBuffer tag = createPQExpBuffer();
16577 :
16578 10 : appendPQExpBuffer(tag, "%s %s", type, name);
16579 10 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
16580 10 : ARCHIVE_OPTS(.tag = tag->data,
16581 : .namespace = namespace,
16582 : .owner = owner,
16583 : .description = "SECURITY LABEL",
16584 : .section = SECTION_NONE,
16585 : .createStmt = query->data,
16586 : .deps = &dumpId,
16587 : .nDeps = 1));
16588 10 : destroyPQExpBuffer(tag);
16589 : }
16590 :
16591 20 : destroyPQExpBuffer(query);
16592 : }
16593 :
16594 : /*
16595 : * dumpTableSecLabel
16596 : *
16597 : * As above, but dump security label for both the specified table (or view)
16598 : * and its columns.
16599 : */
16600 : static void
16601 0 : dumpTableSecLabel(Archive *fout, const TableInfo *tbinfo, const char *reltypename)
16602 : {
16603 0 : DumpOptions *dopt = fout->dopt;
16604 : SecLabelItem *labels;
16605 : int nlabels;
16606 : int i;
16607 : PQExpBuffer query;
16608 : PQExpBuffer target;
16609 :
16610 : /* do nothing, if --no-security-labels is supplied */
16611 0 : if (dopt->no_security_labels)
16612 0 : return;
16613 :
16614 : /* SecLabel are SCHEMA not data */
16615 0 : if (!dopt->dumpSchema)
16616 0 : return;
16617 :
16618 : /* Search for comments associated with relation, using table */
16619 0 : nlabels = findSecLabels(tbinfo->dobj.catId.tableoid,
16620 0 : tbinfo->dobj.catId.oid,
16621 : &labels);
16622 :
16623 : /* If security labels exist, build SECURITY LABEL statements */
16624 0 : if (nlabels <= 0)
16625 0 : return;
16626 :
16627 0 : query = createPQExpBuffer();
16628 0 : target = createPQExpBuffer();
16629 :
16630 0 : for (i = 0; i < nlabels; i++)
16631 : {
16632 : const char *colname;
16633 0 : const char *provider = labels[i].provider;
16634 0 : const char *label = labels[i].label;
16635 0 : int objsubid = labels[i].objsubid;
16636 :
16637 0 : resetPQExpBuffer(target);
16638 0 : if (objsubid == 0)
16639 : {
16640 0 : appendPQExpBuffer(target, "%s %s", reltypename,
16641 0 : fmtQualifiedDumpable(tbinfo));
16642 : }
16643 : else
16644 : {
16645 0 : colname = getAttrName(objsubid, tbinfo);
16646 : /* first fmtXXX result must be consumed before calling again */
16647 0 : appendPQExpBuffer(target, "COLUMN %s",
16648 0 : fmtQualifiedDumpable(tbinfo));
16649 0 : appendPQExpBuffer(target, ".%s", fmtId(colname));
16650 : }
16651 0 : appendPQExpBuffer(query, "SECURITY LABEL FOR %s ON %s IS ",
16652 : fmtId(provider), target->data);
16653 0 : appendStringLiteralAH(query, label, fout);
16654 0 : appendPQExpBufferStr(query, ";\n");
16655 : }
16656 0 : if (query->len > 0)
16657 : {
16658 0 : resetPQExpBuffer(target);
16659 0 : appendPQExpBuffer(target, "%s %s", reltypename,
16660 0 : fmtId(tbinfo->dobj.name));
16661 0 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
16662 0 : ARCHIVE_OPTS(.tag = target->data,
16663 : .namespace = tbinfo->dobj.namespace->dobj.name,
16664 : .owner = tbinfo->rolname,
16665 : .description = "SECURITY LABEL",
16666 : .section = SECTION_NONE,
16667 : .createStmt = query->data,
16668 : .deps = &(tbinfo->dobj.dumpId),
16669 : .nDeps = 1));
16670 : }
16671 0 : destroyPQExpBuffer(query);
16672 0 : destroyPQExpBuffer(target);
16673 : }
16674 :
16675 : /*
16676 : * findSecLabels
16677 : *
16678 : * Find the security label(s), if any, associated with the given object.
16679 : * All the objsubid values associated with the given classoid/objoid are
16680 : * found with one search.
16681 : */
16682 : static int
16683 20 : findSecLabels(Oid classoid, Oid objoid, SecLabelItem **items)
16684 : {
16685 20 : SecLabelItem *middle = NULL;
16686 : SecLabelItem *low;
16687 : SecLabelItem *high;
16688 : int nmatch;
16689 :
16690 20 : if (nseclabels <= 0) /* no labels, so no match is possible */
16691 : {
16692 0 : *items = NULL;
16693 0 : return 0;
16694 : }
16695 :
16696 : /*
16697 : * Do binary search to find some item matching the object.
16698 : */
16699 20 : low = &seclabels[0];
16700 20 : high = &seclabels[nseclabels - 1];
16701 30 : while (low <= high)
16702 : {
16703 20 : middle = low + (high - low) / 2;
16704 :
16705 20 : if (classoid < middle->classoid)
16706 0 : high = middle - 1;
16707 20 : else if (classoid > middle->classoid)
16708 0 : low = middle + 1;
16709 20 : else if (objoid < middle->objoid)
16710 10 : high = middle - 1;
16711 10 : else if (objoid > middle->objoid)
16712 0 : low = middle + 1;
16713 : else
16714 10 : break; /* found a match */
16715 : }
16716 :
16717 20 : if (low > high) /* no matches */
16718 : {
16719 10 : *items = NULL;
16720 10 : return 0;
16721 : }
16722 :
16723 : /*
16724 : * Now determine how many items match the object. The search loop
16725 : * invariant still holds: only items between low and high inclusive could
16726 : * match.
16727 : */
16728 10 : nmatch = 1;
16729 10 : while (middle > low)
16730 : {
16731 0 : if (classoid != middle[-1].classoid ||
16732 0 : objoid != middle[-1].objoid)
16733 : break;
16734 0 : middle--;
16735 0 : nmatch++;
16736 : }
16737 :
16738 10 : *items = middle;
16739 :
16740 10 : middle += nmatch;
16741 10 : while (middle <= high)
16742 : {
16743 0 : if (classoid != middle->classoid ||
16744 0 : objoid != middle->objoid)
16745 : break;
16746 0 : middle++;
16747 0 : nmatch++;
16748 : }
16749 :
16750 10 : return nmatch;
16751 : }
16752 :
16753 : /*
16754 : * collectSecLabels
16755 : *
16756 : * Construct a table of all security labels available for database objects;
16757 : * also set the has-seclabel component flag for each relevant object.
16758 : *
16759 : * The table is sorted by classoid/objid/objsubid for speed in lookup.
16760 : */
16761 : static void
16762 370 : collectSecLabels(Archive *fout)
16763 : {
16764 : PGresult *res;
16765 : PQExpBuffer query;
16766 : int i_label;
16767 : int i_provider;
16768 : int i_classoid;
16769 : int i_objoid;
16770 : int i_objsubid;
16771 : int ntups;
16772 : int i;
16773 : DumpableObject *dobj;
16774 :
16775 370 : query = createPQExpBuffer();
16776 :
16777 370 : appendPQExpBufferStr(query,
16778 : "SELECT label, provider, classoid, objoid, objsubid "
16779 : "FROM pg_catalog.pg_seclabels "
16780 : "ORDER BY classoid, objoid, objsubid");
16781 :
16782 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16783 :
16784 : /* Construct lookup table containing OIDs in numeric form */
16785 370 : i_label = PQfnumber(res, "label");
16786 370 : i_provider = PQfnumber(res, "provider");
16787 370 : i_classoid = PQfnumber(res, "classoid");
16788 370 : i_objoid = PQfnumber(res, "objoid");
16789 370 : i_objsubid = PQfnumber(res, "objsubid");
16790 :
16791 370 : ntups = PQntuples(res);
16792 :
16793 370 : seclabels = (SecLabelItem *) pg_malloc(ntups * sizeof(SecLabelItem));
16794 370 : nseclabels = 0;
16795 370 : dobj = NULL;
16796 :
16797 380 : for (i = 0; i < ntups; i++)
16798 : {
16799 : CatalogId objId;
16800 : int subid;
16801 :
16802 10 : objId.tableoid = atooid(PQgetvalue(res, i, i_classoid));
16803 10 : objId.oid = atooid(PQgetvalue(res, i, i_objoid));
16804 10 : subid = atoi(PQgetvalue(res, i, i_objsubid));
16805 :
16806 : /* We needn't remember labels that don't match any dumpable object */
16807 10 : if (dobj == NULL ||
16808 0 : dobj->catId.tableoid != objId.tableoid ||
16809 0 : dobj->catId.oid != objId.oid)
16810 10 : dobj = findObjectByCatalogId(objId);
16811 10 : if (dobj == NULL)
16812 0 : continue;
16813 :
16814 : /*
16815 : * Labels on columns of composite types are linked to the type's
16816 : * pg_class entry, but we need to set the DUMP_COMPONENT_SECLABEL flag
16817 : * in the type's own DumpableObject.
16818 : */
16819 10 : if (subid != 0 && dobj->objType == DO_TABLE &&
16820 0 : ((TableInfo *) dobj)->relkind == RELKIND_COMPOSITE_TYPE)
16821 0 : {
16822 : TypeInfo *cTypeInfo;
16823 :
16824 0 : cTypeInfo = findTypeByOid(((TableInfo *) dobj)->reltype);
16825 0 : if (cTypeInfo)
16826 0 : cTypeInfo->dobj.components |= DUMP_COMPONENT_SECLABEL;
16827 : }
16828 : else
16829 10 : dobj->components |= DUMP_COMPONENT_SECLABEL;
16830 :
16831 10 : seclabels[nseclabels].label = pg_strdup(PQgetvalue(res, i, i_label));
16832 10 : seclabels[nseclabels].provider = pg_strdup(PQgetvalue(res, i, i_provider));
16833 10 : seclabels[nseclabels].classoid = objId.tableoid;
16834 10 : seclabels[nseclabels].objoid = objId.oid;
16835 10 : seclabels[nseclabels].objsubid = subid;
16836 10 : nseclabels++;
16837 : }
16838 :
16839 370 : PQclear(res);
16840 370 : destroyPQExpBuffer(query);
16841 370 : }
16842 :
16843 : /*
16844 : * dumpTable
16845 : * write out to fout the declarations (not data) of a user-defined table
16846 : */
16847 : static void
16848 62602 : dumpTable(Archive *fout, const TableInfo *tbinfo)
16849 : {
16850 62602 : DumpOptions *dopt = fout->dopt;
16851 62602 : DumpId tableAclDumpId = InvalidDumpId;
16852 : char *namecopy;
16853 :
16854 : /* Do nothing if not dumping schema */
16855 62602 : if (!dopt->dumpSchema)
16856 3052 : return;
16857 :
16858 59550 : if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16859 : {
16860 13984 : if (tbinfo->relkind == RELKIND_SEQUENCE)
16861 786 : dumpSequence(fout, tbinfo);
16862 : else
16863 13198 : dumpTableSchema(fout, tbinfo);
16864 : }
16865 :
16866 : /* Handle the ACL here */
16867 59550 : namecopy = pg_strdup(fmtId(tbinfo->dobj.name));
16868 59550 : if (tbinfo->dobj.dump & DUMP_COMPONENT_ACL)
16869 : {
16870 47092 : const char *objtype =
16871 47092 : (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
16872 :
16873 : tableAclDumpId =
16874 47092 : dumpACL(fout, tbinfo->dobj.dumpId, InvalidDumpId,
16875 : objtype, namecopy, NULL,
16876 47092 : tbinfo->dobj.namespace->dobj.name,
16877 47092 : NULL, tbinfo->rolname, &tbinfo->dacl);
16878 : }
16879 :
16880 : /*
16881 : * Handle column ACLs, if any. Note: we pull these with a separate query
16882 : * rather than trying to fetch them during getTableAttrs, so that we won't
16883 : * miss ACLs on system columns. Doing it this way also allows us to dump
16884 : * ACLs for catalogs that we didn't mark "interesting" back in getTables.
16885 : */
16886 59550 : if ((tbinfo->dobj.dump & DUMP_COMPONENT_ACL) && tbinfo->hascolumnACLs)
16887 : {
16888 596 : PQExpBuffer query = createPQExpBuffer();
16889 : PGresult *res;
16890 : int i;
16891 :
16892 596 : if (!fout->is_prepared[PREPQUERY_GETCOLUMNACLS])
16893 : {
16894 : /* Set up query for column ACLs */
16895 318 : appendPQExpBufferStr(query,
16896 : "PREPARE getColumnACLs(pg_catalog.oid) AS\n");
16897 :
16898 318 : if (fout->remoteVersion >= 90600)
16899 : {
16900 : /*
16901 : * In principle we should call acldefault('c', relowner) to
16902 : * get the default ACL for a column. However, we don't
16903 : * currently store the numeric OID of the relowner in
16904 : * TableInfo. We could convert the owner name using regrole,
16905 : * but that creates a risk of failure due to concurrent role
16906 : * renames. Given that the default ACL for columns is empty
16907 : * and is likely to stay that way, it's not worth extra cycles
16908 : * and risk to avoid hard-wiring that knowledge here.
16909 : */
16910 318 : appendPQExpBufferStr(query,
16911 : "SELECT at.attname, "
16912 : "at.attacl, "
16913 : "'{}' AS acldefault, "
16914 : "pip.privtype, pip.initprivs "
16915 : "FROM pg_catalog.pg_attribute at "
16916 : "LEFT JOIN pg_catalog.pg_init_privs pip ON "
16917 : "(at.attrelid = pip.objoid "
16918 : "AND pip.classoid = 'pg_catalog.pg_class'::pg_catalog.regclass "
16919 : "AND at.attnum = pip.objsubid) "
16920 : "WHERE at.attrelid = $1 AND "
16921 : "NOT at.attisdropped "
16922 : "AND (at.attacl IS NOT NULL OR pip.initprivs IS NOT NULL) "
16923 : "ORDER BY at.attnum");
16924 : }
16925 : else
16926 : {
16927 0 : appendPQExpBufferStr(query,
16928 : "SELECT attname, attacl, '{}' AS acldefault, "
16929 : "NULL AS privtype, NULL AS initprivs "
16930 : "FROM pg_catalog.pg_attribute "
16931 : "WHERE attrelid = $1 AND NOT attisdropped "
16932 : "AND attacl IS NOT NULL "
16933 : "ORDER BY attnum");
16934 : }
16935 :
16936 318 : ExecuteSqlStatement(fout, query->data);
16937 :
16938 318 : fout->is_prepared[PREPQUERY_GETCOLUMNACLS] = true;
16939 : }
16940 :
16941 596 : printfPQExpBuffer(query,
16942 : "EXECUTE getColumnACLs('%u')",
16943 596 : tbinfo->dobj.catId.oid);
16944 :
16945 596 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16946 :
16947 9314 : for (i = 0; i < PQntuples(res); i++)
16948 : {
16949 8718 : char *attname = PQgetvalue(res, i, 0);
16950 8718 : char *attacl = PQgetvalue(res, i, 1);
16951 8718 : char *acldefault = PQgetvalue(res, i, 2);
16952 8718 : char privtype = *(PQgetvalue(res, i, 3));
16953 8718 : char *initprivs = PQgetvalue(res, i, 4);
16954 : DumpableAcl coldacl;
16955 : char *attnamecopy;
16956 :
16957 8718 : coldacl.acl = attacl;
16958 8718 : coldacl.acldefault = acldefault;
16959 8718 : coldacl.privtype = privtype;
16960 8718 : coldacl.initprivs = initprivs;
16961 8718 : attnamecopy = pg_strdup(fmtId(attname));
16962 :
16963 : /*
16964 : * Column's GRANT type is always TABLE. Each column ACL depends
16965 : * on the table-level ACL, since we can restore column ACLs in
16966 : * parallel but the table-level ACL has to be done first.
16967 : */
16968 8718 : dumpACL(fout, tbinfo->dobj.dumpId, tableAclDumpId,
16969 : "TABLE", namecopy, attnamecopy,
16970 8718 : tbinfo->dobj.namespace->dobj.name,
16971 8718 : NULL, tbinfo->rolname, &coldacl);
16972 8718 : free(attnamecopy);
16973 : }
16974 596 : PQclear(res);
16975 596 : destroyPQExpBuffer(query);
16976 : }
16977 :
16978 59550 : free(namecopy);
16979 : }
16980 :
16981 : /*
16982 : * Create the AS clause for a view or materialized view. The semicolon is
16983 : * stripped because a materialized view must add a WITH NO DATA clause.
16984 : *
16985 : * This returns a new buffer which must be freed by the caller.
16986 : */
16987 : static PQExpBuffer
16988 1918 : createViewAsClause(Archive *fout, const TableInfo *tbinfo)
16989 : {
16990 1918 : PQExpBuffer query = createPQExpBuffer();
16991 1918 : PQExpBuffer result = createPQExpBuffer();
16992 : PGresult *res;
16993 : int len;
16994 :
16995 : /* Fetch the view definition */
16996 1918 : appendPQExpBuffer(query,
16997 : "SELECT pg_catalog.pg_get_viewdef('%u'::pg_catalog.oid) AS viewdef",
16998 1918 : tbinfo->dobj.catId.oid);
16999 :
17000 1918 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
17001 :
17002 1918 : if (PQntuples(res) != 1)
17003 : {
17004 0 : if (PQntuples(res) < 1)
17005 0 : pg_fatal("query to obtain definition of view \"%s\" returned no data",
17006 : tbinfo->dobj.name);
17007 : else
17008 0 : pg_fatal("query to obtain definition of view \"%s\" returned more than one definition",
17009 : tbinfo->dobj.name);
17010 : }
17011 :
17012 1918 : len = PQgetlength(res, 0, 0);
17013 :
17014 1918 : if (len == 0)
17015 0 : pg_fatal("definition of view \"%s\" appears to be empty (length zero)",
17016 : tbinfo->dobj.name);
17017 :
17018 : /* Strip off the trailing semicolon so that other things may follow. */
17019 : Assert(PQgetvalue(res, 0, 0)[len - 1] == ';');
17020 1918 : appendBinaryPQExpBuffer(result, PQgetvalue(res, 0, 0), len - 1);
17021 :
17022 1918 : PQclear(res);
17023 1918 : destroyPQExpBuffer(query);
17024 :
17025 1918 : return result;
17026 : }
17027 :
17028 : /*
17029 : * Create a dummy AS clause for a view. This is used when the real view
17030 : * definition has to be postponed because of circular dependencies.
17031 : * We must duplicate the view's external properties -- column names and types
17032 : * (including collation) -- so that it works for subsequent references.
17033 : *
17034 : * This returns a new buffer which must be freed by the caller.
17035 : */
17036 : static PQExpBuffer
17037 40 : createDummyViewAsClause(Archive *fout, const TableInfo *tbinfo)
17038 : {
17039 40 : PQExpBuffer result = createPQExpBuffer();
17040 : int j;
17041 :
17042 40 : appendPQExpBufferStr(result, "SELECT");
17043 :
17044 80 : for (j = 0; j < tbinfo->numatts; j++)
17045 : {
17046 40 : if (j > 0)
17047 20 : appendPQExpBufferChar(result, ',');
17048 40 : appendPQExpBufferStr(result, "\n ");
17049 :
17050 40 : appendPQExpBuffer(result, "NULL::%s", tbinfo->atttypnames[j]);
17051 :
17052 : /*
17053 : * Must add collation if not default for the type, because CREATE OR
17054 : * REPLACE VIEW won't change it
17055 : */
17056 40 : if (OidIsValid(tbinfo->attcollation[j]))
17057 : {
17058 : CollInfo *coll;
17059 :
17060 0 : coll = findCollationByOid(tbinfo->attcollation[j]);
17061 0 : if (coll)
17062 0 : appendPQExpBuffer(result, " COLLATE %s",
17063 0 : fmtQualifiedDumpable(coll));
17064 : }
17065 :
17066 40 : appendPQExpBuffer(result, " AS %s", fmtId(tbinfo->attnames[j]));
17067 : }
17068 :
17069 40 : return result;
17070 : }
17071 :
17072 : /*
17073 : * dumpTableSchema
17074 : * write the declaration (not data) of one user-defined table or view
17075 : */
17076 : static void
17077 13198 : dumpTableSchema(Archive *fout, const TableInfo *tbinfo)
17078 : {
17079 13198 : DumpOptions *dopt = fout->dopt;
17080 13198 : PQExpBuffer q = createPQExpBuffer();
17081 13198 : PQExpBuffer delq = createPQExpBuffer();
17082 13198 : PQExpBuffer extra = createPQExpBuffer();
17083 : char *qrelname;
17084 : char *qualrelname;
17085 : int numParents;
17086 : TableInfo **parents;
17087 : int actual_atts; /* number of attrs in this CREATE statement */
17088 : const char *reltypename;
17089 : char *storage;
17090 : int j,
17091 : k;
17092 :
17093 : /* We had better have loaded per-column details about this table */
17094 : Assert(tbinfo->interesting);
17095 :
17096 13198 : qrelname = pg_strdup(fmtId(tbinfo->dobj.name));
17097 13198 : qualrelname = pg_strdup(fmtQualifiedDumpable(tbinfo));
17098 :
17099 13198 : if (tbinfo->hasoids)
17100 0 : pg_log_warning("WITH OIDS is not supported anymore (table \"%s\")",
17101 : qrelname);
17102 :
17103 13198 : if (dopt->binary_upgrade)
17104 1736 : binary_upgrade_set_type_oids_by_rel(fout, q, tbinfo);
17105 :
17106 : /* Is it a table or a view? */
17107 13198 : if (tbinfo->relkind == RELKIND_VIEW)
17108 : {
17109 : PQExpBuffer result;
17110 :
17111 : /*
17112 : * Note: keep this code in sync with the is_view case in dumpRule()
17113 : */
17114 :
17115 1078 : reltypename = "VIEW";
17116 :
17117 1078 : appendPQExpBuffer(delq, "DROP VIEW %s;\n", qualrelname);
17118 :
17119 1078 : if (dopt->binary_upgrade)
17120 104 : binary_upgrade_set_pg_class_oids(fout, q,
17121 104 : tbinfo->dobj.catId.oid);
17122 :
17123 1078 : appendPQExpBuffer(q, "CREATE VIEW %s", qualrelname);
17124 :
17125 1078 : if (tbinfo->dummy_view)
17126 20 : result = createDummyViewAsClause(fout, tbinfo);
17127 : else
17128 : {
17129 1058 : if (nonemptyReloptions(tbinfo->reloptions))
17130 : {
17131 134 : appendPQExpBufferStr(q, " WITH (");
17132 134 : appendReloptionsArrayAH(q, tbinfo->reloptions, "", fout);
17133 134 : appendPQExpBufferChar(q, ')');
17134 : }
17135 1058 : result = createViewAsClause(fout, tbinfo);
17136 : }
17137 1078 : appendPQExpBuffer(q, " AS\n%s", result->data);
17138 1078 : destroyPQExpBuffer(result);
17139 :
17140 1078 : if (tbinfo->checkoption != NULL && !tbinfo->dummy_view)
17141 76 : appendPQExpBuffer(q, "\n WITH %s CHECK OPTION", tbinfo->checkoption);
17142 1078 : appendPQExpBufferStr(q, ";\n");
17143 : }
17144 : else
17145 : {
17146 12120 : char *partkeydef = NULL;
17147 12120 : char *ftoptions = NULL;
17148 12120 : char *srvname = NULL;
17149 12120 : const char *foreign = "";
17150 :
17151 : /*
17152 : * Set reltypename, and collect any relkind-specific data that we
17153 : * didn't fetch during getTables().
17154 : */
17155 12120 : switch (tbinfo->relkind)
17156 : {
17157 1180 : case RELKIND_PARTITIONED_TABLE:
17158 : {
17159 1180 : PQExpBuffer query = createPQExpBuffer();
17160 : PGresult *res;
17161 :
17162 1180 : reltypename = "TABLE";
17163 :
17164 : /* retrieve partition key definition */
17165 1180 : appendPQExpBuffer(query,
17166 : "SELECT pg_get_partkeydef('%u')",
17167 1180 : tbinfo->dobj.catId.oid);
17168 1180 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
17169 1180 : partkeydef = pg_strdup(PQgetvalue(res, 0, 0));
17170 1180 : PQclear(res);
17171 1180 : destroyPQExpBuffer(query);
17172 1180 : break;
17173 : }
17174 80 : case RELKIND_FOREIGN_TABLE:
17175 : {
17176 80 : PQExpBuffer query = createPQExpBuffer();
17177 : PGresult *res;
17178 : int i_srvname;
17179 : int i_ftoptions;
17180 :
17181 80 : reltypename = "FOREIGN TABLE";
17182 :
17183 : /* retrieve name of foreign server and generic options */
17184 80 : appendPQExpBuffer(query,
17185 : "SELECT fs.srvname, "
17186 : "pg_catalog.array_to_string(ARRAY("
17187 : "SELECT pg_catalog.quote_ident(option_name) || "
17188 : "' ' || pg_catalog.quote_literal(option_value) "
17189 : "FROM pg_catalog.pg_options_to_table(ftoptions) "
17190 : "ORDER BY option_name"
17191 : "), E',\n ') AS ftoptions "
17192 : "FROM pg_catalog.pg_foreign_table ft "
17193 : "JOIN pg_catalog.pg_foreign_server fs "
17194 : "ON (fs.oid = ft.ftserver) "
17195 : "WHERE ft.ftrelid = '%u'",
17196 80 : tbinfo->dobj.catId.oid);
17197 80 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
17198 80 : i_srvname = PQfnumber(res, "srvname");
17199 80 : i_ftoptions = PQfnumber(res, "ftoptions");
17200 80 : srvname = pg_strdup(PQgetvalue(res, 0, i_srvname));
17201 80 : ftoptions = pg_strdup(PQgetvalue(res, 0, i_ftoptions));
17202 80 : PQclear(res);
17203 80 : destroyPQExpBuffer(query);
17204 :
17205 80 : foreign = "FOREIGN ";
17206 80 : break;
17207 : }
17208 840 : case RELKIND_MATVIEW:
17209 840 : reltypename = "MATERIALIZED VIEW";
17210 840 : break;
17211 10020 : default:
17212 10020 : reltypename = "TABLE";
17213 10020 : break;
17214 : }
17215 :
17216 12120 : numParents = tbinfo->numParents;
17217 12120 : parents = tbinfo->parents;
17218 :
17219 12120 : appendPQExpBuffer(delq, "DROP %s %s;\n", reltypename, qualrelname);
17220 :
17221 12120 : if (dopt->binary_upgrade)
17222 1632 : binary_upgrade_set_pg_class_oids(fout, q,
17223 1632 : tbinfo->dobj.catId.oid);
17224 :
17225 : /*
17226 : * PostgreSQL 18 has disabled UNLOGGED for partitioned tables, so
17227 : * ignore it when dumping if it was set in this case.
17228 : */
17229 12120 : appendPQExpBuffer(q, "CREATE %s%s %s",
17230 12120 : (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
17231 40 : tbinfo->relkind != RELKIND_PARTITIONED_TABLE) ?
17232 : "UNLOGGED " : "",
17233 : reltypename,
17234 : qualrelname);
17235 :
17236 : /*
17237 : * Attach to type, if reloftype; except in case of a binary upgrade,
17238 : * we dump the table normally and attach it to the type afterward.
17239 : */
17240 12120 : if (OidIsValid(tbinfo->reloftype) && !dopt->binary_upgrade)
17241 48 : appendPQExpBuffer(q, " OF %s",
17242 48 : getFormattedTypeName(fout, tbinfo->reloftype,
17243 : zeroIsError));
17244 :
17245 12120 : if (tbinfo->relkind != RELKIND_MATVIEW)
17246 : {
17247 : /* Dump the attributes */
17248 11280 : actual_atts = 0;
17249 52036 : for (j = 0; j < tbinfo->numatts; j++)
17250 : {
17251 : /*
17252 : * Normally, dump if it's locally defined in this table, and
17253 : * not dropped. But for binary upgrade, we'll dump all the
17254 : * columns, and then fix up the dropped and nonlocal cases
17255 : * below.
17256 : */
17257 40756 : if (shouldPrintColumn(dopt, tbinfo, j))
17258 : {
17259 : bool print_default;
17260 : bool print_notnull;
17261 :
17262 : /*
17263 : * Default value --- suppress if to be printed separately
17264 : * or not at all.
17265 : */
17266 79634 : print_default = (tbinfo->attrdefs[j] != NULL &&
17267 40826 : tbinfo->attrdefs[j]->dobj.dump &&
17268 2124 : !tbinfo->attrdefs[j]->separate);
17269 :
17270 : /*
17271 : * Not Null constraint --- print it if it is locally
17272 : * defined, or if binary upgrade. (In the latter case, we
17273 : * reset conislocal below.)
17274 : */
17275 43394 : print_notnull = (tbinfo->notnull_constrs[j] != NULL &&
17276 4692 : (tbinfo->notnull_islocal[j] ||
17277 1322 : dopt->binary_upgrade ||
17278 1154 : tbinfo->ispartition));
17279 :
17280 : /*
17281 : * Skip column if fully defined by reloftype, except in
17282 : * binary upgrade
17283 : */
17284 38702 : if (OidIsValid(tbinfo->reloftype) &&
17285 100 : !print_default && !print_notnull &&
17286 60 : !dopt->binary_upgrade)
17287 48 : continue;
17288 :
17289 : /* Format properly if not first attr */
17290 38654 : if (actual_atts == 0)
17291 10564 : appendPQExpBufferStr(q, " (");
17292 : else
17293 28090 : appendPQExpBufferChar(q, ',');
17294 38654 : appendPQExpBufferStr(q, "\n ");
17295 38654 : actual_atts++;
17296 :
17297 : /* Attribute name */
17298 38654 : appendPQExpBufferStr(q, fmtId(tbinfo->attnames[j]));
17299 :
17300 38654 : if (tbinfo->attisdropped[j])
17301 : {
17302 : /*
17303 : * ALTER TABLE DROP COLUMN clears
17304 : * pg_attribute.atttypid, so we will not have gotten a
17305 : * valid type name; insert INTEGER as a stopgap. We'll
17306 : * clean things up later.
17307 : */
17308 168 : appendPQExpBufferStr(q, " INTEGER /* dummy */");
17309 : /* and skip to the next column */
17310 168 : continue;
17311 : }
17312 :
17313 : /*
17314 : * Attribute type; print it except when creating a typed
17315 : * table ('OF type_name'), but in binary-upgrade mode,
17316 : * print it in that case too.
17317 : */
17318 38486 : if (dopt->binary_upgrade || !OidIsValid(tbinfo->reloftype))
17319 : {
17320 38454 : appendPQExpBuffer(q, " %s",
17321 38454 : tbinfo->atttypnames[j]);
17322 : }
17323 :
17324 38486 : if (print_default)
17325 : {
17326 1840 : if (tbinfo->attgenerated[j] == ATTRIBUTE_GENERATED_STORED)
17327 590 : appendPQExpBuffer(q, " GENERATED ALWAYS AS (%s) STORED",
17328 590 : tbinfo->attrdefs[j]->adef_expr);
17329 1250 : else if (tbinfo->attgenerated[j] == ATTRIBUTE_GENERATED_VIRTUAL)
17330 458 : appendPQExpBuffer(q, " GENERATED ALWAYS AS (%s)",
17331 458 : tbinfo->attrdefs[j]->adef_expr);
17332 : else
17333 792 : appendPQExpBuffer(q, " DEFAULT %s",
17334 792 : tbinfo->attrdefs[j]->adef_expr);
17335 : }
17336 :
17337 38486 : if (print_notnull)
17338 : {
17339 4618 : if (tbinfo->notnull_constrs[j][0] == '\0')
17340 3242 : appendPQExpBufferStr(q, " NOT NULL");
17341 : else
17342 1376 : appendPQExpBuffer(q, " CONSTRAINT %s NOT NULL",
17343 1376 : fmtId(tbinfo->notnull_constrs[j]));
17344 :
17345 4618 : if (tbinfo->notnull_noinh[j])
17346 0 : appendPQExpBufferStr(q, " NO INHERIT");
17347 : }
17348 :
17349 : /* Add collation if not default for the type */
17350 38486 : if (OidIsValid(tbinfo->attcollation[j]))
17351 : {
17352 : CollInfo *coll;
17353 :
17354 394 : coll = findCollationByOid(tbinfo->attcollation[j]);
17355 394 : if (coll)
17356 394 : appendPQExpBuffer(q, " COLLATE %s",
17357 394 : fmtQualifiedDumpable(coll));
17358 : }
17359 : }
17360 :
17361 : /*
17362 : * On the other hand, if we choose not to print a column
17363 : * (likely because it is created by inheritance), but the
17364 : * column has a locally-defined not-null constraint, we need
17365 : * to dump the constraint as a standalone object.
17366 : *
17367 : * This syntax isn't SQL-conforming, but if you wanted
17368 : * standard output you wouldn't be creating non-standard
17369 : * objects to begin with.
17370 : */
17371 40540 : if (!shouldPrintColumn(dopt, tbinfo, j) &&
17372 2054 : !tbinfo->attisdropped[j] &&
17373 1316 : tbinfo->notnull_constrs[j] != NULL &&
17374 382 : tbinfo->notnull_islocal[j])
17375 : {
17376 : /* Format properly if not first attr */
17377 122 : if (actual_atts == 0)
17378 114 : appendPQExpBufferStr(q, " (");
17379 : else
17380 8 : appendPQExpBufferChar(q, ',');
17381 122 : appendPQExpBufferStr(q, "\n ");
17382 122 : actual_atts++;
17383 :
17384 122 : if (tbinfo->notnull_constrs[j][0] == '\0')
17385 8 : appendPQExpBuffer(q, "NOT NULL %s",
17386 8 : fmtId(tbinfo->attnames[j]));
17387 : else
17388 228 : appendPQExpBuffer(q, "CONSTRAINT %s NOT NULL %s",
17389 114 : tbinfo->notnull_constrs[j],
17390 114 : fmtId(tbinfo->attnames[j]));
17391 : }
17392 : }
17393 :
17394 : /*
17395 : * Add non-inherited CHECK constraints, if any.
17396 : *
17397 : * For partitions, we need to include check constraints even if
17398 : * they're not defined locally, because the ALTER TABLE ATTACH
17399 : * PARTITION that we'll emit later expects the constraint to be
17400 : * there. (No need to fix conislocal: ATTACH PARTITION does that)
17401 : */
17402 12566 : for (j = 0; j < tbinfo->ncheck; j++)
17403 : {
17404 1286 : ConstraintInfo *constr = &(tbinfo->checkexprs[j]);
17405 :
17406 1286 : if (constr->separate ||
17407 1146 : (!constr->conislocal && !tbinfo->ispartition))
17408 226 : continue;
17409 :
17410 1060 : if (actual_atts == 0)
17411 32 : appendPQExpBufferStr(q, " (\n ");
17412 : else
17413 1028 : appendPQExpBufferStr(q, ",\n ");
17414 :
17415 1060 : appendPQExpBuffer(q, "CONSTRAINT %s ",
17416 1060 : fmtId(constr->dobj.name));
17417 1060 : appendPQExpBufferStr(q, constr->condef);
17418 :
17419 1060 : actual_atts++;
17420 : }
17421 :
17422 11280 : if (actual_atts)
17423 10710 : appendPQExpBufferStr(q, "\n)");
17424 570 : else if (!(OidIsValid(tbinfo->reloftype) && !dopt->binary_upgrade))
17425 : {
17426 : /*
17427 : * No attributes? we must have a parenthesized attribute list,
17428 : * even though empty, when not using the OF TYPE syntax.
17429 : */
17430 546 : appendPQExpBufferStr(q, " (\n)");
17431 : }
17432 :
17433 : /*
17434 : * Emit the INHERITS clause (not for partitions), except in
17435 : * binary-upgrade mode.
17436 : */
17437 11280 : if (numParents > 0 && !tbinfo->ispartition &&
17438 1040 : !dopt->binary_upgrade)
17439 : {
17440 914 : appendPQExpBufferStr(q, "\nINHERITS (");
17441 1982 : for (k = 0; k < numParents; k++)
17442 : {
17443 1068 : TableInfo *parentRel = parents[k];
17444 :
17445 1068 : if (k > 0)
17446 154 : appendPQExpBufferStr(q, ", ");
17447 1068 : appendPQExpBufferStr(q, fmtQualifiedDumpable(parentRel));
17448 : }
17449 914 : appendPQExpBufferChar(q, ')');
17450 : }
17451 :
17452 11280 : if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
17453 1180 : appendPQExpBuffer(q, "\nPARTITION BY %s", partkeydef);
17454 :
17455 11280 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
17456 80 : appendPQExpBuffer(q, "\nSERVER %s", fmtId(srvname));
17457 : }
17458 :
17459 23944 : if (nonemptyReloptions(tbinfo->reloptions) ||
17460 11824 : nonemptyReloptions(tbinfo->toast_reloptions))
17461 : {
17462 296 : bool addcomma = false;
17463 :
17464 296 : appendPQExpBufferStr(q, "\nWITH (");
17465 296 : if (nonemptyReloptions(tbinfo->reloptions))
17466 : {
17467 296 : addcomma = true;
17468 296 : appendReloptionsArrayAH(q, tbinfo->reloptions, "", fout);
17469 : }
17470 296 : if (nonemptyReloptions(tbinfo->toast_reloptions))
17471 : {
17472 10 : if (addcomma)
17473 10 : appendPQExpBufferStr(q, ", ");
17474 10 : appendReloptionsArrayAH(q, tbinfo->toast_reloptions, "toast.",
17475 : fout);
17476 : }
17477 296 : appendPQExpBufferChar(q, ')');
17478 : }
17479 :
17480 : /* Dump generic options if any */
17481 12120 : if (ftoptions && ftoptions[0])
17482 76 : appendPQExpBuffer(q, "\nOPTIONS (\n %s\n)", ftoptions);
17483 :
17484 : /*
17485 : * For materialized views, create the AS clause just like a view. At
17486 : * this point, we always mark the view as not populated.
17487 : */
17488 12120 : if (tbinfo->relkind == RELKIND_MATVIEW)
17489 : {
17490 : PQExpBuffer result;
17491 :
17492 840 : result = createViewAsClause(fout, tbinfo);
17493 840 : appendPQExpBuffer(q, " AS\n%s\n WITH NO DATA;\n",
17494 : result->data);
17495 840 : destroyPQExpBuffer(result);
17496 : }
17497 : else
17498 11280 : appendPQExpBufferStr(q, ";\n");
17499 :
17500 : /* Materialized views can depend on extensions */
17501 12120 : if (tbinfo->relkind == RELKIND_MATVIEW)
17502 840 : append_depends_on_extension(fout, q, &tbinfo->dobj,
17503 : "pg_catalog.pg_class",
17504 : "MATERIALIZED VIEW",
17505 : qualrelname);
17506 :
17507 : /*
17508 : * in binary upgrade mode, update the catalog with any missing values
17509 : * that might be present.
17510 : */
17511 12120 : if (dopt->binary_upgrade)
17512 : {
17513 7916 : for (j = 0; j < tbinfo->numatts; j++)
17514 : {
17515 6284 : if (tbinfo->attmissingval[j][0] != '\0')
17516 : {
17517 4 : appendPQExpBufferStr(q, "\n-- set missing value.\n");
17518 4 : appendPQExpBufferStr(q,
17519 : "SELECT pg_catalog.binary_upgrade_set_missing_value(");
17520 4 : appendStringLiteralAH(q, qualrelname, fout);
17521 4 : appendPQExpBufferStr(q, "::pg_catalog.regclass,");
17522 4 : appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17523 4 : appendPQExpBufferChar(q, ',');
17524 4 : appendStringLiteralAH(q, tbinfo->attmissingval[j], fout);
17525 4 : appendPQExpBufferStr(q, ");\n\n");
17526 : }
17527 : }
17528 : }
17529 :
17530 : /*
17531 : * To create binary-compatible heap files, we have to ensure the same
17532 : * physical column order, including dropped columns, as in the
17533 : * original. Therefore, we create dropped columns above and drop them
17534 : * here, also updating their attlen/attalign values so that the
17535 : * dropped column can be skipped properly. (We do not bother with
17536 : * restoring the original attbyval setting.) Also, inheritance
17537 : * relationships are set up by doing ALTER TABLE INHERIT rather than
17538 : * using an INHERITS clause --- the latter would possibly mess up the
17539 : * column order. That also means we have to take care about setting
17540 : * attislocal correctly, plus fix up any inherited CHECK constraints.
17541 : * Analogously, we set up typed tables using ALTER TABLE / OF here.
17542 : *
17543 : * We process foreign and partitioned tables here, even though they
17544 : * lack heap storage, because they can participate in inheritance
17545 : * relationships and we want this stuff to be consistent across the
17546 : * inheritance tree. We can exclude indexes, toast tables, sequences
17547 : * and matviews, even though they have storage, because we don't
17548 : * support altering or dropping columns in them, nor can they be part
17549 : * of inheritance trees.
17550 : */
17551 12120 : if (dopt->binary_upgrade &&
17552 1632 : (tbinfo->relkind == RELKIND_RELATION ||
17553 220 : tbinfo->relkind == RELKIND_FOREIGN_TABLE ||
17554 218 : tbinfo->relkind == RELKIND_PARTITIONED_TABLE))
17555 : {
17556 : bool firstitem;
17557 : bool firstitem_extra;
17558 :
17559 : /*
17560 : * Drop any dropped columns. Merge the pg_attribute manipulations
17561 : * into a single SQL command, so that we don't cause repeated
17562 : * relcache flushes on the target table. Otherwise we risk O(N^2)
17563 : * relcache bloat while dropping N columns.
17564 : */
17565 1596 : resetPQExpBuffer(extra);
17566 1596 : firstitem = true;
17567 7836 : for (j = 0; j < tbinfo->numatts; j++)
17568 : {
17569 6240 : if (tbinfo->attisdropped[j])
17570 : {
17571 168 : if (firstitem)
17572 : {
17573 76 : appendPQExpBufferStr(q, "\n-- For binary upgrade, recreate dropped columns.\n"
17574 : "UPDATE pg_catalog.pg_attribute\n"
17575 : "SET attlen = v.dlen, "
17576 : "attalign = v.dalign, "
17577 : "attbyval = false\n"
17578 : "FROM (VALUES ");
17579 76 : firstitem = false;
17580 : }
17581 : else
17582 92 : appendPQExpBufferStr(q, ",\n ");
17583 168 : appendPQExpBufferChar(q, '(');
17584 168 : appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17585 168 : appendPQExpBuffer(q, ", %d, '%c')",
17586 168 : tbinfo->attlen[j],
17587 168 : tbinfo->attalign[j]);
17588 : /* The ALTER ... DROP COLUMN commands must come after */
17589 168 : appendPQExpBuffer(extra, "ALTER %sTABLE ONLY %s ",
17590 : foreign, qualrelname);
17591 168 : appendPQExpBuffer(extra, "DROP COLUMN %s;\n",
17592 168 : fmtId(tbinfo->attnames[j]));
17593 : }
17594 : }
17595 1596 : if (!firstitem)
17596 : {
17597 76 : appendPQExpBufferStr(q, ") v(dname, dlen, dalign)\n"
17598 : "WHERE attrelid = ");
17599 76 : appendStringLiteralAH(q, qualrelname, fout);
17600 76 : appendPQExpBufferStr(q, "::pg_catalog.regclass\n"
17601 : " AND attname = v.dname;\n");
17602 : /* Now we can issue the actual DROP COLUMN commands */
17603 76 : appendBinaryPQExpBuffer(q, extra->data, extra->len);
17604 : }
17605 :
17606 : /*
17607 : * Fix up inherited columns. As above, do the pg_attribute
17608 : * manipulations in a single SQL command.
17609 : */
17610 1596 : firstitem = true;
17611 7836 : for (j = 0; j < tbinfo->numatts; j++)
17612 : {
17613 6240 : if (!tbinfo->attisdropped[j] &&
17614 6072 : !tbinfo->attislocal[j])
17615 : {
17616 1206 : if (firstitem)
17617 : {
17618 532 : appendPQExpBufferStr(q, "\n-- For binary upgrade, recreate inherited columns.\n");
17619 532 : appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_attribute\n"
17620 : "SET attislocal = false\n"
17621 : "WHERE attrelid = ");
17622 532 : appendStringLiteralAH(q, qualrelname, fout);
17623 532 : appendPQExpBufferStr(q, "::pg_catalog.regclass\n"
17624 : " AND attname IN (");
17625 532 : firstitem = false;
17626 : }
17627 : else
17628 674 : appendPQExpBufferStr(q, ", ");
17629 1206 : appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17630 : }
17631 : }
17632 1596 : if (!firstitem)
17633 532 : appendPQExpBufferStr(q, ");\n");
17634 :
17635 : /*
17636 : * Fix up not-null constraints that come from inheritance. As
17637 : * above, do the pg_constraint manipulations in a single SQL
17638 : * command. (Actually, two in special cases, if we're doing an
17639 : * upgrade from < 18).
17640 : */
17641 1596 : firstitem = true;
17642 1596 : firstitem_extra = true;
17643 1596 : resetPQExpBuffer(extra);
17644 7836 : for (j = 0; j < tbinfo->numatts; j++)
17645 : {
17646 : /*
17647 : * If a not-null constraint comes from inheritance, reset
17648 : * conislocal. The inhcount is fixed by ALTER TABLE INHERIT,
17649 : * below. Special hack: in versions < 18, columns with no
17650 : * local definition need their constraint to be matched by
17651 : * column number in conkeys instead of by constraint name,
17652 : * because the latter is not available. (We distinguish the
17653 : * case because the constraint name is the empty string.)
17654 : */
17655 6240 : if (tbinfo->notnull_constrs[j] != NULL &&
17656 580 : !tbinfo->notnull_islocal[j])
17657 : {
17658 168 : if (tbinfo->notnull_constrs[j][0] != '\0')
17659 : {
17660 142 : if (firstitem)
17661 : {
17662 122 : appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_constraint\n"
17663 : "SET conislocal = false\n"
17664 : "WHERE contype = 'n' AND conrelid = ");
17665 122 : appendStringLiteralAH(q, qualrelname, fout);
17666 122 : appendPQExpBufferStr(q, "::pg_catalog.regclass AND\n"
17667 : "conname IN (");
17668 122 : firstitem = false;
17669 : }
17670 : else
17671 20 : appendPQExpBufferStr(q, ", ");
17672 142 : appendStringLiteralAH(q, tbinfo->notnull_constrs[j], fout);
17673 : }
17674 : else
17675 : {
17676 26 : if (firstitem_extra)
17677 : {
17678 26 : appendPQExpBufferStr(extra, "UPDATE pg_catalog.pg_constraint\n"
17679 : "SET conislocal = false\n"
17680 : "WHERE contype = 'n' AND conrelid = ");
17681 26 : appendStringLiteralAH(extra, qualrelname, fout);
17682 26 : appendPQExpBufferStr(extra, "::pg_catalog.regclass AND\n"
17683 : "conkey IN (");
17684 26 : firstitem_extra = false;
17685 : }
17686 : else
17687 0 : appendPQExpBufferStr(extra, ", ");
17688 26 : appendPQExpBuffer(extra, "'{%d}'", j + 1);
17689 : }
17690 : }
17691 : }
17692 1596 : if (!firstitem)
17693 122 : appendPQExpBufferStr(q, ");\n");
17694 1596 : if (!firstitem_extra)
17695 26 : appendPQExpBufferStr(extra, ");\n");
17696 :
17697 1596 : if (extra->len > 0)
17698 26 : appendBinaryPQExpBuffer(q, extra->data, extra->len);
17699 :
17700 : /*
17701 : * Add inherited CHECK constraints, if any.
17702 : *
17703 : * For partitions, they were already dumped, and conislocal
17704 : * doesn't need fixing.
17705 : *
17706 : * As above, issue only one direct manipulation of pg_constraint.
17707 : * Although it is tempting to merge the ALTER ADD CONSTRAINT
17708 : * commands into one as well, refrain for now due to concern about
17709 : * possible backend memory bloat if there are many such
17710 : * constraints.
17711 : */
17712 1596 : resetPQExpBuffer(extra);
17713 1596 : firstitem = true;
17714 1724 : for (k = 0; k < tbinfo->ncheck; k++)
17715 : {
17716 128 : ConstraintInfo *constr = &(tbinfo->checkexprs[k]);
17717 :
17718 128 : if (constr->separate || constr->conislocal || tbinfo->ispartition)
17719 124 : continue;
17720 :
17721 4 : if (firstitem)
17722 4 : appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inherited constraints.\n");
17723 4 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ADD CONSTRAINT %s %s;\n",
17724 : foreign, qualrelname,
17725 4 : fmtId(constr->dobj.name),
17726 : constr->condef);
17727 : /* Update pg_constraint after all the ALTER TABLEs */
17728 4 : if (firstitem)
17729 : {
17730 4 : appendPQExpBufferStr(extra, "UPDATE pg_catalog.pg_constraint\n"
17731 : "SET conislocal = false\n"
17732 : "WHERE contype = 'c' AND conrelid = ");
17733 4 : appendStringLiteralAH(extra, qualrelname, fout);
17734 4 : appendPQExpBufferStr(extra, "::pg_catalog.regclass\n");
17735 4 : appendPQExpBufferStr(extra, " AND conname IN (");
17736 4 : firstitem = false;
17737 : }
17738 : else
17739 0 : appendPQExpBufferStr(extra, ", ");
17740 4 : appendStringLiteralAH(extra, constr->dobj.name, fout);
17741 : }
17742 1596 : if (!firstitem)
17743 : {
17744 4 : appendPQExpBufferStr(extra, ");\n");
17745 4 : appendBinaryPQExpBuffer(q, extra->data, extra->len);
17746 : }
17747 :
17748 1596 : if (numParents > 0 && !tbinfo->ispartition)
17749 : {
17750 126 : appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inheritance this way.\n");
17751 274 : for (k = 0; k < numParents; k++)
17752 : {
17753 148 : TableInfo *parentRel = parents[k];
17754 :
17755 148 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s INHERIT %s;\n", foreign,
17756 : qualrelname,
17757 148 : fmtQualifiedDumpable(parentRel));
17758 : }
17759 : }
17760 :
17761 1596 : if (OidIsValid(tbinfo->reloftype))
17762 : {
17763 12 : appendPQExpBufferStr(q, "\n-- For binary upgrade, set up typed tables this way.\n");
17764 12 : appendPQExpBuffer(q, "ALTER TABLE ONLY %s OF %s;\n",
17765 : qualrelname,
17766 12 : getFormattedTypeName(fout, tbinfo->reloftype,
17767 : zeroIsError));
17768 : }
17769 : }
17770 :
17771 : /*
17772 : * In binary_upgrade mode, arrange to restore the old relfrozenxid and
17773 : * relminmxid of all vacuumable relations. (While vacuum.c processes
17774 : * TOAST tables semi-independently, here we see them only as children
17775 : * of other relations; so this "if" lacks RELKIND_TOASTVALUE, and the
17776 : * child toast table is handled below.)
17777 : */
17778 12120 : if (dopt->binary_upgrade &&
17779 1632 : (tbinfo->relkind == RELKIND_RELATION ||
17780 220 : tbinfo->relkind == RELKIND_MATVIEW))
17781 : {
17782 1448 : appendPQExpBufferStr(q, "\n-- For binary upgrade, set heap's relfrozenxid and relminmxid\n");
17783 1448 : appendPQExpBuffer(q, "UPDATE pg_catalog.pg_class\n"
17784 : "SET relfrozenxid = '%u', relminmxid = '%u'\n"
17785 : "WHERE oid = ",
17786 1448 : tbinfo->frozenxid, tbinfo->minmxid);
17787 1448 : appendStringLiteralAH(q, qualrelname, fout);
17788 1448 : appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
17789 :
17790 1448 : if (tbinfo->toast_oid)
17791 : {
17792 : /*
17793 : * The toast table will have the same OID at restore, so we
17794 : * can safely target it by OID.
17795 : */
17796 560 : appendPQExpBufferStr(q, "\n-- For binary upgrade, set toast's relfrozenxid and relminmxid\n");
17797 560 : appendPQExpBuffer(q, "UPDATE pg_catalog.pg_class\n"
17798 : "SET relfrozenxid = '%u', relminmxid = '%u'\n"
17799 : "WHERE oid = '%u';\n",
17800 560 : tbinfo->toast_frozenxid,
17801 560 : tbinfo->toast_minmxid, tbinfo->toast_oid);
17802 : }
17803 : }
17804 :
17805 : /*
17806 : * In binary_upgrade mode, restore matviews' populated status by
17807 : * poking pg_class directly. This is pretty ugly, but we can't use
17808 : * REFRESH MATERIALIZED VIEW since it's possible that some underlying
17809 : * matview is not populated even though this matview is; in any case,
17810 : * we want to transfer the matview's heap storage, not run REFRESH.
17811 : */
17812 12120 : if (dopt->binary_upgrade && tbinfo->relkind == RELKIND_MATVIEW &&
17813 36 : tbinfo->relispopulated)
17814 : {
17815 32 : appendPQExpBufferStr(q, "\n-- For binary upgrade, mark materialized view as populated\n");
17816 32 : appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_class\n"
17817 : "SET relispopulated = 't'\n"
17818 : "WHERE oid = ");
17819 32 : appendStringLiteralAH(q, qualrelname, fout);
17820 32 : appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
17821 : }
17822 :
17823 : /*
17824 : * Dump additional per-column properties that we can't handle in the
17825 : * main CREATE TABLE command.
17826 : */
17827 53888 : for (j = 0; j < tbinfo->numatts; j++)
17828 : {
17829 : /* None of this applies to dropped columns */
17830 41768 : if (tbinfo->attisdropped[j])
17831 906 : continue;
17832 :
17833 : /*
17834 : * Dump per-column statistics information. We only issue an ALTER
17835 : * TABLE statement if the attstattarget entry for this column is
17836 : * not the default value.
17837 : */
17838 40862 : if (tbinfo->attstattarget[j] >= 0)
17839 76 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET STATISTICS %d;\n",
17840 : foreign, qualrelname,
17841 76 : fmtId(tbinfo->attnames[j]),
17842 76 : tbinfo->attstattarget[j]);
17843 :
17844 : /*
17845 : * Dump per-column storage information. The statement is only
17846 : * dumped if the storage has been changed from the type's default.
17847 : */
17848 40862 : if (tbinfo->attstorage[j] != tbinfo->typstorage[j])
17849 : {
17850 182 : switch (tbinfo->attstorage[j])
17851 : {
17852 20 : case TYPSTORAGE_PLAIN:
17853 20 : storage = "PLAIN";
17854 20 : break;
17855 86 : case TYPSTORAGE_EXTERNAL:
17856 86 : storage = "EXTERNAL";
17857 86 : break;
17858 0 : case TYPSTORAGE_EXTENDED:
17859 0 : storage = "EXTENDED";
17860 0 : break;
17861 76 : case TYPSTORAGE_MAIN:
17862 76 : storage = "MAIN";
17863 76 : break;
17864 0 : default:
17865 0 : storage = NULL;
17866 : }
17867 :
17868 : /*
17869 : * Only dump the statement if it's a storage type we recognize
17870 : */
17871 182 : if (storage != NULL)
17872 182 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET STORAGE %s;\n",
17873 : foreign, qualrelname,
17874 182 : fmtId(tbinfo->attnames[j]),
17875 : storage);
17876 : }
17877 :
17878 : /*
17879 : * Dump per-column compression, if it's been set.
17880 : */
17881 40862 : if (!dopt->no_toast_compression)
17882 : {
17883 : const char *cmname;
17884 :
17885 40666 : switch (tbinfo->attcompression[j])
17886 : {
17887 154 : case 'p':
17888 154 : cmname = "pglz";
17889 154 : break;
17890 198 : case 'l':
17891 198 : cmname = "lz4";
17892 198 : break;
17893 40314 : default:
17894 40314 : cmname = NULL;
17895 40314 : break;
17896 : }
17897 :
17898 40666 : if (cmname != NULL)
17899 352 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET COMPRESSION %s;\n",
17900 : foreign, qualrelname,
17901 352 : fmtId(tbinfo->attnames[j]),
17902 : cmname);
17903 : }
17904 :
17905 : /*
17906 : * Dump per-column attributes.
17907 : */
17908 40862 : if (tbinfo->attoptions[j][0] != '\0')
17909 76 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET (%s);\n",
17910 : foreign, qualrelname,
17911 76 : fmtId(tbinfo->attnames[j]),
17912 76 : tbinfo->attoptions[j]);
17913 :
17914 : /*
17915 : * Dump per-column fdw options.
17916 : */
17917 40862 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
17918 80 : tbinfo->attfdwoptions[j][0] != '\0')
17919 76 : appendPQExpBuffer(q,
17920 : "ALTER FOREIGN TABLE ONLY %s ALTER COLUMN %s OPTIONS (\n"
17921 : " %s\n"
17922 : ");\n",
17923 : qualrelname,
17924 76 : fmtId(tbinfo->attnames[j]),
17925 76 : tbinfo->attfdwoptions[j]);
17926 : } /* end loop over columns */
17927 :
17928 12120 : free(partkeydef);
17929 12120 : free(ftoptions);
17930 12120 : free(srvname);
17931 : }
17932 :
17933 : /*
17934 : * dump properties we only have ALTER TABLE syntax for
17935 : */
17936 13198 : if ((tbinfo->relkind == RELKIND_RELATION ||
17937 3178 : tbinfo->relkind == RELKIND_PARTITIONED_TABLE ||
17938 1998 : tbinfo->relkind == RELKIND_MATVIEW) &&
17939 12040 : tbinfo->relreplident != REPLICA_IDENTITY_DEFAULT)
17940 : {
17941 384 : if (tbinfo->relreplident == REPLICA_IDENTITY_INDEX)
17942 : {
17943 : /* nothing to do, will be set when the index is dumped */
17944 : }
17945 384 : else if (tbinfo->relreplident == REPLICA_IDENTITY_NOTHING)
17946 : {
17947 384 : appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY NOTHING;\n",
17948 : qualrelname);
17949 : }
17950 0 : else if (tbinfo->relreplident == REPLICA_IDENTITY_FULL)
17951 : {
17952 0 : appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY FULL;\n",
17953 : qualrelname);
17954 : }
17955 : }
17956 :
17957 13198 : if (tbinfo->forcerowsec)
17958 10 : appendPQExpBuffer(q, "\nALTER TABLE ONLY %s FORCE ROW LEVEL SECURITY;\n",
17959 : qualrelname);
17960 :
17961 13198 : if (dopt->binary_upgrade)
17962 1736 : binary_upgrade_extension_member(q, &tbinfo->dobj,
17963 : reltypename, qrelname,
17964 1736 : tbinfo->dobj.namespace->dobj.name);
17965 :
17966 13198 : if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
17967 : {
17968 13198 : char *tablespace = NULL;
17969 13198 : char *tableam = NULL;
17970 :
17971 : /*
17972 : * _selectTablespace() relies on tablespace-enabled objects in the
17973 : * default tablespace to have a tablespace of "" (empty string) versus
17974 : * non-tablespace-enabled objects to have a tablespace of NULL.
17975 : * getTables() sets tbinfo->reltablespace to "" for the default
17976 : * tablespace (not NULL).
17977 : */
17978 13198 : if (RELKIND_HAS_TABLESPACE(tbinfo->relkind))
17979 12040 : tablespace = tbinfo->reltablespace;
17980 :
17981 13198 : if (RELKIND_HAS_TABLE_AM(tbinfo->relkind) ||
17982 2338 : tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
17983 12040 : tableam = tbinfo->amname;
17984 :
17985 13198 : ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
17986 13198 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
17987 : .namespace = tbinfo->dobj.namespace->dobj.name,
17988 : .tablespace = tablespace,
17989 : .tableam = tableam,
17990 : .relkind = tbinfo->relkind,
17991 : .owner = tbinfo->rolname,
17992 : .description = reltypename,
17993 : .section = tbinfo->postponed_def ?
17994 : SECTION_POST_DATA : SECTION_PRE_DATA,
17995 : .createStmt = q->data,
17996 : .dropStmt = delq->data));
17997 : }
17998 :
17999 : /* Dump Table Comments */
18000 13198 : if (tbinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18001 172 : dumpTableComment(fout, tbinfo, reltypename);
18002 :
18003 : /* Dump Table Security Labels */
18004 13198 : if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
18005 0 : dumpTableSecLabel(fout, tbinfo, reltypename);
18006 :
18007 : /*
18008 : * Dump comments for not-null constraints that aren't to be dumped
18009 : * separately (those are processed by collectComments/dumpComment).
18010 : */
18011 13198 : if (!fout->dopt->no_comments && dopt->dumpSchema &&
18012 13198 : fout->remoteVersion >= 180000)
18013 : {
18014 13198 : PQExpBuffer comment = NULL;
18015 13198 : PQExpBuffer tag = NULL;
18016 :
18017 61776 : for (j = 0; j < tbinfo->numatts; j++)
18018 : {
18019 48578 : if (tbinfo->notnull_constrs[j] != NULL &&
18020 5074 : tbinfo->notnull_comment[j] != NULL)
18021 : {
18022 96 : if (comment == NULL)
18023 : {
18024 96 : comment = createPQExpBuffer();
18025 96 : tag = createPQExpBuffer();
18026 : }
18027 : else
18028 : {
18029 0 : resetPQExpBuffer(comment);
18030 0 : resetPQExpBuffer(tag);
18031 : }
18032 :
18033 96 : appendPQExpBuffer(comment, "COMMENT ON CONSTRAINT %s ON %s IS ",
18034 96 : fmtId(tbinfo->notnull_constrs[j]), qualrelname);
18035 96 : appendStringLiteralAH(comment, tbinfo->notnull_comment[j], fout);
18036 96 : appendPQExpBufferStr(comment, ";\n");
18037 :
18038 96 : appendPQExpBuffer(tag, "CONSTRAINT %s ON %s",
18039 96 : fmtId(tbinfo->notnull_constrs[j]), qrelname);
18040 :
18041 96 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
18042 96 : ARCHIVE_OPTS(.tag = tag->data,
18043 : .namespace = tbinfo->dobj.namespace->dobj.name,
18044 : .owner = tbinfo->rolname,
18045 : .description = "COMMENT",
18046 : .section = SECTION_NONE,
18047 : .createStmt = comment->data,
18048 : .deps = &(tbinfo->dobj.dumpId),
18049 : .nDeps = 1));
18050 : }
18051 : }
18052 :
18053 13198 : destroyPQExpBuffer(comment);
18054 13198 : destroyPQExpBuffer(tag);
18055 : }
18056 :
18057 : /* Dump comments on inlined table constraints */
18058 14484 : for (j = 0; j < tbinfo->ncheck; j++)
18059 : {
18060 1286 : ConstraintInfo *constr = &(tbinfo->checkexprs[j]);
18061 :
18062 1286 : if (constr->separate || !constr->conislocal)
18063 548 : continue;
18064 :
18065 738 : if (constr->dobj.dump & DUMP_COMPONENT_COMMENT)
18066 86 : dumpTableConstraintComment(fout, constr);
18067 : }
18068 :
18069 13198 : destroyPQExpBuffer(q);
18070 13198 : destroyPQExpBuffer(delq);
18071 13198 : destroyPQExpBuffer(extra);
18072 13198 : free(qrelname);
18073 13198 : free(qualrelname);
18074 13198 : }
18075 :
18076 : /*
18077 : * dumpTableAttach
18078 : * write to fout the commands to attach a child partition
18079 : *
18080 : * Child partitions are always made by creating them separately
18081 : * and then using ATTACH PARTITION, rather than using
18082 : * CREATE TABLE ... PARTITION OF. This is important for preserving
18083 : * any possible discrepancy in column layout, to allow assigning the
18084 : * correct tablespace if different, and so that it's possible to restore
18085 : * a partition without restoring its parent. (You'll get an error from
18086 : * the ATTACH PARTITION command, but that can be ignored, or skipped
18087 : * using "pg_restore -L" if you prefer.) The last point motivates
18088 : * treating ATTACH PARTITION as a completely separate ArchiveEntry
18089 : * rather than emitting it within the child partition's ArchiveEntry.
18090 : */
18091 : static void
18092 2870 : dumpTableAttach(Archive *fout, const TableAttachInfo *attachinfo)
18093 : {
18094 2870 : DumpOptions *dopt = fout->dopt;
18095 : PQExpBuffer q;
18096 : PGresult *res;
18097 : char *partbound;
18098 :
18099 : /* Do nothing if not dumping schema */
18100 2870 : if (!dopt->dumpSchema)
18101 108 : return;
18102 :
18103 2762 : q = createPQExpBuffer();
18104 :
18105 2762 : if (!fout->is_prepared[PREPQUERY_DUMPTABLEATTACH])
18106 : {
18107 : /* Set up query for partbound details */
18108 98 : appendPQExpBufferStr(q,
18109 : "PREPARE dumpTableAttach(pg_catalog.oid) AS\n");
18110 :
18111 98 : appendPQExpBufferStr(q,
18112 : "SELECT pg_get_expr(c.relpartbound, c.oid) "
18113 : "FROM pg_class c "
18114 : "WHERE c.oid = $1");
18115 :
18116 98 : ExecuteSqlStatement(fout, q->data);
18117 :
18118 98 : fout->is_prepared[PREPQUERY_DUMPTABLEATTACH] = true;
18119 : }
18120 :
18121 2762 : printfPQExpBuffer(q,
18122 : "EXECUTE dumpTableAttach('%u')",
18123 2762 : attachinfo->partitionTbl->dobj.catId.oid);
18124 :
18125 2762 : res = ExecuteSqlQueryForSingleRow(fout, q->data);
18126 2762 : partbound = PQgetvalue(res, 0, 0);
18127 :
18128 : /* Perform ALTER TABLE on the parent */
18129 2762 : printfPQExpBuffer(q,
18130 : "ALTER TABLE ONLY %s ",
18131 2762 : fmtQualifiedDumpable(attachinfo->parentTbl));
18132 2762 : appendPQExpBuffer(q,
18133 : "ATTACH PARTITION %s %s;\n",
18134 2762 : fmtQualifiedDumpable(attachinfo->partitionTbl),
18135 : partbound);
18136 :
18137 : /*
18138 : * There is no point in creating a drop query as the drop is done by table
18139 : * drop. (If you think to change this, see also _printTocEntry().)
18140 : * Although this object doesn't really have ownership as such, set the
18141 : * owner field anyway to ensure that the command is run by the correct
18142 : * role at restore time.
18143 : */
18144 2762 : ArchiveEntry(fout, attachinfo->dobj.catId, attachinfo->dobj.dumpId,
18145 2762 : ARCHIVE_OPTS(.tag = attachinfo->dobj.name,
18146 : .namespace = attachinfo->dobj.namespace->dobj.name,
18147 : .owner = attachinfo->partitionTbl->rolname,
18148 : .description = "TABLE ATTACH",
18149 : .section = SECTION_PRE_DATA,
18150 : .createStmt = q->data));
18151 :
18152 2762 : PQclear(res);
18153 2762 : destroyPQExpBuffer(q);
18154 : }
18155 :
18156 : /*
18157 : * dumpAttrDef --- dump an attribute's default-value declaration
18158 : */
18159 : static void
18160 2208 : dumpAttrDef(Archive *fout, const AttrDefInfo *adinfo)
18161 : {
18162 2208 : DumpOptions *dopt = fout->dopt;
18163 2208 : TableInfo *tbinfo = adinfo->adtable;
18164 2208 : int adnum = adinfo->adnum;
18165 : PQExpBuffer q;
18166 : PQExpBuffer delq;
18167 : char *qualrelname;
18168 : char *tag;
18169 : char *foreign;
18170 :
18171 : /* Do nothing if not dumping schema */
18172 2208 : if (!dopt->dumpSchema)
18173 0 : return;
18174 :
18175 : /* Skip if not "separate"; it was dumped in the table's definition */
18176 2208 : if (!adinfo->separate)
18177 1840 : return;
18178 :
18179 368 : q = createPQExpBuffer();
18180 368 : delq = createPQExpBuffer();
18181 :
18182 368 : qualrelname = pg_strdup(fmtQualifiedDumpable(tbinfo));
18183 :
18184 368 : foreign = tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "";
18185 :
18186 368 : appendPQExpBuffer(q,
18187 : "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET DEFAULT %s;\n",
18188 368 : foreign, qualrelname, fmtId(tbinfo->attnames[adnum - 1]),
18189 368 : adinfo->adef_expr);
18190 :
18191 368 : appendPQExpBuffer(delq, "ALTER %sTABLE %s ALTER COLUMN %s DROP DEFAULT;\n",
18192 : foreign, qualrelname,
18193 368 : fmtId(tbinfo->attnames[adnum - 1]));
18194 :
18195 368 : tag = psprintf("%s %s", tbinfo->dobj.name, tbinfo->attnames[adnum - 1]);
18196 :
18197 368 : if (adinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18198 368 : ArchiveEntry(fout, adinfo->dobj.catId, adinfo->dobj.dumpId,
18199 368 : ARCHIVE_OPTS(.tag = tag,
18200 : .namespace = tbinfo->dobj.namespace->dobj.name,
18201 : .owner = tbinfo->rolname,
18202 : .description = "DEFAULT",
18203 : .section = SECTION_PRE_DATA,
18204 : .createStmt = q->data,
18205 : .dropStmt = delq->data));
18206 :
18207 368 : free(tag);
18208 368 : destroyPQExpBuffer(q);
18209 368 : destroyPQExpBuffer(delq);
18210 368 : free(qualrelname);
18211 : }
18212 :
18213 : /*
18214 : * getAttrName: extract the correct name for an attribute
18215 : *
18216 : * The array tblInfo->attnames[] only provides names of user attributes;
18217 : * if a system attribute number is supplied, we have to fake it.
18218 : * We also do a little bit of bounds checking for safety's sake.
18219 : */
18220 : static const char *
18221 4306 : getAttrName(int attrnum, const TableInfo *tblInfo)
18222 : {
18223 4306 : if (attrnum > 0 && attrnum <= tblInfo->numatts)
18224 4306 : return tblInfo->attnames[attrnum - 1];
18225 0 : switch (attrnum)
18226 : {
18227 0 : case SelfItemPointerAttributeNumber:
18228 0 : return "ctid";
18229 0 : case MinTransactionIdAttributeNumber:
18230 0 : return "xmin";
18231 0 : case MinCommandIdAttributeNumber:
18232 0 : return "cmin";
18233 0 : case MaxTransactionIdAttributeNumber:
18234 0 : return "xmax";
18235 0 : case MaxCommandIdAttributeNumber:
18236 0 : return "cmax";
18237 0 : case TableOidAttributeNumber:
18238 0 : return "tableoid";
18239 : }
18240 0 : pg_fatal("invalid column number %d for table \"%s\"",
18241 : attrnum, tblInfo->dobj.name);
18242 : return NULL; /* keep compiler quiet */
18243 : }
18244 :
18245 : /*
18246 : * dumpIndex
18247 : * write out to fout a user-defined index
18248 : */
18249 : static void
18250 5376 : dumpIndex(Archive *fout, const IndxInfo *indxinfo)
18251 : {
18252 5376 : DumpOptions *dopt = fout->dopt;
18253 5376 : TableInfo *tbinfo = indxinfo->indextable;
18254 5376 : bool is_constraint = (indxinfo->indexconstraint != 0);
18255 : PQExpBuffer q;
18256 : PQExpBuffer delq;
18257 : char *qindxname;
18258 : char *qqindxname;
18259 :
18260 : /* Do nothing if not dumping schema */
18261 5376 : if (!dopt->dumpSchema)
18262 234 : return;
18263 :
18264 5142 : q = createPQExpBuffer();
18265 5142 : delq = createPQExpBuffer();
18266 :
18267 5142 : qindxname = pg_strdup(fmtId(indxinfo->dobj.name));
18268 5142 : qqindxname = pg_strdup(fmtQualifiedDumpable(indxinfo));
18269 :
18270 : /*
18271 : * If there's an associated constraint, don't dump the index per se, but
18272 : * do dump any comment for it. (This is safe because dependency ordering
18273 : * will have ensured the constraint is emitted first.) Note that the
18274 : * emitted comment has to be shown as depending on the constraint, not the
18275 : * index, in such cases.
18276 : */
18277 5142 : if (!is_constraint)
18278 : {
18279 2154 : char *indstatcols = indxinfo->indstatcols;
18280 2154 : char *indstatvals = indxinfo->indstatvals;
18281 2154 : char **indstatcolsarray = NULL;
18282 2154 : char **indstatvalsarray = NULL;
18283 2154 : int nstatcols = 0;
18284 2154 : int nstatvals = 0;
18285 :
18286 2154 : if (dopt->binary_upgrade)
18287 312 : binary_upgrade_set_pg_class_oids(fout, q,
18288 312 : indxinfo->dobj.catId.oid);
18289 :
18290 : /* Plain secondary index */
18291 2154 : appendPQExpBuffer(q, "%s;\n", indxinfo->indexdef);
18292 :
18293 : /*
18294 : * Append ALTER TABLE commands as needed to set properties that we
18295 : * only have ALTER TABLE syntax for. Keep this in sync with the
18296 : * similar code in dumpConstraint!
18297 : */
18298 :
18299 : /* If the index is clustered, we need to record that. */
18300 2154 : if (indxinfo->indisclustered)
18301 : {
18302 0 : appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER",
18303 0 : fmtQualifiedDumpable(tbinfo));
18304 : /* index name is not qualified in this syntax */
18305 0 : appendPQExpBuffer(q, " ON %s;\n",
18306 : qindxname);
18307 : }
18308 :
18309 : /*
18310 : * If the index has any statistics on some of its columns, generate
18311 : * the associated ALTER INDEX queries.
18312 : */
18313 2154 : if (strlen(indstatcols) != 0 || strlen(indstatvals) != 0)
18314 : {
18315 : int j;
18316 :
18317 76 : if (!parsePGArray(indstatcols, &indstatcolsarray, &nstatcols))
18318 0 : pg_fatal("could not parse index statistic columns");
18319 76 : if (!parsePGArray(indstatvals, &indstatvalsarray, &nstatvals))
18320 0 : pg_fatal("could not parse index statistic values");
18321 76 : if (nstatcols != nstatvals)
18322 0 : pg_fatal("mismatched number of columns and values for index statistics");
18323 :
18324 228 : for (j = 0; j < nstatcols; j++)
18325 : {
18326 152 : appendPQExpBuffer(q, "ALTER INDEX %s ", qqindxname);
18327 :
18328 : /*
18329 : * Note that this is a column number, so no quotes should be
18330 : * used.
18331 : */
18332 152 : appendPQExpBuffer(q, "ALTER COLUMN %s ",
18333 152 : indstatcolsarray[j]);
18334 152 : appendPQExpBuffer(q, "SET STATISTICS %s;\n",
18335 152 : indstatvalsarray[j]);
18336 : }
18337 : }
18338 :
18339 : /* Indexes can depend on extensions */
18340 2154 : append_depends_on_extension(fout, q, &indxinfo->dobj,
18341 : "pg_catalog.pg_class",
18342 : "INDEX", qqindxname);
18343 :
18344 : /* If the index defines identity, we need to record that. */
18345 2154 : if (indxinfo->indisreplident)
18346 : {
18347 0 : appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY USING",
18348 0 : fmtQualifiedDumpable(tbinfo));
18349 : /* index name is not qualified in this syntax */
18350 0 : appendPQExpBuffer(q, " INDEX %s;\n",
18351 : qindxname);
18352 : }
18353 :
18354 : /*
18355 : * If this index is a member of a partitioned index, the backend will
18356 : * not allow us to drop it separately, so don't try. It will go away
18357 : * automatically when we drop either the index's table or the
18358 : * partitioned index. (If, in a selective restore with --clean, we
18359 : * drop neither of those, then this index will not be dropped either.
18360 : * But that's fine, and even if you think it's not, the backend won't
18361 : * let us do differently.)
18362 : */
18363 2154 : if (indxinfo->parentidx == 0)
18364 1742 : appendPQExpBuffer(delq, "DROP INDEX %s;\n", qqindxname);
18365 :
18366 2154 : if (indxinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18367 2154 : ArchiveEntry(fout, indxinfo->dobj.catId, indxinfo->dobj.dumpId,
18368 2154 : ARCHIVE_OPTS(.tag = indxinfo->dobj.name,
18369 : .namespace = tbinfo->dobj.namespace->dobj.name,
18370 : .tablespace = indxinfo->tablespace,
18371 : .owner = tbinfo->rolname,
18372 : .description = "INDEX",
18373 : .section = SECTION_POST_DATA,
18374 : .createStmt = q->data,
18375 : .dropStmt = delq->data));
18376 :
18377 2154 : free(indstatcolsarray);
18378 2154 : free(indstatvalsarray);
18379 : }
18380 :
18381 : /* Dump Index Comments */
18382 5142 : if (indxinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18383 30 : dumpComment(fout, "INDEX", qindxname,
18384 30 : tbinfo->dobj.namespace->dobj.name,
18385 : tbinfo->rolname,
18386 : indxinfo->dobj.catId, 0,
18387 : is_constraint ? indxinfo->indexconstraint :
18388 : indxinfo->dobj.dumpId);
18389 :
18390 5142 : destroyPQExpBuffer(q);
18391 5142 : destroyPQExpBuffer(delq);
18392 5142 : free(qindxname);
18393 5142 : free(qqindxname);
18394 : }
18395 :
18396 : /*
18397 : * dumpIndexAttach
18398 : * write out to fout a partitioned-index attachment clause
18399 : */
18400 : static void
18401 1224 : dumpIndexAttach(Archive *fout, const IndexAttachInfo *attachinfo)
18402 : {
18403 : /* Do nothing if not dumping schema */
18404 1224 : if (!fout->dopt->dumpSchema)
18405 96 : return;
18406 :
18407 1128 : if (attachinfo->partitionIdx->dobj.dump & DUMP_COMPONENT_DEFINITION)
18408 : {
18409 1128 : PQExpBuffer q = createPQExpBuffer();
18410 :
18411 1128 : appendPQExpBuffer(q, "ALTER INDEX %s ",
18412 1128 : fmtQualifiedDumpable(attachinfo->parentIdx));
18413 1128 : appendPQExpBuffer(q, "ATTACH PARTITION %s;\n",
18414 1128 : fmtQualifiedDumpable(attachinfo->partitionIdx));
18415 :
18416 : /*
18417 : * There is no need for a dropStmt since the drop is done implicitly
18418 : * when we drop either the index's table or the partitioned index.
18419 : * Moreover, since there's no ALTER INDEX DETACH PARTITION command,
18420 : * there's no way to do it anyway. (If you think to change this,
18421 : * consider also what to do with --if-exists.)
18422 : *
18423 : * Although this object doesn't really have ownership as such, set the
18424 : * owner field anyway to ensure that the command is run by the correct
18425 : * role at restore time.
18426 : */
18427 1128 : ArchiveEntry(fout, attachinfo->dobj.catId, attachinfo->dobj.dumpId,
18428 1128 : ARCHIVE_OPTS(.tag = attachinfo->dobj.name,
18429 : .namespace = attachinfo->dobj.namespace->dobj.name,
18430 : .owner = attachinfo->parentIdx->indextable->rolname,
18431 : .description = "INDEX ATTACH",
18432 : .section = SECTION_POST_DATA,
18433 : .createStmt = q->data));
18434 :
18435 1128 : destroyPQExpBuffer(q);
18436 : }
18437 : }
18438 :
18439 : /*
18440 : * dumpStatisticsExt
18441 : * write out to fout an extended statistics object
18442 : */
18443 : static void
18444 302 : dumpStatisticsExt(Archive *fout, const StatsExtInfo *statsextinfo)
18445 : {
18446 302 : DumpOptions *dopt = fout->dopt;
18447 : PQExpBuffer q;
18448 : PQExpBuffer delq;
18449 : PQExpBuffer query;
18450 : char *qstatsextname;
18451 : PGresult *res;
18452 : char *stxdef;
18453 :
18454 : /* Do nothing if not dumping schema */
18455 302 : if (!dopt->dumpSchema)
18456 36 : return;
18457 :
18458 266 : q = createPQExpBuffer();
18459 266 : delq = createPQExpBuffer();
18460 266 : query = createPQExpBuffer();
18461 :
18462 266 : qstatsextname = pg_strdup(fmtId(statsextinfo->dobj.name));
18463 :
18464 266 : appendPQExpBuffer(query, "SELECT "
18465 : "pg_catalog.pg_get_statisticsobjdef('%u'::pg_catalog.oid)",
18466 266 : statsextinfo->dobj.catId.oid);
18467 :
18468 266 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
18469 :
18470 266 : stxdef = PQgetvalue(res, 0, 0);
18471 :
18472 : /* Result of pg_get_statisticsobjdef is complete except for semicolon */
18473 266 : appendPQExpBuffer(q, "%s;\n", stxdef);
18474 :
18475 : /*
18476 : * We only issue an ALTER STATISTICS statement if the stxstattarget entry
18477 : * for this statistics object is not the default value.
18478 : */
18479 266 : if (statsextinfo->stattarget >= 0)
18480 : {
18481 76 : appendPQExpBuffer(q, "ALTER STATISTICS %s ",
18482 76 : fmtQualifiedDumpable(statsextinfo));
18483 76 : appendPQExpBuffer(q, "SET STATISTICS %d;\n",
18484 76 : statsextinfo->stattarget);
18485 : }
18486 :
18487 266 : appendPQExpBuffer(delq, "DROP STATISTICS %s;\n",
18488 266 : fmtQualifiedDumpable(statsextinfo));
18489 :
18490 266 : if (statsextinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18491 266 : ArchiveEntry(fout, statsextinfo->dobj.catId,
18492 266 : statsextinfo->dobj.dumpId,
18493 266 : ARCHIVE_OPTS(.tag = statsextinfo->dobj.name,
18494 : .namespace = statsextinfo->dobj.namespace->dobj.name,
18495 : .owner = statsextinfo->rolname,
18496 : .description = "STATISTICS",
18497 : .section = SECTION_POST_DATA,
18498 : .createStmt = q->data,
18499 : .dropStmt = delq->data));
18500 :
18501 : /* Dump Statistics Comments */
18502 266 : if (statsextinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18503 0 : dumpComment(fout, "STATISTICS", qstatsextname,
18504 0 : statsextinfo->dobj.namespace->dobj.name,
18505 0 : statsextinfo->rolname,
18506 : statsextinfo->dobj.catId, 0,
18507 0 : statsextinfo->dobj.dumpId);
18508 :
18509 266 : PQclear(res);
18510 266 : destroyPQExpBuffer(q);
18511 266 : destroyPQExpBuffer(delq);
18512 266 : destroyPQExpBuffer(query);
18513 266 : free(qstatsextname);
18514 : }
18515 :
18516 : /*
18517 : * dumpConstraint
18518 : * write out to fout a user-defined constraint
18519 : */
18520 : static void
18521 5290 : dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
18522 : {
18523 5290 : DumpOptions *dopt = fout->dopt;
18524 5290 : TableInfo *tbinfo = coninfo->contable;
18525 : PQExpBuffer q;
18526 : PQExpBuffer delq;
18527 5290 : char *tag = NULL;
18528 : char *foreign;
18529 :
18530 : /* Do nothing if not dumping schema */
18531 5290 : if (!dopt->dumpSchema)
18532 196 : return;
18533 :
18534 5094 : q = createPQExpBuffer();
18535 5094 : delq = createPQExpBuffer();
18536 :
18537 9856 : foreign = tbinfo &&
18538 5094 : tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "";
18539 :
18540 5094 : if (coninfo->contype == 'p' ||
18541 2596 : coninfo->contype == 'u' ||
18542 2126 : coninfo->contype == 'x')
18543 2988 : {
18544 : /* Index-related constraint */
18545 : IndxInfo *indxinfo;
18546 : int k;
18547 :
18548 2988 : indxinfo = (IndxInfo *) findObjectByDumpId(coninfo->conindex);
18549 :
18550 2988 : if (indxinfo == NULL)
18551 0 : pg_fatal("missing index for constraint \"%s\"",
18552 : coninfo->dobj.name);
18553 :
18554 2988 : if (dopt->binary_upgrade)
18555 292 : binary_upgrade_set_pg_class_oids(fout, q,
18556 : indxinfo->dobj.catId.oid);
18557 :
18558 2988 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s\n", foreign,
18559 2988 : fmtQualifiedDumpable(tbinfo));
18560 2988 : appendPQExpBuffer(q, " ADD CONSTRAINT %s ",
18561 2988 : fmtId(coninfo->dobj.name));
18562 :
18563 2988 : if (coninfo->condef)
18564 : {
18565 : /* pg_get_constraintdef should have provided everything */
18566 20 : appendPQExpBuffer(q, "%s;\n", coninfo->condef);
18567 : }
18568 : else
18569 : {
18570 2968 : appendPQExpBufferStr(q,
18571 2968 : coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
18572 :
18573 : /*
18574 : * PRIMARY KEY constraints should not be using NULLS NOT DISTINCT
18575 : * indexes. Being able to create this was fixed, but we need to
18576 : * make the index distinct in order to be able to restore the
18577 : * dump.
18578 : */
18579 2968 : if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p')
18580 0 : appendPQExpBufferStr(q, " NULLS NOT DISTINCT");
18581 2968 : appendPQExpBufferStr(q, " (");
18582 7194 : for (k = 0; k < indxinfo->indnkeyattrs; k++)
18583 : {
18584 4226 : int indkey = (int) indxinfo->indkeys[k];
18585 : const char *attname;
18586 :
18587 4226 : if (indkey == InvalidAttrNumber)
18588 0 : break;
18589 4226 : attname = getAttrName(indkey, tbinfo);
18590 :
18591 4226 : appendPQExpBuffer(q, "%s%s",
18592 : (k == 0) ? "" : ", ",
18593 : fmtId(attname));
18594 : }
18595 2968 : if (coninfo->conperiod)
18596 232 : appendPQExpBufferStr(q, " WITHOUT OVERLAPS");
18597 :
18598 2968 : if (indxinfo->indnkeyattrs < indxinfo->indnattrs)
18599 40 : appendPQExpBufferStr(q, ") INCLUDE (");
18600 :
18601 3048 : for (k = indxinfo->indnkeyattrs; k < indxinfo->indnattrs; k++)
18602 : {
18603 80 : int indkey = (int) indxinfo->indkeys[k];
18604 : const char *attname;
18605 :
18606 80 : if (indkey == InvalidAttrNumber)
18607 0 : break;
18608 80 : attname = getAttrName(indkey, tbinfo);
18609 :
18610 160 : appendPQExpBuffer(q, "%s%s",
18611 80 : (k == indxinfo->indnkeyattrs) ? "" : ", ",
18612 : fmtId(attname));
18613 : }
18614 :
18615 2968 : appendPQExpBufferChar(q, ')');
18616 :
18617 2968 : if (nonemptyReloptions(indxinfo->indreloptions))
18618 : {
18619 0 : appendPQExpBufferStr(q, " WITH (");
18620 0 : appendReloptionsArrayAH(q, indxinfo->indreloptions, "", fout);
18621 0 : appendPQExpBufferChar(q, ')');
18622 : }
18623 :
18624 2968 : if (coninfo->condeferrable)
18625 : {
18626 50 : appendPQExpBufferStr(q, " DEFERRABLE");
18627 50 : if (coninfo->condeferred)
18628 30 : appendPQExpBufferStr(q, " INITIALLY DEFERRED");
18629 : }
18630 :
18631 2968 : appendPQExpBufferStr(q, ";\n");
18632 : }
18633 :
18634 : /*
18635 : * Append ALTER TABLE commands as needed to set properties that we
18636 : * only have ALTER TABLE syntax for. Keep this in sync with the
18637 : * similar code in dumpIndex!
18638 : */
18639 :
18640 : /* If the index is clustered, we need to record that. */
18641 2988 : if (indxinfo->indisclustered)
18642 : {
18643 76 : appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER",
18644 76 : fmtQualifiedDumpable(tbinfo));
18645 : /* index name is not qualified in this syntax */
18646 76 : appendPQExpBuffer(q, " ON %s;\n",
18647 76 : fmtId(indxinfo->dobj.name));
18648 : }
18649 :
18650 : /* If the index defines identity, we need to record that. */
18651 2988 : if (indxinfo->indisreplident)
18652 : {
18653 0 : appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY USING",
18654 0 : fmtQualifiedDumpable(tbinfo));
18655 : /* index name is not qualified in this syntax */
18656 0 : appendPQExpBuffer(q, " INDEX %s;\n",
18657 0 : fmtId(indxinfo->dobj.name));
18658 : }
18659 :
18660 : /* Indexes can depend on extensions */
18661 2988 : append_depends_on_extension(fout, q, &indxinfo->dobj,
18662 : "pg_catalog.pg_class", "INDEX",
18663 2988 : fmtQualifiedDumpable(indxinfo));
18664 :
18665 2988 : appendPQExpBuffer(delq, "ALTER %sTABLE ONLY %s ", foreign,
18666 2988 : fmtQualifiedDumpable(tbinfo));
18667 2988 : appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
18668 2988 : fmtId(coninfo->dobj.name));
18669 :
18670 2988 : tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
18671 :
18672 2988 : if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18673 2988 : ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
18674 2988 : ARCHIVE_OPTS(.tag = tag,
18675 : .namespace = tbinfo->dobj.namespace->dobj.name,
18676 : .tablespace = indxinfo->tablespace,
18677 : .owner = tbinfo->rolname,
18678 : .description = "CONSTRAINT",
18679 : .section = SECTION_POST_DATA,
18680 : .createStmt = q->data,
18681 : .dropStmt = delq->data));
18682 : }
18683 2106 : else if (coninfo->contype == 'f')
18684 : {
18685 : char *only;
18686 :
18687 : /*
18688 : * Foreign keys on partitioned tables are always declared as
18689 : * inheriting to partitions; for all other cases, emit them as
18690 : * applying ONLY directly to the named table, because that's how they
18691 : * work for regular inherited tables.
18692 : */
18693 342 : only = tbinfo->relkind == RELKIND_PARTITIONED_TABLE ? "" : "ONLY ";
18694 :
18695 : /*
18696 : * XXX Potentially wrap in a 'SET CONSTRAINTS OFF' block so that the
18697 : * current table data is not processed
18698 : */
18699 342 : appendPQExpBuffer(q, "ALTER %sTABLE %s%s\n", foreign,
18700 342 : only, fmtQualifiedDumpable(tbinfo));
18701 342 : appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
18702 342 : fmtId(coninfo->dobj.name),
18703 342 : coninfo->condef);
18704 :
18705 342 : appendPQExpBuffer(delq, "ALTER %sTABLE %s%s ", foreign,
18706 342 : only, fmtQualifiedDumpable(tbinfo));
18707 342 : appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
18708 342 : fmtId(coninfo->dobj.name));
18709 :
18710 342 : tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
18711 :
18712 342 : if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18713 342 : ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
18714 342 : ARCHIVE_OPTS(.tag = tag,
18715 : .namespace = tbinfo->dobj.namespace->dobj.name,
18716 : .owner = tbinfo->rolname,
18717 : .description = "FK CONSTRAINT",
18718 : .section = SECTION_POST_DATA,
18719 : .createStmt = q->data,
18720 : .dropStmt = delq->data));
18721 : }
18722 1764 : else if ((coninfo->contype == 'c' || coninfo->contype == 'n') && tbinfo)
18723 : {
18724 : /* CHECK or invalid not-null constraint on a table */
18725 :
18726 : /* Ignore if not to be dumped separately, or if it was inherited */
18727 1432 : if (coninfo->separate && coninfo->conislocal)
18728 : {
18729 : const char *keyword;
18730 :
18731 226 : if (coninfo->contype == 'c')
18732 90 : keyword = "CHECK CONSTRAINT";
18733 : else
18734 136 : keyword = "CONSTRAINT";
18735 :
18736 : /* not ONLY since we want it to propagate to children */
18737 226 : appendPQExpBuffer(q, "ALTER %sTABLE %s\n", foreign,
18738 226 : fmtQualifiedDumpable(tbinfo));
18739 226 : appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
18740 226 : fmtId(coninfo->dobj.name),
18741 226 : coninfo->condef);
18742 :
18743 226 : appendPQExpBuffer(delq, "ALTER %sTABLE %s ", foreign,
18744 226 : fmtQualifiedDumpable(tbinfo));
18745 226 : appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
18746 226 : fmtId(coninfo->dobj.name));
18747 :
18748 226 : tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
18749 :
18750 226 : if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18751 226 : ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
18752 226 : ARCHIVE_OPTS(.tag = tag,
18753 : .namespace = tbinfo->dobj.namespace->dobj.name,
18754 : .owner = tbinfo->rolname,
18755 : .description = keyword,
18756 : .section = SECTION_POST_DATA,
18757 : .createStmt = q->data,
18758 : .dropStmt = delq->data));
18759 : }
18760 : }
18761 332 : else if (tbinfo == NULL)
18762 : {
18763 : /* CHECK, NOT NULL constraint on a domain */
18764 332 : TypeInfo *tyinfo = coninfo->condomain;
18765 :
18766 : Assert(coninfo->contype == 'c' || coninfo->contype == 'n');
18767 :
18768 : /* Ignore if not to be dumped separately */
18769 332 : if (coninfo->separate)
18770 : {
18771 : const char *keyword;
18772 :
18773 10 : if (coninfo->contype == 'c')
18774 10 : keyword = "CHECK CONSTRAINT";
18775 : else
18776 0 : keyword = "CONSTRAINT";
18777 :
18778 10 : appendPQExpBuffer(q, "ALTER DOMAIN %s\n",
18779 10 : fmtQualifiedDumpable(tyinfo));
18780 10 : appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
18781 10 : fmtId(coninfo->dobj.name),
18782 10 : coninfo->condef);
18783 :
18784 10 : appendPQExpBuffer(delq, "ALTER DOMAIN %s ",
18785 10 : fmtQualifiedDumpable(tyinfo));
18786 10 : appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
18787 10 : fmtId(coninfo->dobj.name));
18788 :
18789 10 : tag = psprintf("%s %s", tyinfo->dobj.name, coninfo->dobj.name);
18790 :
18791 10 : if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18792 10 : ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
18793 10 : ARCHIVE_OPTS(.tag = tag,
18794 : .namespace = tyinfo->dobj.namespace->dobj.name,
18795 : .owner = tyinfo->rolname,
18796 : .description = keyword,
18797 : .section = SECTION_POST_DATA,
18798 : .createStmt = q->data,
18799 : .dropStmt = delq->data));
18800 :
18801 10 : if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18802 : {
18803 10 : PQExpBuffer conprefix = createPQExpBuffer();
18804 10 : char *qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
18805 :
18806 10 : appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
18807 10 : fmtId(coninfo->dobj.name));
18808 :
18809 10 : dumpComment(fout, conprefix->data, qtypname,
18810 10 : tyinfo->dobj.namespace->dobj.name,
18811 : tyinfo->rolname,
18812 10 : coninfo->dobj.catId, 0, coninfo->dobj.dumpId);
18813 10 : destroyPQExpBuffer(conprefix);
18814 10 : free(qtypname);
18815 : }
18816 : }
18817 : }
18818 : else
18819 : {
18820 0 : pg_fatal("unrecognized constraint type: %c",
18821 : coninfo->contype);
18822 : }
18823 :
18824 : /* Dump Constraint Comments --- only works for table constraints */
18825 5094 : if (tbinfo && coninfo->separate &&
18826 3616 : coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18827 106 : dumpTableConstraintComment(fout, coninfo);
18828 :
18829 5094 : free(tag);
18830 5094 : destroyPQExpBuffer(q);
18831 5094 : destroyPQExpBuffer(delq);
18832 : }
18833 :
18834 : /*
18835 : * dumpTableConstraintComment --- dump a constraint's comment if any
18836 : *
18837 : * This is split out because we need the function in two different places
18838 : * depending on whether the constraint is dumped as part of CREATE TABLE
18839 : * or as a separate ALTER command.
18840 : */
18841 : static void
18842 192 : dumpTableConstraintComment(Archive *fout, const ConstraintInfo *coninfo)
18843 : {
18844 192 : TableInfo *tbinfo = coninfo->contable;
18845 192 : PQExpBuffer conprefix = createPQExpBuffer();
18846 : char *qtabname;
18847 :
18848 192 : qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
18849 :
18850 192 : appendPQExpBuffer(conprefix, "CONSTRAINT %s ON",
18851 192 : fmtId(coninfo->dobj.name));
18852 :
18853 192 : if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18854 192 : dumpComment(fout, conprefix->data, qtabname,
18855 192 : tbinfo->dobj.namespace->dobj.name,
18856 : tbinfo->rolname,
18857 : coninfo->dobj.catId, 0,
18858 192 : coninfo->separate ? coninfo->dobj.dumpId : tbinfo->dobj.dumpId);
18859 :
18860 192 : destroyPQExpBuffer(conprefix);
18861 192 : free(qtabname);
18862 192 : }
18863 :
18864 : static inline SeqType
18865 1312 : parse_sequence_type(const char *name)
18866 : {
18867 2922 : for (int i = 0; i < lengthof(SeqTypeNames); i++)
18868 : {
18869 2922 : if (strcmp(SeqTypeNames[i], name) == 0)
18870 1312 : return (SeqType) i;
18871 : }
18872 :
18873 0 : pg_fatal("unrecognized sequence type: %s", name);
18874 : return (SeqType) 0; /* keep compiler quiet */
18875 : }
18876 :
18877 : /*
18878 : * bsearch() comparator for SequenceItem
18879 : */
18880 : static int
18881 6034 : SequenceItemCmp(const void *p1, const void *p2)
18882 : {
18883 6034 : SequenceItem v1 = *((const SequenceItem *) p1);
18884 6034 : SequenceItem v2 = *((const SequenceItem *) p2);
18885 :
18886 6034 : return pg_cmp_u32(v1.oid, v2.oid);
18887 : }
18888 :
18889 : /*
18890 : * collectSequences
18891 : *
18892 : * Construct a table of sequence information. This table is sorted by OID for
18893 : * speed in lookup.
18894 : */
18895 : static void
18896 370 : collectSequences(Archive *fout)
18897 : {
18898 : PGresult *res;
18899 : const char *query;
18900 :
18901 : /*
18902 : * Before Postgres 10, sequence metadata is in the sequence itself. With
18903 : * some extra effort, we might be able to use the sorted table for those
18904 : * versions, but for now it seems unlikely to be worth it.
18905 : *
18906 : * Since version 18, we can gather the sequence data in this query with
18907 : * pg_get_sequence_data(), but we only do so for non-schema-only dumps.
18908 : */
18909 370 : if (fout->remoteVersion < 100000)
18910 0 : return;
18911 370 : else if (fout->remoteVersion < 180000 ||
18912 370 : (!fout->dopt->dumpData && !fout->dopt->sequence_data))
18913 16 : query = "SELECT seqrelid, format_type(seqtypid, NULL), "
18914 : "seqstart, seqincrement, "
18915 : "seqmax, seqmin, "
18916 : "seqcache, seqcycle, "
18917 : "NULL, 'f' "
18918 : "FROM pg_catalog.pg_sequence "
18919 : "ORDER BY seqrelid";
18920 : else
18921 354 : query = "SELECT seqrelid, format_type(seqtypid, NULL), "
18922 : "seqstart, seqincrement, "
18923 : "seqmax, seqmin, "
18924 : "seqcache, seqcycle, "
18925 : "last_value, is_called "
18926 : "FROM pg_catalog.pg_sequence, "
18927 : "pg_get_sequence_data(seqrelid) "
18928 : "ORDER BY seqrelid;";
18929 :
18930 370 : res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
18931 :
18932 370 : nsequences = PQntuples(res);
18933 370 : sequences = (SequenceItem *) pg_malloc(nsequences * sizeof(SequenceItem));
18934 :
18935 1682 : for (int i = 0; i < nsequences; i++)
18936 : {
18937 1312 : sequences[i].oid = atooid(PQgetvalue(res, i, 0));
18938 1312 : sequences[i].seqtype = parse_sequence_type(PQgetvalue(res, i, 1));
18939 1312 : sequences[i].startv = strtoi64(PQgetvalue(res, i, 2), NULL, 10);
18940 1312 : sequences[i].incby = strtoi64(PQgetvalue(res, i, 3), NULL, 10);
18941 1312 : sequences[i].maxv = strtoi64(PQgetvalue(res, i, 4), NULL, 10);
18942 1312 : sequences[i].minv = strtoi64(PQgetvalue(res, i, 5), NULL, 10);
18943 1312 : sequences[i].cache = strtoi64(PQgetvalue(res, i, 6), NULL, 10);
18944 1312 : sequences[i].cycled = (strcmp(PQgetvalue(res, i, 7), "t") == 0);
18945 1312 : sequences[i].last_value = strtoi64(PQgetvalue(res, i, 8), NULL, 10);
18946 1312 : sequences[i].is_called = (strcmp(PQgetvalue(res, i, 9), "t") == 0);
18947 : }
18948 :
18949 370 : PQclear(res);
18950 : }
18951 :
18952 : /*
18953 : * dumpSequence
18954 : * write the declaration (not data) of one user-defined sequence
18955 : */
18956 : static void
18957 786 : dumpSequence(Archive *fout, const TableInfo *tbinfo)
18958 : {
18959 786 : DumpOptions *dopt = fout->dopt;
18960 : SequenceItem *seq;
18961 : bool is_ascending;
18962 : int64 default_minv,
18963 : default_maxv;
18964 786 : PQExpBuffer query = createPQExpBuffer();
18965 786 : PQExpBuffer delqry = createPQExpBuffer();
18966 : char *qseqname;
18967 786 : TableInfo *owning_tab = NULL;
18968 :
18969 786 : qseqname = pg_strdup(fmtId(tbinfo->dobj.name));
18970 :
18971 : /*
18972 : * For versions >= 10, the sequence information is gathered in a sorted
18973 : * table before any calls to dumpSequence(). See collectSequences() for
18974 : * more information.
18975 : */
18976 786 : if (fout->remoteVersion >= 100000)
18977 : {
18978 786 : SequenceItem key = {0};
18979 :
18980 : Assert(sequences);
18981 :
18982 786 : key.oid = tbinfo->dobj.catId.oid;
18983 786 : seq = bsearch(&key, sequences, nsequences,
18984 : sizeof(SequenceItem), SequenceItemCmp);
18985 : }
18986 : else
18987 : {
18988 : PGresult *res;
18989 :
18990 : /*
18991 : * Before PostgreSQL 10, sequence metadata is in the sequence itself.
18992 : *
18993 : * Note: it might seem that 'bigint' potentially needs to be
18994 : * schema-qualified, but actually that's a keyword.
18995 : */
18996 0 : appendPQExpBuffer(query,
18997 : "SELECT 'bigint' AS sequence_type, "
18998 : "start_value, increment_by, max_value, min_value, "
18999 : "cache_value, is_cycled FROM %s",
19000 0 : fmtQualifiedDumpable(tbinfo));
19001 :
19002 0 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19003 :
19004 0 : if (PQntuples(res) != 1)
19005 0 : pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
19006 : "query to get data of sequence \"%s\" returned %d rows (expected 1)",
19007 : PQntuples(res)),
19008 : tbinfo->dobj.name, PQntuples(res));
19009 :
19010 0 : seq = pg_malloc0(sizeof(SequenceItem));
19011 0 : seq->seqtype = parse_sequence_type(PQgetvalue(res, 0, 0));
19012 0 : seq->startv = strtoi64(PQgetvalue(res, 0, 1), NULL, 10);
19013 0 : seq->incby = strtoi64(PQgetvalue(res, 0, 2), NULL, 10);
19014 0 : seq->maxv = strtoi64(PQgetvalue(res, 0, 3), NULL, 10);
19015 0 : seq->minv = strtoi64(PQgetvalue(res, 0, 4), NULL, 10);
19016 0 : seq->cache = strtoi64(PQgetvalue(res, 0, 5), NULL, 10);
19017 0 : seq->cycled = (strcmp(PQgetvalue(res, 0, 6), "t") == 0);
19018 :
19019 0 : PQclear(res);
19020 : }
19021 :
19022 : /* Calculate default limits for a sequence of this type */
19023 786 : is_ascending = (seq->incby >= 0);
19024 786 : if (seq->seqtype == SEQTYPE_SMALLINT)
19025 : {
19026 50 : default_minv = is_ascending ? 1 : PG_INT16_MIN;
19027 50 : default_maxv = is_ascending ? PG_INT16_MAX : -1;
19028 : }
19029 736 : else if (seq->seqtype == SEQTYPE_INTEGER)
19030 : {
19031 604 : default_minv = is_ascending ? 1 : PG_INT32_MIN;
19032 604 : default_maxv = is_ascending ? PG_INT32_MAX : -1;
19033 : }
19034 132 : else if (seq->seqtype == SEQTYPE_BIGINT)
19035 : {
19036 132 : default_minv = is_ascending ? 1 : PG_INT64_MIN;
19037 132 : default_maxv = is_ascending ? PG_INT64_MAX : -1;
19038 : }
19039 : else
19040 : {
19041 0 : pg_fatal("unrecognized sequence type: %d", seq->seqtype);
19042 : default_minv = default_maxv = 0; /* keep compiler quiet */
19043 : }
19044 :
19045 : /*
19046 : * Identity sequences are not to be dropped separately.
19047 : */
19048 786 : if (!tbinfo->is_identity_sequence)
19049 : {
19050 490 : appendPQExpBuffer(delqry, "DROP SEQUENCE %s;\n",
19051 490 : fmtQualifiedDumpable(tbinfo));
19052 : }
19053 :
19054 786 : resetPQExpBuffer(query);
19055 :
19056 786 : if (dopt->binary_upgrade)
19057 : {
19058 132 : binary_upgrade_set_pg_class_oids(fout, query,
19059 132 : tbinfo->dobj.catId.oid);
19060 :
19061 : /*
19062 : * In older PG versions a sequence will have a pg_type entry, but v14
19063 : * and up don't use that, so don't attempt to preserve the type OID.
19064 : */
19065 : }
19066 :
19067 786 : if (tbinfo->is_identity_sequence)
19068 : {
19069 296 : owning_tab = findTableByOid(tbinfo->owning_tab);
19070 :
19071 296 : appendPQExpBuffer(query,
19072 : "ALTER TABLE %s ",
19073 296 : fmtQualifiedDumpable(owning_tab));
19074 296 : appendPQExpBuffer(query,
19075 : "ALTER COLUMN %s ADD GENERATED ",
19076 296 : fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
19077 296 : if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_ALWAYS)
19078 216 : appendPQExpBufferStr(query, "ALWAYS");
19079 80 : else if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_BY_DEFAULT)
19080 80 : appendPQExpBufferStr(query, "BY DEFAULT");
19081 296 : appendPQExpBuffer(query, " AS IDENTITY (\n SEQUENCE NAME %s\n",
19082 296 : fmtQualifiedDumpable(tbinfo));
19083 :
19084 : /*
19085 : * Emit persistence option only if it's different from the owning
19086 : * table's. This avoids using this new syntax unnecessarily.
19087 : */
19088 296 : if (tbinfo->relpersistence != owning_tab->relpersistence)
19089 20 : appendPQExpBuffer(query, " %s\n",
19090 20 : tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ?
19091 : "UNLOGGED" : "LOGGED");
19092 : }
19093 : else
19094 : {
19095 490 : appendPQExpBuffer(query,
19096 : "CREATE %sSEQUENCE %s\n",
19097 490 : tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ?
19098 : "UNLOGGED " : "",
19099 490 : fmtQualifiedDumpable(tbinfo));
19100 :
19101 490 : if (seq->seqtype != SEQTYPE_BIGINT)
19102 388 : appendPQExpBuffer(query, " AS %s\n", SeqTypeNames[seq->seqtype]);
19103 : }
19104 :
19105 786 : appendPQExpBuffer(query, " START WITH " INT64_FORMAT "\n", seq->startv);
19106 :
19107 786 : appendPQExpBuffer(query, " INCREMENT BY " INT64_FORMAT "\n", seq->incby);
19108 :
19109 786 : if (seq->minv != default_minv)
19110 30 : appendPQExpBuffer(query, " MINVALUE " INT64_FORMAT "\n", seq->minv);
19111 : else
19112 756 : appendPQExpBufferStr(query, " NO MINVALUE\n");
19113 :
19114 786 : if (seq->maxv != default_maxv)
19115 30 : appendPQExpBuffer(query, " MAXVALUE " INT64_FORMAT "\n", seq->maxv);
19116 : else
19117 756 : appendPQExpBufferStr(query, " NO MAXVALUE\n");
19118 :
19119 786 : appendPQExpBuffer(query,
19120 : " CACHE " INT64_FORMAT "%s",
19121 786 : seq->cache, (seq->cycled ? "\n CYCLE" : ""));
19122 :
19123 786 : if (tbinfo->is_identity_sequence)
19124 296 : appendPQExpBufferStr(query, "\n);\n");
19125 : else
19126 490 : appendPQExpBufferStr(query, ";\n");
19127 :
19128 : /* binary_upgrade: no need to clear TOAST table oid */
19129 :
19130 786 : if (dopt->binary_upgrade)
19131 132 : binary_upgrade_extension_member(query, &tbinfo->dobj,
19132 : "SEQUENCE", qseqname,
19133 132 : tbinfo->dobj.namespace->dobj.name);
19134 :
19135 786 : if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19136 786 : ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
19137 786 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19138 : .namespace = tbinfo->dobj.namespace->dobj.name,
19139 : .owner = tbinfo->rolname,
19140 : .description = "SEQUENCE",
19141 : .section = SECTION_PRE_DATA,
19142 : .createStmt = query->data,
19143 : .dropStmt = delqry->data));
19144 :
19145 : /*
19146 : * If the sequence is owned by a table column, emit the ALTER for it as a
19147 : * separate TOC entry immediately following the sequence's own entry. It's
19148 : * OK to do this rather than using full sorting logic, because the
19149 : * dependency that tells us it's owned will have forced the table to be
19150 : * created first. We can't just include the ALTER in the TOC entry
19151 : * because it will fail if we haven't reassigned the sequence owner to
19152 : * match the table's owner.
19153 : *
19154 : * We need not schema-qualify the table reference because both sequence
19155 : * and table must be in the same schema.
19156 : */
19157 786 : if (OidIsValid(tbinfo->owning_tab) && !tbinfo->is_identity_sequence)
19158 : {
19159 298 : owning_tab = findTableByOid(tbinfo->owning_tab);
19160 :
19161 298 : if (owning_tab == NULL)
19162 0 : pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
19163 : tbinfo->owning_tab, tbinfo->dobj.catId.oid);
19164 :
19165 298 : if (owning_tab->dobj.dump & DUMP_COMPONENT_DEFINITION)
19166 : {
19167 294 : resetPQExpBuffer(query);
19168 294 : appendPQExpBuffer(query, "ALTER SEQUENCE %s",
19169 294 : fmtQualifiedDumpable(tbinfo));
19170 294 : appendPQExpBuffer(query, " OWNED BY %s",
19171 294 : fmtQualifiedDumpable(owning_tab));
19172 294 : appendPQExpBuffer(query, ".%s;\n",
19173 294 : fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
19174 :
19175 294 : if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19176 294 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
19177 294 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19178 : .namespace = tbinfo->dobj.namespace->dobj.name,
19179 : .owner = tbinfo->rolname,
19180 : .description = "SEQUENCE OWNED BY",
19181 : .section = SECTION_PRE_DATA,
19182 : .createStmt = query->data,
19183 : .deps = &(tbinfo->dobj.dumpId),
19184 : .nDeps = 1));
19185 : }
19186 : }
19187 :
19188 : /* Dump Sequence Comments and Security Labels */
19189 786 : if (tbinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19190 0 : dumpComment(fout, "SEQUENCE", qseqname,
19191 0 : tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19192 0 : tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
19193 :
19194 786 : if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
19195 0 : dumpSecLabel(fout, "SEQUENCE", qseqname,
19196 0 : tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19197 0 : tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
19198 :
19199 786 : if (fout->remoteVersion < 100000)
19200 0 : pg_free(seq);
19201 786 : destroyPQExpBuffer(query);
19202 786 : destroyPQExpBuffer(delqry);
19203 786 : free(qseqname);
19204 786 : }
19205 :
19206 : /*
19207 : * dumpSequenceData
19208 : * write the data of one user-defined sequence
19209 : */
19210 : static void
19211 822 : dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo)
19212 : {
19213 822 : TableInfo *tbinfo = tdinfo->tdtable;
19214 : int64 last;
19215 : bool called;
19216 822 : PQExpBuffer query = createPQExpBuffer();
19217 :
19218 : /*
19219 : * For versions >= 18, the sequence information is gathered in the sorted
19220 : * array before any calls to dumpSequenceData(). See collectSequences()
19221 : * for more information.
19222 : *
19223 : * For older versions, we have to query the sequence relations
19224 : * individually.
19225 : */
19226 822 : if (fout->remoteVersion < 180000)
19227 : {
19228 : PGresult *res;
19229 :
19230 0 : appendPQExpBuffer(query,
19231 : "SELECT last_value, is_called FROM %s",
19232 0 : fmtQualifiedDumpable(tbinfo));
19233 :
19234 0 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19235 :
19236 0 : if (PQntuples(res) != 1)
19237 0 : pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
19238 : "query to get data of sequence \"%s\" returned %d rows (expected 1)",
19239 : PQntuples(res)),
19240 : tbinfo->dobj.name, PQntuples(res));
19241 :
19242 0 : last = strtoi64(PQgetvalue(res, 0, 0), NULL, 10);
19243 0 : called = (strcmp(PQgetvalue(res, 0, 1), "t") == 0);
19244 :
19245 0 : PQclear(res);
19246 : }
19247 : else
19248 : {
19249 822 : SequenceItem key = {0};
19250 : SequenceItem *entry;
19251 :
19252 : Assert(sequences);
19253 : Assert(tbinfo->dobj.catId.oid);
19254 :
19255 822 : key.oid = tbinfo->dobj.catId.oid;
19256 822 : entry = bsearch(&key, sequences, nsequences,
19257 : sizeof(SequenceItem), SequenceItemCmp);
19258 :
19259 822 : last = entry->last_value;
19260 822 : called = entry->is_called;
19261 : }
19262 :
19263 822 : resetPQExpBuffer(query);
19264 822 : appendPQExpBufferStr(query, "SELECT pg_catalog.setval(");
19265 822 : appendStringLiteralAH(query, fmtQualifiedDumpable(tbinfo), fout);
19266 822 : appendPQExpBuffer(query, ", " INT64_FORMAT ", %s);\n",
19267 : last, (called ? "true" : "false"));
19268 :
19269 822 : if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
19270 822 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
19271 822 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19272 : .namespace = tbinfo->dobj.namespace->dobj.name,
19273 : .owner = tbinfo->rolname,
19274 : .description = "SEQUENCE SET",
19275 : .section = SECTION_DATA,
19276 : .createStmt = query->data,
19277 : .deps = &(tbinfo->dobj.dumpId),
19278 : .nDeps = 1));
19279 :
19280 822 : destroyPQExpBuffer(query);
19281 822 : }
19282 :
19283 : /*
19284 : * dumpTrigger
19285 : * write the declaration of one user-defined table trigger
19286 : */
19287 : static void
19288 1106 : dumpTrigger(Archive *fout, const TriggerInfo *tginfo)
19289 : {
19290 1106 : DumpOptions *dopt = fout->dopt;
19291 1106 : TableInfo *tbinfo = tginfo->tgtable;
19292 : PQExpBuffer query;
19293 : PQExpBuffer delqry;
19294 : PQExpBuffer trigprefix;
19295 : PQExpBuffer trigidentity;
19296 : char *qtabname;
19297 : char *tag;
19298 :
19299 : /* Do nothing if not dumping schema */
19300 1106 : if (!dopt->dumpSchema)
19301 62 : return;
19302 :
19303 1044 : query = createPQExpBuffer();
19304 1044 : delqry = createPQExpBuffer();
19305 1044 : trigprefix = createPQExpBuffer();
19306 1044 : trigidentity = createPQExpBuffer();
19307 :
19308 1044 : qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
19309 :
19310 1044 : appendPQExpBuffer(trigidentity, "%s ", fmtId(tginfo->dobj.name));
19311 1044 : appendPQExpBuffer(trigidentity, "ON %s", fmtQualifiedDumpable(tbinfo));
19312 :
19313 1044 : appendPQExpBuffer(query, "%s;\n", tginfo->tgdef);
19314 1044 : appendPQExpBuffer(delqry, "DROP TRIGGER %s;\n", trigidentity->data);
19315 :
19316 : /* Triggers can depend on extensions */
19317 1044 : append_depends_on_extension(fout, query, &tginfo->dobj,
19318 : "pg_catalog.pg_trigger", "TRIGGER",
19319 1044 : trigidentity->data);
19320 :
19321 1044 : if (tginfo->tgispartition)
19322 : {
19323 : Assert(tbinfo->ispartition);
19324 :
19325 : /*
19326 : * Partition triggers only appear here because their 'tgenabled' flag
19327 : * differs from its parent's. The trigger is created already, so
19328 : * remove the CREATE and replace it with an ALTER. (Clear out the
19329 : * DROP query too, so that pg_dump --create does not cause errors.)
19330 : */
19331 254 : resetPQExpBuffer(query);
19332 254 : resetPQExpBuffer(delqry);
19333 254 : appendPQExpBuffer(query, "\nALTER %sTABLE %s ",
19334 254 : tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "",
19335 254 : fmtQualifiedDumpable(tbinfo));
19336 254 : switch (tginfo->tgenabled)
19337 : {
19338 88 : case 'f':
19339 : case 'D':
19340 88 : appendPQExpBufferStr(query, "DISABLE");
19341 88 : break;
19342 0 : case 't':
19343 : case 'O':
19344 0 : appendPQExpBufferStr(query, "ENABLE");
19345 0 : break;
19346 78 : case 'R':
19347 78 : appendPQExpBufferStr(query, "ENABLE REPLICA");
19348 78 : break;
19349 88 : case 'A':
19350 88 : appendPQExpBufferStr(query, "ENABLE ALWAYS");
19351 88 : break;
19352 : }
19353 254 : appendPQExpBuffer(query, " TRIGGER %s;\n",
19354 254 : fmtId(tginfo->dobj.name));
19355 : }
19356 790 : else if (tginfo->tgenabled != 't' && tginfo->tgenabled != 'O')
19357 : {
19358 0 : appendPQExpBuffer(query, "\nALTER %sTABLE %s ",
19359 0 : tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "",
19360 0 : fmtQualifiedDumpable(tbinfo));
19361 0 : switch (tginfo->tgenabled)
19362 : {
19363 0 : case 'D':
19364 : case 'f':
19365 0 : appendPQExpBufferStr(query, "DISABLE");
19366 0 : break;
19367 0 : case 'A':
19368 0 : appendPQExpBufferStr(query, "ENABLE ALWAYS");
19369 0 : break;
19370 0 : case 'R':
19371 0 : appendPQExpBufferStr(query, "ENABLE REPLICA");
19372 0 : break;
19373 0 : default:
19374 0 : appendPQExpBufferStr(query, "ENABLE");
19375 0 : break;
19376 : }
19377 0 : appendPQExpBuffer(query, " TRIGGER %s;\n",
19378 0 : fmtId(tginfo->dobj.name));
19379 : }
19380 :
19381 1044 : appendPQExpBuffer(trigprefix, "TRIGGER %s ON",
19382 1044 : fmtId(tginfo->dobj.name));
19383 :
19384 1044 : tag = psprintf("%s %s", tbinfo->dobj.name, tginfo->dobj.name);
19385 :
19386 1044 : if (tginfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19387 1044 : ArchiveEntry(fout, tginfo->dobj.catId, tginfo->dobj.dumpId,
19388 1044 : ARCHIVE_OPTS(.tag = tag,
19389 : .namespace = tbinfo->dobj.namespace->dobj.name,
19390 : .owner = tbinfo->rolname,
19391 : .description = "TRIGGER",
19392 : .section = SECTION_POST_DATA,
19393 : .createStmt = query->data,
19394 : .dropStmt = delqry->data));
19395 :
19396 1044 : if (tginfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19397 0 : dumpComment(fout, trigprefix->data, qtabname,
19398 0 : tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19399 0 : tginfo->dobj.catId, 0, tginfo->dobj.dumpId);
19400 :
19401 1044 : free(tag);
19402 1044 : destroyPQExpBuffer(query);
19403 1044 : destroyPQExpBuffer(delqry);
19404 1044 : destroyPQExpBuffer(trigprefix);
19405 1044 : destroyPQExpBuffer(trigidentity);
19406 1044 : free(qtabname);
19407 : }
19408 :
19409 : /*
19410 : * dumpEventTrigger
19411 : * write the declaration of one user-defined event trigger
19412 : */
19413 : static void
19414 96 : dumpEventTrigger(Archive *fout, const EventTriggerInfo *evtinfo)
19415 : {
19416 96 : DumpOptions *dopt = fout->dopt;
19417 : PQExpBuffer query;
19418 : PQExpBuffer delqry;
19419 : char *qevtname;
19420 :
19421 : /* Do nothing if not dumping schema */
19422 96 : if (!dopt->dumpSchema)
19423 12 : return;
19424 :
19425 84 : query = createPQExpBuffer();
19426 84 : delqry = createPQExpBuffer();
19427 :
19428 84 : qevtname = pg_strdup(fmtId(evtinfo->dobj.name));
19429 :
19430 84 : appendPQExpBufferStr(query, "CREATE EVENT TRIGGER ");
19431 84 : appendPQExpBufferStr(query, qevtname);
19432 84 : appendPQExpBufferStr(query, " ON ");
19433 84 : appendPQExpBufferStr(query, fmtId(evtinfo->evtevent));
19434 :
19435 84 : if (strcmp("", evtinfo->evttags) != 0)
19436 : {
19437 10 : appendPQExpBufferStr(query, "\n WHEN TAG IN (");
19438 10 : appendPQExpBufferStr(query, evtinfo->evttags);
19439 10 : appendPQExpBufferChar(query, ')');
19440 : }
19441 :
19442 84 : appendPQExpBufferStr(query, "\n EXECUTE FUNCTION ");
19443 84 : appendPQExpBufferStr(query, evtinfo->evtfname);
19444 84 : appendPQExpBufferStr(query, "();\n");
19445 :
19446 84 : if (evtinfo->evtenabled != 'O')
19447 : {
19448 0 : appendPQExpBuffer(query, "\nALTER EVENT TRIGGER %s ",
19449 : qevtname);
19450 0 : switch (evtinfo->evtenabled)
19451 : {
19452 0 : case 'D':
19453 0 : appendPQExpBufferStr(query, "DISABLE");
19454 0 : break;
19455 0 : case 'A':
19456 0 : appendPQExpBufferStr(query, "ENABLE ALWAYS");
19457 0 : break;
19458 0 : case 'R':
19459 0 : appendPQExpBufferStr(query, "ENABLE REPLICA");
19460 0 : break;
19461 0 : default:
19462 0 : appendPQExpBufferStr(query, "ENABLE");
19463 0 : break;
19464 : }
19465 0 : appendPQExpBufferStr(query, ";\n");
19466 : }
19467 :
19468 84 : appendPQExpBuffer(delqry, "DROP EVENT TRIGGER %s;\n",
19469 : qevtname);
19470 :
19471 84 : if (dopt->binary_upgrade)
19472 4 : binary_upgrade_extension_member(query, &evtinfo->dobj,
19473 : "EVENT TRIGGER", qevtname, NULL);
19474 :
19475 84 : if (evtinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19476 84 : ArchiveEntry(fout, evtinfo->dobj.catId, evtinfo->dobj.dumpId,
19477 84 : ARCHIVE_OPTS(.tag = evtinfo->dobj.name,
19478 : .owner = evtinfo->evtowner,
19479 : .description = "EVENT TRIGGER",
19480 : .section = SECTION_POST_DATA,
19481 : .createStmt = query->data,
19482 : .dropStmt = delqry->data));
19483 :
19484 84 : if (evtinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19485 0 : dumpComment(fout, "EVENT TRIGGER", qevtname,
19486 0 : NULL, evtinfo->evtowner,
19487 0 : evtinfo->dobj.catId, 0, evtinfo->dobj.dumpId);
19488 :
19489 84 : if (evtinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
19490 0 : dumpSecLabel(fout, "EVENT TRIGGER", qevtname,
19491 0 : NULL, evtinfo->evtowner,
19492 0 : evtinfo->dobj.catId, 0, evtinfo->dobj.dumpId);
19493 :
19494 84 : destroyPQExpBuffer(query);
19495 84 : destroyPQExpBuffer(delqry);
19496 84 : free(qevtname);
19497 : }
19498 :
19499 : /*
19500 : * dumpRule
19501 : * Dump a rule
19502 : */
19503 : static void
19504 2452 : dumpRule(Archive *fout, const RuleInfo *rinfo)
19505 : {
19506 2452 : DumpOptions *dopt = fout->dopt;
19507 2452 : TableInfo *tbinfo = rinfo->ruletable;
19508 : bool is_view;
19509 : PQExpBuffer query;
19510 : PQExpBuffer cmd;
19511 : PQExpBuffer delcmd;
19512 : PQExpBuffer ruleprefix;
19513 : char *qtabname;
19514 : PGresult *res;
19515 : char *tag;
19516 :
19517 : /* Do nothing if not dumping schema */
19518 2452 : if (!dopt->dumpSchema)
19519 132 : return;
19520 :
19521 : /*
19522 : * If it is an ON SELECT rule that is created implicitly by CREATE VIEW,
19523 : * we do not want to dump it as a separate object.
19524 : */
19525 2320 : if (!rinfo->separate)
19526 1898 : return;
19527 :
19528 : /*
19529 : * If it's an ON SELECT rule, we want to print it as a view definition,
19530 : * instead of a rule.
19531 : */
19532 422 : is_view = (rinfo->ev_type == '1' && rinfo->is_instead);
19533 :
19534 422 : query = createPQExpBuffer();
19535 422 : cmd = createPQExpBuffer();
19536 422 : delcmd = createPQExpBuffer();
19537 422 : ruleprefix = createPQExpBuffer();
19538 :
19539 422 : qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
19540 :
19541 422 : if (is_view)
19542 : {
19543 : PQExpBuffer result;
19544 :
19545 : /*
19546 : * We need OR REPLACE here because we'll be replacing a dummy view.
19547 : * Otherwise this should look largely like the regular view dump code.
19548 : */
19549 20 : appendPQExpBuffer(cmd, "CREATE OR REPLACE VIEW %s",
19550 20 : fmtQualifiedDumpable(tbinfo));
19551 20 : if (nonemptyReloptions(tbinfo->reloptions))
19552 : {
19553 0 : appendPQExpBufferStr(cmd, " WITH (");
19554 0 : appendReloptionsArrayAH(cmd, tbinfo->reloptions, "", fout);
19555 0 : appendPQExpBufferChar(cmd, ')');
19556 : }
19557 20 : result = createViewAsClause(fout, tbinfo);
19558 20 : appendPQExpBuffer(cmd, " AS\n%s", result->data);
19559 20 : destroyPQExpBuffer(result);
19560 20 : if (tbinfo->checkoption != NULL)
19561 0 : appendPQExpBuffer(cmd, "\n WITH %s CHECK OPTION",
19562 : tbinfo->checkoption);
19563 20 : appendPQExpBufferStr(cmd, ";\n");
19564 : }
19565 : else
19566 : {
19567 : /* In the rule case, just print pg_get_ruledef's result verbatim */
19568 402 : appendPQExpBuffer(query,
19569 : "SELECT pg_catalog.pg_get_ruledef('%u'::pg_catalog.oid)",
19570 402 : rinfo->dobj.catId.oid);
19571 :
19572 402 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19573 :
19574 402 : if (PQntuples(res) != 1)
19575 0 : pg_fatal("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned",
19576 : rinfo->dobj.name, tbinfo->dobj.name);
19577 :
19578 402 : printfPQExpBuffer(cmd, "%s\n", PQgetvalue(res, 0, 0));
19579 :
19580 402 : PQclear(res);
19581 : }
19582 :
19583 : /*
19584 : * Add the command to alter the rules replication firing semantics if it
19585 : * differs from the default.
19586 : */
19587 422 : if (rinfo->ev_enabled != 'O')
19588 : {
19589 30 : appendPQExpBuffer(cmd, "ALTER TABLE %s ", fmtQualifiedDumpable(tbinfo));
19590 30 : switch (rinfo->ev_enabled)
19591 : {
19592 0 : case 'A':
19593 0 : appendPQExpBuffer(cmd, "ENABLE ALWAYS RULE %s;\n",
19594 0 : fmtId(rinfo->dobj.name));
19595 0 : break;
19596 0 : case 'R':
19597 0 : appendPQExpBuffer(cmd, "ENABLE REPLICA RULE %s;\n",
19598 0 : fmtId(rinfo->dobj.name));
19599 0 : break;
19600 30 : case 'D':
19601 30 : appendPQExpBuffer(cmd, "DISABLE RULE %s;\n",
19602 30 : fmtId(rinfo->dobj.name));
19603 30 : break;
19604 : }
19605 : }
19606 :
19607 422 : if (is_view)
19608 : {
19609 : /*
19610 : * We can't DROP a view's ON SELECT rule. Instead, use CREATE OR
19611 : * REPLACE VIEW to replace the rule with something with minimal
19612 : * dependencies.
19613 : */
19614 : PQExpBuffer result;
19615 :
19616 20 : appendPQExpBuffer(delcmd, "CREATE OR REPLACE VIEW %s",
19617 20 : fmtQualifiedDumpable(tbinfo));
19618 20 : result = createDummyViewAsClause(fout, tbinfo);
19619 20 : appendPQExpBuffer(delcmd, " AS\n%s;\n", result->data);
19620 20 : destroyPQExpBuffer(result);
19621 : }
19622 : else
19623 : {
19624 402 : appendPQExpBuffer(delcmd, "DROP RULE %s ",
19625 402 : fmtId(rinfo->dobj.name));
19626 402 : appendPQExpBuffer(delcmd, "ON %s;\n",
19627 402 : fmtQualifiedDumpable(tbinfo));
19628 : }
19629 :
19630 422 : appendPQExpBuffer(ruleprefix, "RULE %s ON",
19631 422 : fmtId(rinfo->dobj.name));
19632 :
19633 422 : tag = psprintf("%s %s", tbinfo->dobj.name, rinfo->dobj.name);
19634 :
19635 422 : if (rinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19636 422 : ArchiveEntry(fout, rinfo->dobj.catId, rinfo->dobj.dumpId,
19637 422 : ARCHIVE_OPTS(.tag = tag,
19638 : .namespace = tbinfo->dobj.namespace->dobj.name,
19639 : .owner = tbinfo->rolname,
19640 : .description = "RULE",
19641 : .section = SECTION_POST_DATA,
19642 : .createStmt = cmd->data,
19643 : .dropStmt = delcmd->data));
19644 :
19645 : /* Dump rule comments */
19646 422 : if (rinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19647 0 : dumpComment(fout, ruleprefix->data, qtabname,
19648 0 : tbinfo->dobj.namespace->dobj.name,
19649 : tbinfo->rolname,
19650 0 : rinfo->dobj.catId, 0, rinfo->dobj.dumpId);
19651 :
19652 422 : free(tag);
19653 422 : destroyPQExpBuffer(query);
19654 422 : destroyPQExpBuffer(cmd);
19655 422 : destroyPQExpBuffer(delcmd);
19656 422 : destroyPQExpBuffer(ruleprefix);
19657 422 : free(qtabname);
19658 : }
19659 :
19660 : /*
19661 : * getExtensionMembership --- obtain extension membership data
19662 : *
19663 : * We need to identify objects that are extension members as soon as they're
19664 : * loaded, so that we can correctly determine whether they need to be dumped.
19665 : * Generally speaking, extension member objects will get marked as *not* to
19666 : * be dumped, as they will be recreated by the single CREATE EXTENSION
19667 : * command. However, in binary upgrade mode we still need to dump the members
19668 : * individually.
19669 : */
19670 : void
19671 372 : getExtensionMembership(Archive *fout, ExtensionInfo extinfo[],
19672 : int numExtensions)
19673 : {
19674 : PQExpBuffer query;
19675 : PGresult *res;
19676 : int ntups,
19677 : i;
19678 : int i_classid,
19679 : i_objid,
19680 : i_refobjid;
19681 : ExtensionInfo *ext;
19682 :
19683 : /* Nothing to do if no extensions */
19684 372 : if (numExtensions == 0)
19685 0 : return;
19686 :
19687 372 : query = createPQExpBuffer();
19688 :
19689 : /* refclassid constraint is redundant but may speed the search */
19690 372 : appendPQExpBufferStr(query, "SELECT "
19691 : "classid, objid, refobjid "
19692 : "FROM pg_depend "
19693 : "WHERE refclassid = 'pg_extension'::regclass "
19694 : "AND deptype = 'e' "
19695 : "ORDER BY 3");
19696 :
19697 372 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19698 :
19699 372 : ntups = PQntuples(res);
19700 :
19701 372 : i_classid = PQfnumber(res, "classid");
19702 372 : i_objid = PQfnumber(res, "objid");
19703 372 : i_refobjid = PQfnumber(res, "refobjid");
19704 :
19705 : /*
19706 : * Since we ordered the SELECT by referenced ID, we can expect that
19707 : * multiple entries for the same extension will appear together; this
19708 : * saves on searches.
19709 : */
19710 372 : ext = NULL;
19711 :
19712 3070 : for (i = 0; i < ntups; i++)
19713 : {
19714 : CatalogId objId;
19715 : Oid extId;
19716 :
19717 2698 : objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
19718 2698 : objId.oid = atooid(PQgetvalue(res, i, i_objid));
19719 2698 : extId = atooid(PQgetvalue(res, i, i_refobjid));
19720 :
19721 2698 : if (ext == NULL ||
19722 2326 : ext->dobj.catId.oid != extId)
19723 432 : ext = findExtensionByOid(extId);
19724 :
19725 2698 : if (ext == NULL)
19726 : {
19727 : /* shouldn't happen */
19728 0 : pg_log_warning("could not find referenced extension %u", extId);
19729 0 : continue;
19730 : }
19731 :
19732 2698 : recordExtensionMembership(objId, ext);
19733 : }
19734 :
19735 372 : PQclear(res);
19736 :
19737 372 : destroyPQExpBuffer(query);
19738 : }
19739 :
19740 : /*
19741 : * processExtensionTables --- deal with extension configuration tables
19742 : *
19743 : * There are two parts to this process:
19744 : *
19745 : * 1. Identify and create dump records for extension configuration tables.
19746 : *
19747 : * Extensions can mark tables as "configuration", which means that the user
19748 : * is able and expected to modify those tables after the extension has been
19749 : * loaded. For these tables, we dump out only the data- the structure is
19750 : * expected to be handled at CREATE EXTENSION time, including any indexes or
19751 : * foreign keys, which brings us to-
19752 : *
19753 : * 2. Record FK dependencies between configuration tables.
19754 : *
19755 : * Due to the FKs being created at CREATE EXTENSION time and therefore before
19756 : * the data is loaded, we have to work out what the best order for reloading
19757 : * the data is, to avoid FK violations when the tables are restored. This is
19758 : * not perfect- we can't handle circular dependencies and if any exist they
19759 : * will cause an invalid dump to be produced (though at least all of the data
19760 : * is included for a user to manually restore). This is currently documented
19761 : * but perhaps we can provide a better solution in the future.
19762 : */
19763 : void
19764 370 : processExtensionTables(Archive *fout, ExtensionInfo extinfo[],
19765 : int numExtensions)
19766 : {
19767 370 : DumpOptions *dopt = fout->dopt;
19768 : PQExpBuffer query;
19769 : PGresult *res;
19770 : int ntups,
19771 : i;
19772 : int i_conrelid,
19773 : i_confrelid;
19774 :
19775 : /* Nothing to do if no extensions */
19776 370 : if (numExtensions == 0)
19777 0 : return;
19778 :
19779 : /*
19780 : * Identify extension configuration tables and create TableDataInfo
19781 : * objects for them, ensuring their data will be dumped even though the
19782 : * tables themselves won't be.
19783 : *
19784 : * Note that we create TableDataInfo objects even in schema-only mode, ie,
19785 : * user data in a configuration table is treated like schema data. This
19786 : * seems appropriate since system data in a config table would get
19787 : * reloaded by CREATE EXTENSION. If the extension is not listed in the
19788 : * list of extensions to be included, none of its data is dumped.
19789 : */
19790 800 : for (i = 0; i < numExtensions; i++)
19791 : {
19792 430 : ExtensionInfo *curext = &(extinfo[i]);
19793 430 : char *extconfig = curext->extconfig;
19794 430 : char *extcondition = curext->extcondition;
19795 430 : char **extconfigarray = NULL;
19796 430 : char **extconditionarray = NULL;
19797 430 : int nconfigitems = 0;
19798 430 : int nconditionitems = 0;
19799 :
19800 : /*
19801 : * Check if this extension is listed as to include in the dump. If
19802 : * not, any table data associated with it is discarded.
19803 : */
19804 430 : if (extension_include_oids.head != NULL &&
19805 16 : !simple_oid_list_member(&extension_include_oids,
19806 : curext->dobj.catId.oid))
19807 12 : continue;
19808 :
19809 : /*
19810 : * Check if this extension is listed as to exclude in the dump. If
19811 : * yes, any table data associated with it is discarded.
19812 : */
19813 430 : if (extension_exclude_oids.head != NULL &&
19814 8 : simple_oid_list_member(&extension_exclude_oids,
19815 : curext->dobj.catId.oid))
19816 4 : continue;
19817 :
19818 418 : if (strlen(extconfig) != 0 || strlen(extcondition) != 0)
19819 : {
19820 : int j;
19821 :
19822 40 : if (!parsePGArray(extconfig, &extconfigarray, &nconfigitems))
19823 0 : pg_fatal("could not parse %s array", "extconfig");
19824 40 : if (!parsePGArray(extcondition, &extconditionarray, &nconditionitems))
19825 0 : pg_fatal("could not parse %s array", "extcondition");
19826 40 : if (nconfigitems != nconditionitems)
19827 0 : pg_fatal("mismatched number of configurations and conditions for extension");
19828 :
19829 120 : for (j = 0; j < nconfigitems; j++)
19830 : {
19831 : TableInfo *configtbl;
19832 80 : Oid configtbloid = atooid(extconfigarray[j]);
19833 80 : bool dumpobj =
19834 80 : curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
19835 :
19836 80 : configtbl = findTableByOid(configtbloid);
19837 80 : if (configtbl == NULL)
19838 0 : continue;
19839 :
19840 : /*
19841 : * Tables of not-to-be-dumped extensions shouldn't be dumped
19842 : * unless the table or its schema is explicitly included
19843 : */
19844 80 : if (!(curext->dobj.dump & DUMP_COMPONENT_DEFINITION))
19845 : {
19846 : /* check table explicitly requested */
19847 4 : if (table_include_oids.head != NULL &&
19848 0 : simple_oid_list_member(&table_include_oids,
19849 : configtbloid))
19850 0 : dumpobj = true;
19851 :
19852 : /* check table's schema explicitly requested */
19853 4 : if (configtbl->dobj.namespace->dobj.dump &
19854 : DUMP_COMPONENT_DATA)
19855 4 : dumpobj = true;
19856 : }
19857 :
19858 : /* check table excluded by an exclusion switch */
19859 88 : if (table_exclude_oids.head != NULL &&
19860 8 : simple_oid_list_member(&table_exclude_oids,
19861 : configtbloid))
19862 2 : dumpobj = false;
19863 :
19864 : /* check schema excluded by an exclusion switch */
19865 80 : if (simple_oid_list_member(&schema_exclude_oids,
19866 80 : configtbl->dobj.namespace->dobj.catId.oid))
19867 0 : dumpobj = false;
19868 :
19869 80 : if (dumpobj)
19870 : {
19871 78 : makeTableDataInfo(dopt, configtbl);
19872 78 : if (configtbl->dataObj != NULL)
19873 : {
19874 78 : if (strlen(extconditionarray[j]) > 0)
19875 0 : configtbl->dataObj->filtercond = pg_strdup(extconditionarray[j]);
19876 : }
19877 : }
19878 : }
19879 : }
19880 418 : if (extconfigarray)
19881 40 : free(extconfigarray);
19882 418 : if (extconditionarray)
19883 40 : free(extconditionarray);
19884 : }
19885 :
19886 : /*
19887 : * Now that all the TableDataInfo objects have been created for all the
19888 : * extensions, check their FK dependencies and register them to try and
19889 : * dump the data out in an order that they can be restored in.
19890 : *
19891 : * Note that this is not a problem for user tables as their FKs are
19892 : * recreated after the data has been loaded.
19893 : */
19894 :
19895 370 : query = createPQExpBuffer();
19896 :
19897 370 : printfPQExpBuffer(query,
19898 : "SELECT conrelid, confrelid "
19899 : "FROM pg_constraint "
19900 : "JOIN pg_depend ON (objid = confrelid) "
19901 : "WHERE contype = 'f' "
19902 : "AND refclassid = 'pg_extension'::regclass "
19903 : "AND classid = 'pg_class'::regclass;");
19904 :
19905 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19906 370 : ntups = PQntuples(res);
19907 :
19908 370 : i_conrelid = PQfnumber(res, "conrelid");
19909 370 : i_confrelid = PQfnumber(res, "confrelid");
19910 :
19911 : /* Now get the dependencies and register them */
19912 370 : for (i = 0; i < ntups; i++)
19913 : {
19914 : Oid conrelid,
19915 : confrelid;
19916 : TableInfo *reftable,
19917 : *contable;
19918 :
19919 0 : conrelid = atooid(PQgetvalue(res, i, i_conrelid));
19920 0 : confrelid = atooid(PQgetvalue(res, i, i_confrelid));
19921 0 : contable = findTableByOid(conrelid);
19922 0 : reftable = findTableByOid(confrelid);
19923 :
19924 0 : if (reftable == NULL ||
19925 0 : reftable->dataObj == NULL ||
19926 0 : contable == NULL ||
19927 0 : contable->dataObj == NULL)
19928 0 : continue;
19929 :
19930 : /*
19931 : * Make referencing TABLE_DATA object depend on the referenced table's
19932 : * TABLE_DATA object.
19933 : */
19934 0 : addObjectDependency(&contable->dataObj->dobj,
19935 0 : reftable->dataObj->dobj.dumpId);
19936 : }
19937 370 : PQclear(res);
19938 370 : destroyPQExpBuffer(query);
19939 : }
19940 :
19941 : /*
19942 : * getDependencies --- obtain available dependency data
19943 : */
19944 : static void
19945 370 : getDependencies(Archive *fout)
19946 : {
19947 : PQExpBuffer query;
19948 : PGresult *res;
19949 : int ntups,
19950 : i;
19951 : int i_classid,
19952 : i_objid,
19953 : i_refclassid,
19954 : i_refobjid,
19955 : i_deptype;
19956 : DumpableObject *dobj,
19957 : *refdobj;
19958 :
19959 370 : pg_log_info("reading dependency data");
19960 :
19961 370 : query = createPQExpBuffer();
19962 :
19963 : /*
19964 : * Messy query to collect the dependency data we need. Note that we
19965 : * ignore the sub-object column, so that dependencies of or on a column
19966 : * look the same as dependencies of or on a whole table.
19967 : *
19968 : * PIN dependencies aren't interesting, and EXTENSION dependencies were
19969 : * already processed by getExtensionMembership.
19970 : */
19971 370 : appendPQExpBufferStr(query, "SELECT "
19972 : "classid, objid, refclassid, refobjid, deptype "
19973 : "FROM pg_depend "
19974 : "WHERE deptype != 'p' AND deptype != 'e'\n");
19975 :
19976 : /*
19977 : * Since we don't treat pg_amop entries as separate DumpableObjects, we
19978 : * have to translate their dependencies into dependencies of their parent
19979 : * opfamily. Ignore internal dependencies though, as those will point to
19980 : * their parent opclass, which we needn't consider here (and if we did,
19981 : * it'd just result in circular dependencies). Also, "loose" opfamily
19982 : * entries will have dependencies on their parent opfamily, which we
19983 : * should drop since they'd likewise become useless self-dependencies.
19984 : * (But be sure to keep deps on *other* opfamilies; see amopsortfamily.)
19985 : */
19986 370 : appendPQExpBufferStr(query, "UNION ALL\n"
19987 : "SELECT 'pg_opfamily'::regclass AS classid, amopfamily AS objid, refclassid, refobjid, deptype "
19988 : "FROM pg_depend d, pg_amop o "
19989 : "WHERE deptype NOT IN ('p', 'e', 'i') AND "
19990 : "classid = 'pg_amop'::regclass AND objid = o.oid "
19991 : "AND NOT (refclassid = 'pg_opfamily'::regclass AND amopfamily = refobjid)\n");
19992 :
19993 : /* Likewise for pg_amproc entries */
19994 370 : appendPQExpBufferStr(query, "UNION ALL\n"
19995 : "SELECT 'pg_opfamily'::regclass AS classid, amprocfamily AS objid, refclassid, refobjid, deptype "
19996 : "FROM pg_depend d, pg_amproc p "
19997 : "WHERE deptype NOT IN ('p', 'e', 'i') AND "
19998 : "classid = 'pg_amproc'::regclass AND objid = p.oid "
19999 : "AND NOT (refclassid = 'pg_opfamily'::regclass AND amprocfamily = refobjid)\n");
20000 :
20001 : /* Sort the output for efficiency below */
20002 370 : appendPQExpBufferStr(query, "ORDER BY 1,2");
20003 :
20004 370 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
20005 :
20006 370 : ntups = PQntuples(res);
20007 :
20008 370 : i_classid = PQfnumber(res, "classid");
20009 370 : i_objid = PQfnumber(res, "objid");
20010 370 : i_refclassid = PQfnumber(res, "refclassid");
20011 370 : i_refobjid = PQfnumber(res, "refobjid");
20012 370 : i_deptype = PQfnumber(res, "deptype");
20013 :
20014 : /*
20015 : * Since we ordered the SELECT by referencing ID, we can expect that
20016 : * multiple entries for the same object will appear together; this saves
20017 : * on searches.
20018 : */
20019 370 : dobj = NULL;
20020 :
20021 809090 : for (i = 0; i < ntups; i++)
20022 : {
20023 : CatalogId objId;
20024 : CatalogId refobjId;
20025 : char deptype;
20026 :
20027 808720 : objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
20028 808720 : objId.oid = atooid(PQgetvalue(res, i, i_objid));
20029 808720 : refobjId.tableoid = atooid(PQgetvalue(res, i, i_refclassid));
20030 808720 : refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
20031 808720 : deptype = *(PQgetvalue(res, i, i_deptype));
20032 :
20033 808720 : if (dobj == NULL ||
20034 757362 : dobj->catId.tableoid != objId.tableoid ||
20035 753048 : dobj->catId.oid != objId.oid)
20036 356302 : dobj = findObjectByCatalogId(objId);
20037 :
20038 : /*
20039 : * Failure to find objects mentioned in pg_depend is not unexpected,
20040 : * since for example we don't collect info about TOAST tables.
20041 : */
20042 808720 : if (dobj == NULL)
20043 : {
20044 : #ifdef NOT_USED
20045 : pg_log_warning("no referencing object %u %u",
20046 : objId.tableoid, objId.oid);
20047 : #endif
20048 52686 : continue;
20049 : }
20050 :
20051 757732 : refdobj = findObjectByCatalogId(refobjId);
20052 :
20053 757732 : if (refdobj == NULL)
20054 : {
20055 : #ifdef NOT_USED
20056 : pg_log_warning("no referenced object %u %u",
20057 : refobjId.tableoid, refobjId.oid);
20058 : #endif
20059 1698 : continue;
20060 : }
20061 :
20062 : /*
20063 : * For 'x' dependencies, mark the object for later; we still add the
20064 : * normal dependency, for possible ordering purposes. Currently
20065 : * pg_dump_sort.c knows to put extensions ahead of all object types
20066 : * that could possibly depend on them, but this is safer.
20067 : */
20068 756034 : if (deptype == 'x')
20069 88 : dobj->depends_on_ext = true;
20070 :
20071 : /*
20072 : * Ordinarily, table rowtypes have implicit dependencies on their
20073 : * tables. However, for a composite type the implicit dependency goes
20074 : * the other way in pg_depend; which is the right thing for DROP but
20075 : * it doesn't produce the dependency ordering we need. So in that one
20076 : * case, we reverse the direction of the dependency.
20077 : */
20078 756034 : if (deptype == 'i' &&
20079 212960 : dobj->objType == DO_TABLE &&
20080 2560 : refdobj->objType == DO_TYPE)
20081 376 : addObjectDependency(refdobj, dobj->dumpId);
20082 : else
20083 : /* normal case */
20084 755658 : addObjectDependency(dobj, refdobj->dumpId);
20085 : }
20086 :
20087 370 : PQclear(res);
20088 :
20089 370 : destroyPQExpBuffer(query);
20090 370 : }
20091 :
20092 :
20093 : /*
20094 : * createBoundaryObjects - create dummy DumpableObjects to represent
20095 : * dump section boundaries.
20096 : */
20097 : static DumpableObject *
20098 370 : createBoundaryObjects(void)
20099 : {
20100 : DumpableObject *dobjs;
20101 :
20102 370 : dobjs = (DumpableObject *) pg_malloc(2 * sizeof(DumpableObject));
20103 :
20104 370 : dobjs[0].objType = DO_PRE_DATA_BOUNDARY;
20105 370 : dobjs[0].catId = nilCatalogId;
20106 370 : AssignDumpId(dobjs + 0);
20107 370 : dobjs[0].name = pg_strdup("PRE-DATA BOUNDARY");
20108 :
20109 370 : dobjs[1].objType = DO_POST_DATA_BOUNDARY;
20110 370 : dobjs[1].catId = nilCatalogId;
20111 370 : AssignDumpId(dobjs + 1);
20112 370 : dobjs[1].name = pg_strdup("POST-DATA BOUNDARY");
20113 :
20114 370 : return dobjs;
20115 : }
20116 :
20117 : /*
20118 : * addBoundaryDependencies - add dependencies as needed to enforce the dump
20119 : * section boundaries.
20120 : */
20121 : static void
20122 370 : addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
20123 : DumpableObject *boundaryObjs)
20124 : {
20125 370 : DumpableObject *preDataBound = boundaryObjs + 0;
20126 370 : DumpableObject *postDataBound = boundaryObjs + 1;
20127 : int i;
20128 :
20129 1380772 : for (i = 0; i < numObjs; i++)
20130 : {
20131 1380402 : DumpableObject *dobj = dobjs[i];
20132 :
20133 : /*
20134 : * The classification of object types here must match the SECTION_xxx
20135 : * values assigned during subsequent ArchiveEntry calls!
20136 : */
20137 1380402 : switch (dobj->objType)
20138 : {
20139 1286504 : case DO_NAMESPACE:
20140 : case DO_EXTENSION:
20141 : case DO_TYPE:
20142 : case DO_SHELL_TYPE:
20143 : case DO_FUNC:
20144 : case DO_AGG:
20145 : case DO_OPERATOR:
20146 : case DO_ACCESS_METHOD:
20147 : case DO_OPCLASS:
20148 : case DO_OPFAMILY:
20149 : case DO_COLLATION:
20150 : case DO_CONVERSION:
20151 : case DO_TABLE:
20152 : case DO_TABLE_ATTACH:
20153 : case DO_ATTRDEF:
20154 : case DO_PROCLANG:
20155 : case DO_CAST:
20156 : case DO_DUMMY_TYPE:
20157 : case DO_TSPARSER:
20158 : case DO_TSDICT:
20159 : case DO_TSTEMPLATE:
20160 : case DO_TSCONFIG:
20161 : case DO_FDW:
20162 : case DO_FOREIGN_SERVER:
20163 : case DO_TRANSFORM:
20164 : /* Pre-data objects: must come before the pre-data boundary */
20165 1286504 : addObjectDependency(preDataBound, dobj->dumpId);
20166 1286504 : break;
20167 10194 : case DO_TABLE_DATA:
20168 : case DO_SEQUENCE_SET:
20169 : case DO_LARGE_OBJECT:
20170 : case DO_LARGE_OBJECT_DATA:
20171 : /* Data objects: must come between the boundaries */
20172 10194 : addObjectDependency(dobj, preDataBound->dumpId);
20173 10194 : addObjectDependency(postDataBound, dobj->dumpId);
20174 10194 : break;
20175 12378 : case DO_INDEX:
20176 : case DO_INDEX_ATTACH:
20177 : case DO_STATSEXT:
20178 : case DO_REFRESH_MATVIEW:
20179 : case DO_TRIGGER:
20180 : case DO_EVENT_TRIGGER:
20181 : case DO_DEFAULT_ACL:
20182 : case DO_POLICY:
20183 : case DO_PUBLICATION:
20184 : case DO_PUBLICATION_REL:
20185 : case DO_PUBLICATION_TABLE_IN_SCHEMA:
20186 : case DO_SUBSCRIPTION:
20187 : case DO_SUBSCRIPTION_REL:
20188 : /* Post-data objects: must come after the post-data boundary */
20189 12378 : addObjectDependency(dobj, postDataBound->dumpId);
20190 12378 : break;
20191 57624 : case DO_RULE:
20192 : /* Rules are post-data, but only if dumped separately */
20193 57624 : if (((RuleInfo *) dobj)->separate)
20194 1286 : addObjectDependency(dobj, postDataBound->dumpId);
20195 57624 : break;
20196 5378 : case DO_CONSTRAINT:
20197 : case DO_FK_CONSTRAINT:
20198 : /* Constraints are post-data, but only if dumped separately */
20199 5378 : if (((ConstraintInfo *) dobj)->separate)
20200 3798 : addObjectDependency(dobj, postDataBound->dumpId);
20201 5378 : break;
20202 370 : case DO_PRE_DATA_BOUNDARY:
20203 : /* nothing to do */
20204 370 : break;
20205 370 : case DO_POST_DATA_BOUNDARY:
20206 : /* must come after the pre-data boundary */
20207 370 : addObjectDependency(dobj, preDataBound->dumpId);
20208 370 : break;
20209 7584 : case DO_REL_STATS:
20210 : /* stats section varies by parent object type, DATA or POST */
20211 7584 : if (((RelStatsInfo *) dobj)->section == SECTION_DATA)
20212 : {
20213 4844 : addObjectDependency(dobj, preDataBound->dumpId);
20214 4844 : addObjectDependency(postDataBound, dobj->dumpId);
20215 : }
20216 : else
20217 2740 : addObjectDependency(dobj, postDataBound->dumpId);
20218 7584 : break;
20219 : }
20220 : }
20221 370 : }
20222 :
20223 :
20224 : /*
20225 : * BuildArchiveDependencies - create dependency data for archive TOC entries
20226 : *
20227 : * The raw dependency data obtained by getDependencies() is not terribly
20228 : * useful in an archive dump, because in many cases there are dependency
20229 : * chains linking through objects that don't appear explicitly in the dump.
20230 : * For example, a view will depend on its _RETURN rule while the _RETURN rule
20231 : * will depend on other objects --- but the rule will not appear as a separate
20232 : * object in the dump. We need to adjust the view's dependencies to include
20233 : * whatever the rule depends on that is included in the dump.
20234 : *
20235 : * Just to make things more complicated, there are also "special" dependencies
20236 : * such as the dependency of a TABLE DATA item on its TABLE, which we must
20237 : * not rearrange because pg_restore knows that TABLE DATA only depends on
20238 : * its table. In these cases we must leave the dependencies strictly as-is
20239 : * even if they refer to not-to-be-dumped objects.
20240 : *
20241 : * To handle this, the convention is that "special" dependencies are created
20242 : * during ArchiveEntry calls, and an archive TOC item that has any such
20243 : * entries will not be touched here. Otherwise, we recursively search the
20244 : * DumpableObject data structures to build the correct dependencies for each
20245 : * archive TOC item.
20246 : */
20247 : static void
20248 110 : BuildArchiveDependencies(Archive *fout)
20249 : {
20250 110 : ArchiveHandle *AH = (ArchiveHandle *) fout;
20251 : TocEntry *te;
20252 :
20253 : /* Scan all TOC entries in the archive */
20254 16266 : for (te = AH->toc->next; te != AH->toc; te = te->next)
20255 : {
20256 : DumpableObject *dobj;
20257 : DumpId *dependencies;
20258 : int nDeps;
20259 : int allocDeps;
20260 :
20261 : /* No need to process entries that will not be dumped */
20262 16156 : if (te->reqs == 0)
20263 8084 : continue;
20264 : /* Ignore entries that already have "special" dependencies */
20265 16130 : if (te->nDeps > 0)
20266 7130 : continue;
20267 : /* Otherwise, look up the item's original DumpableObject, if any */
20268 9000 : dobj = findObjectByDumpId(te->dumpId);
20269 9000 : if (dobj == NULL)
20270 672 : continue;
20271 : /* No work if it has no dependencies */
20272 8328 : if (dobj->nDeps <= 0)
20273 256 : continue;
20274 : /* Set up work array */
20275 8072 : allocDeps = 64;
20276 8072 : dependencies = (DumpId *) pg_malloc(allocDeps * sizeof(DumpId));
20277 8072 : nDeps = 0;
20278 : /* Recursively find all dumpable dependencies */
20279 8072 : findDumpableDependencies(AH, dobj,
20280 : &dependencies, &nDeps, &allocDeps);
20281 : /* And save 'em ... */
20282 8072 : if (nDeps > 0)
20283 : {
20284 6278 : dependencies = (DumpId *) pg_realloc(dependencies,
20285 : nDeps * sizeof(DumpId));
20286 6278 : te->dependencies = dependencies;
20287 6278 : te->nDeps = nDeps;
20288 : }
20289 : else
20290 1794 : free(dependencies);
20291 : }
20292 110 : }
20293 :
20294 : /* Recursive search subroutine for BuildArchiveDependencies */
20295 : static void
20296 18938 : findDumpableDependencies(ArchiveHandle *AH, const DumpableObject *dobj,
20297 : DumpId **dependencies, int *nDeps, int *allocDeps)
20298 : {
20299 : int i;
20300 :
20301 : /*
20302 : * Ignore section boundary objects: if we search through them, we'll
20303 : * report lots of bogus dependencies.
20304 : */
20305 18938 : if (dobj->objType == DO_PRE_DATA_BOUNDARY ||
20306 18898 : dobj->objType == DO_POST_DATA_BOUNDARY)
20307 3432 : return;
20308 :
20309 39570 : for (i = 0; i < dobj->nDeps; i++)
20310 : {
20311 24064 : DumpId depid = dobj->dependencies[i];
20312 :
20313 24064 : if (TocIDRequired(AH, depid) != 0)
20314 : {
20315 : /* Object will be dumped, so just reference it as a dependency */
20316 13198 : if (*nDeps >= *allocDeps)
20317 : {
20318 0 : *allocDeps *= 2;
20319 0 : *dependencies = (DumpId *) pg_realloc(*dependencies,
20320 0 : *allocDeps * sizeof(DumpId));
20321 : }
20322 13198 : (*dependencies)[*nDeps] = depid;
20323 13198 : (*nDeps)++;
20324 : }
20325 : else
20326 : {
20327 : /*
20328 : * Object will not be dumped, so recursively consider its deps. We
20329 : * rely on the assumption that sortDumpableObjects already broke
20330 : * any dependency loops, else we might recurse infinitely.
20331 : */
20332 10866 : DumpableObject *otherdobj = findObjectByDumpId(depid);
20333 :
20334 10866 : if (otherdobj)
20335 10866 : findDumpableDependencies(AH, otherdobj,
20336 : dependencies, nDeps, allocDeps);
20337 : }
20338 : }
20339 : }
20340 :
20341 :
20342 : /*
20343 : * getFormattedTypeName - retrieve a nicely-formatted type name for the
20344 : * given type OID.
20345 : *
20346 : * This does not guarantee to schema-qualify the output, so it should not
20347 : * be used to create the target object name for CREATE or ALTER commands.
20348 : *
20349 : * Note that the result is cached and must not be freed by the caller.
20350 : */
20351 : static const char *
20352 4744 : getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts)
20353 : {
20354 : TypeInfo *typeInfo;
20355 : char *result;
20356 : PQExpBuffer query;
20357 : PGresult *res;
20358 :
20359 4744 : if (oid == 0)
20360 : {
20361 0 : if ((opts & zeroAsStar) != 0)
20362 0 : return "*";
20363 0 : else if ((opts & zeroAsNone) != 0)
20364 0 : return "NONE";
20365 : }
20366 :
20367 : /* see if we have the result cached in the type's TypeInfo record */
20368 4744 : typeInfo = findTypeByOid(oid);
20369 4744 : if (typeInfo && typeInfo->ftypname)
20370 3726 : return typeInfo->ftypname;
20371 :
20372 1018 : query = createPQExpBuffer();
20373 1018 : appendPQExpBuffer(query, "SELECT pg_catalog.format_type('%u'::pg_catalog.oid, NULL)",
20374 : oid);
20375 :
20376 1018 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
20377 :
20378 : /* result of format_type is already quoted */
20379 1018 : result = pg_strdup(PQgetvalue(res, 0, 0));
20380 :
20381 1018 : PQclear(res);
20382 1018 : destroyPQExpBuffer(query);
20383 :
20384 : /*
20385 : * Cache the result for re-use in later requests, if possible. If we
20386 : * don't have a TypeInfo for the type, the string will be leaked once the
20387 : * caller is done with it ... but that case really should not happen, so
20388 : * leaking if it does seems acceptable.
20389 : */
20390 1018 : if (typeInfo)
20391 1018 : typeInfo->ftypname = result;
20392 :
20393 1018 : return result;
20394 : }
20395 :
20396 : /*
20397 : * Return a column list clause for the given relation.
20398 : *
20399 : * Special case: if there are no undropped columns in the relation, return
20400 : * "", not an invalid "()" column list.
20401 : */
20402 : static const char *
20403 17476 : fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer)
20404 : {
20405 17476 : int numatts = ti->numatts;
20406 17476 : char **attnames = ti->attnames;
20407 17476 : bool *attisdropped = ti->attisdropped;
20408 17476 : char *attgenerated = ti->attgenerated;
20409 : bool needComma;
20410 : int i;
20411 :
20412 17476 : appendPQExpBufferChar(buffer, '(');
20413 17476 : needComma = false;
20414 83232 : for (i = 0; i < numatts; i++)
20415 : {
20416 65756 : if (attisdropped[i])
20417 1220 : continue;
20418 64536 : if (attgenerated[i])
20419 2400 : continue;
20420 62136 : if (needComma)
20421 45180 : appendPQExpBufferStr(buffer, ", ");
20422 62136 : appendPQExpBufferStr(buffer, fmtId(attnames[i]));
20423 62136 : needComma = true;
20424 : }
20425 :
20426 17476 : if (!needComma)
20427 520 : return ""; /* no undropped columns */
20428 :
20429 16956 : appendPQExpBufferChar(buffer, ')');
20430 16956 : return buffer->data;
20431 : }
20432 :
20433 : /*
20434 : * Check if a reloptions array is nonempty.
20435 : */
20436 : static bool
20437 28582 : nonemptyReloptions(const char *reloptions)
20438 : {
20439 : /* Don't want to print it if it's just "{}" */
20440 28582 : return (reloptions != NULL && strlen(reloptions) > 2);
20441 : }
20442 :
20443 : /*
20444 : * Format a reloptions array and append it to the given buffer.
20445 : *
20446 : * "prefix" is prepended to the option names; typically it's "" or "toast.".
20447 : */
20448 : static void
20449 440 : appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
20450 : const char *prefix, Archive *fout)
20451 : {
20452 : bool res;
20453 :
20454 440 : res = appendReloptionsArray(buffer, reloptions, prefix, fout->encoding,
20455 440 : fout->std_strings);
20456 440 : if (!res)
20457 0 : pg_log_warning("could not parse %s array", "reloptions");
20458 440 : }
20459 :
20460 : /*
20461 : * read_dump_filters - retrieve object identifier patterns from file
20462 : *
20463 : * Parse the specified filter file for include and exclude patterns, and add
20464 : * them to the relevant lists. If the filename is "-" then filters will be
20465 : * read from STDIN rather than a file.
20466 : */
20467 : static void
20468 52 : read_dump_filters(const char *filename, DumpOptions *dopt)
20469 : {
20470 : FilterStateData fstate;
20471 : char *objname;
20472 : FilterCommandType comtype;
20473 : FilterObjectType objtype;
20474 :
20475 52 : filter_init(&fstate, filename, exit_nicely);
20476 :
20477 168 : while (filter_read_item(&fstate, &objname, &comtype, &objtype))
20478 : {
20479 66 : if (comtype == FILTER_COMMAND_TYPE_INCLUDE)
20480 : {
20481 34 : switch (objtype)
20482 : {
20483 0 : case FILTER_OBJECT_TYPE_NONE:
20484 0 : break;
20485 0 : case FILTER_OBJECT_TYPE_DATABASE:
20486 : case FILTER_OBJECT_TYPE_FUNCTION:
20487 : case FILTER_OBJECT_TYPE_INDEX:
20488 : case FILTER_OBJECT_TYPE_TABLE_DATA:
20489 : case FILTER_OBJECT_TYPE_TABLE_DATA_AND_CHILDREN:
20490 : case FILTER_OBJECT_TYPE_TRIGGER:
20491 0 : pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
20492 : "include",
20493 : filter_object_type_name(objtype));
20494 0 : exit_nicely(1);
20495 : break; /* unreachable */
20496 :
20497 2 : case FILTER_OBJECT_TYPE_EXTENSION:
20498 2 : simple_string_list_append(&extension_include_patterns, objname);
20499 2 : break;
20500 2 : case FILTER_OBJECT_TYPE_FOREIGN_DATA:
20501 2 : simple_string_list_append(&foreign_servers_include_patterns, objname);
20502 2 : break;
20503 2 : case FILTER_OBJECT_TYPE_SCHEMA:
20504 2 : simple_string_list_append(&schema_include_patterns, objname);
20505 2 : dopt->include_everything = false;
20506 2 : break;
20507 26 : case FILTER_OBJECT_TYPE_TABLE:
20508 26 : simple_string_list_append(&table_include_patterns, objname);
20509 26 : dopt->include_everything = false;
20510 26 : break;
20511 2 : case FILTER_OBJECT_TYPE_TABLE_AND_CHILDREN:
20512 2 : simple_string_list_append(&table_include_patterns_and_children,
20513 : objname);
20514 2 : dopt->include_everything = false;
20515 2 : break;
20516 : }
20517 : }
20518 32 : else if (comtype == FILTER_COMMAND_TYPE_EXCLUDE)
20519 : {
20520 18 : switch (objtype)
20521 : {
20522 0 : case FILTER_OBJECT_TYPE_NONE:
20523 0 : break;
20524 2 : case FILTER_OBJECT_TYPE_DATABASE:
20525 : case FILTER_OBJECT_TYPE_FUNCTION:
20526 : case FILTER_OBJECT_TYPE_INDEX:
20527 : case FILTER_OBJECT_TYPE_TRIGGER:
20528 : case FILTER_OBJECT_TYPE_FOREIGN_DATA:
20529 2 : pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
20530 : "exclude",
20531 : filter_object_type_name(objtype));
20532 2 : exit_nicely(1);
20533 : break;
20534 :
20535 2 : case FILTER_OBJECT_TYPE_EXTENSION:
20536 2 : simple_string_list_append(&extension_exclude_patterns, objname);
20537 2 : break;
20538 2 : case FILTER_OBJECT_TYPE_TABLE_DATA:
20539 2 : simple_string_list_append(&tabledata_exclude_patterns,
20540 : objname);
20541 2 : break;
20542 2 : case FILTER_OBJECT_TYPE_TABLE_DATA_AND_CHILDREN:
20543 2 : simple_string_list_append(&tabledata_exclude_patterns_and_children,
20544 : objname);
20545 2 : break;
20546 4 : case FILTER_OBJECT_TYPE_SCHEMA:
20547 4 : simple_string_list_append(&schema_exclude_patterns, objname);
20548 4 : break;
20549 4 : case FILTER_OBJECT_TYPE_TABLE:
20550 4 : simple_string_list_append(&table_exclude_patterns, objname);
20551 4 : break;
20552 2 : case FILTER_OBJECT_TYPE_TABLE_AND_CHILDREN:
20553 2 : simple_string_list_append(&table_exclude_patterns_and_children,
20554 : objname);
20555 2 : break;
20556 : }
20557 : }
20558 : else
20559 : {
20560 : Assert(comtype == FILTER_COMMAND_TYPE_NONE);
20561 : Assert(objtype == FILTER_OBJECT_TYPE_NONE);
20562 : }
20563 :
20564 64 : if (objname)
20565 50 : free(objname);
20566 : }
20567 :
20568 44 : filter_free(&fstate);
20569 44 : }
|