Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * pg_dump.c
4 : * pg_dump is a utility for dumping out a postgres database
5 : * into a script file.
6 : *
7 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * pg_dump will read the system catalogs in a database and dump out a
11 : * script that reproduces the schema in terms of SQL that is understood
12 : * by PostgreSQL
13 : *
14 : * Note that pg_dump runs in a transaction-snapshot mode transaction,
15 : * so it sees a consistent snapshot of the database including system
16 : * catalogs. However, it relies in part on various specialized backend
17 : * functions like pg_get_indexdef(), and those things tend to look at
18 : * the currently committed state. So it is possible to get a 'cache
19 : * lookup failed' error if someone performs DDL changes while a dump is
20 : * happening. The window for this sort of thing is from the acquisition
21 : * of the transaction snapshot to getSchemaData() (when pg_dump acquires
22 : * AccessShareLock on every table it intends to dump). It isn't very large,
23 : * but it can happen.
24 : *
25 : * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
26 : *
27 : * IDENTIFICATION
28 : * src/bin/pg_dump/pg_dump.c
29 : *
30 : *-------------------------------------------------------------------------
31 : */
32 : #include "postgres_fe.h"
33 :
34 : #include <unistd.h>
35 : #include <ctype.h>
36 : #include <limits.h>
37 : #ifdef HAVE_TERMIOS_H
38 : #include <termios.h>
39 : #endif
40 :
41 : #include "access/attnum.h"
42 : #include "access/sysattr.h"
43 : #include "access/transam.h"
44 : #include "catalog/pg_aggregate_d.h"
45 : #include "catalog/pg_am_d.h"
46 : #include "catalog/pg_attribute_d.h"
47 : #include "catalog/pg_authid_d.h"
48 : #include "catalog/pg_cast_d.h"
49 : #include "catalog/pg_class_d.h"
50 : #include "catalog/pg_constraint_d.h"
51 : #include "catalog/pg_default_acl_d.h"
52 : #include "catalog/pg_largeobject_d.h"
53 : #include "catalog/pg_largeobject_metadata_d.h"
54 : #include "catalog/pg_proc_d.h"
55 : #include "catalog/pg_publication_d.h"
56 : #include "catalog/pg_shdepend_d.h"
57 : #include "catalog/pg_subscription_d.h"
58 : #include "catalog/pg_type_d.h"
59 : #include "common/connect.h"
60 : #include "common/int.h"
61 : #include "common/relpath.h"
62 : #include "common/shortest_dec.h"
63 : #include "compress_io.h"
64 : #include "dumputils.h"
65 : #include "fe_utils/option_utils.h"
66 : #include "fe_utils/string_utils.h"
67 : #include "filter.h"
68 : #include "getopt_long.h"
69 : #include "libpq/libpq-fs.h"
70 : #include "parallel.h"
71 : #include "pg_backup_db.h"
72 : #include "pg_backup_utils.h"
73 : #include "pg_dump.h"
74 : #include "statistics/statistics_format.h"
75 : #include "storage/block.h"
76 :
77 : typedef struct
78 : {
79 : Oid roleoid; /* role's OID */
80 : const char *rolename; /* role's name */
81 : } RoleNameItem;
82 :
83 : typedef struct
84 : {
85 : const char *descr; /* comment for an object */
86 : Oid classoid; /* object class (catalog OID) */
87 : Oid objoid; /* object OID */
88 : int objsubid; /* subobject (table column #) */
89 : } CommentItem;
90 :
91 : typedef struct
92 : {
93 : const char *provider; /* label provider of this security label */
94 : const char *label; /* security label for an object */
95 : Oid classoid; /* object class (catalog OID) */
96 : Oid objoid; /* object OID */
97 : int objsubid; /* subobject (table column #) */
98 : } SecLabelItem;
99 :
100 : typedef struct
101 : {
102 : Oid oid; /* object OID */
103 : char relkind; /* object kind */
104 : RelFileNumber relfilenumber; /* object filenode */
105 : Oid toast_oid; /* toast table OID */
106 : RelFileNumber toast_relfilenumber; /* toast table filenode */
107 : Oid toast_index_oid; /* toast table index OID */
108 : RelFileNumber toast_index_relfilenumber; /* toast table index filenode */
109 : } BinaryUpgradeClassOidItem;
110 :
111 : /* sequence types */
112 : typedef enum SeqType
113 : {
114 : SEQTYPE_SMALLINT,
115 : SEQTYPE_INTEGER,
116 : SEQTYPE_BIGINT,
117 : } SeqType;
118 :
119 : static const char *const SeqTypeNames[] =
120 : {
121 : [SEQTYPE_SMALLINT] = "smallint",
122 : [SEQTYPE_INTEGER] = "integer",
123 : [SEQTYPE_BIGINT] = "bigint",
124 : };
125 :
126 : StaticAssertDecl(lengthof(SeqTypeNames) == (SEQTYPE_BIGINT + 1),
127 : "array length mismatch");
128 :
129 : typedef struct
130 : {
131 : Oid oid; /* sequence OID */
132 : SeqType seqtype; /* data type of sequence */
133 : bool cycled; /* whether sequence cycles */
134 : int64 minv; /* minimum value */
135 : int64 maxv; /* maximum value */
136 : int64 startv; /* start value */
137 : int64 incby; /* increment value */
138 : int64 cache; /* cache size */
139 : int64 last_value; /* last value of sequence */
140 : bool is_called; /* whether nextval advances before returning */
141 : bool null_seqtuple; /* did pg_get_sequence_data return nulls? */
142 : } SequenceItem;
143 :
144 : typedef enum OidOptions
145 : {
146 : zeroIsError = 1,
147 : zeroAsStar = 2,
148 : zeroAsNone = 4,
149 : } OidOptions;
150 :
151 : /* global decls */
152 : static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
153 :
154 : static Oid g_last_builtin_oid; /* value of the last builtin oid */
155 :
156 : /* The specified names/patterns should match at least one entity */
157 : static int strict_names = 0;
158 :
159 : static pg_compress_algorithm compression_algorithm = PG_COMPRESSION_NONE;
160 :
161 : /*
162 : * Object inclusion/exclusion lists
163 : *
164 : * The string lists record the patterns given by command-line switches,
165 : * which we then convert to lists of OIDs of matching objects.
166 : */
167 : static SimpleStringList schema_include_patterns = {NULL, NULL};
168 : static SimpleOidList schema_include_oids = {NULL, NULL};
169 : static SimpleStringList schema_exclude_patterns = {NULL, NULL};
170 : static SimpleOidList schema_exclude_oids = {NULL, NULL};
171 :
172 : static SimpleStringList table_include_patterns = {NULL, NULL};
173 : static SimpleStringList table_include_patterns_and_children = {NULL, NULL};
174 : static SimpleOidList table_include_oids = {NULL, NULL};
175 : static SimpleStringList table_exclude_patterns = {NULL, NULL};
176 : static SimpleStringList table_exclude_patterns_and_children = {NULL, NULL};
177 : static SimpleOidList table_exclude_oids = {NULL, NULL};
178 : static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
179 : static SimpleStringList tabledata_exclude_patterns_and_children = {NULL, NULL};
180 : static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
181 :
182 : static SimpleStringList foreign_servers_include_patterns = {NULL, NULL};
183 : static SimpleOidList foreign_servers_include_oids = {NULL, NULL};
184 :
185 : static SimpleStringList extension_include_patterns = {NULL, NULL};
186 : static SimpleOidList extension_include_oids = {NULL, NULL};
187 :
188 : static SimpleStringList extension_exclude_patterns = {NULL, NULL};
189 : static SimpleOidList extension_exclude_oids = {NULL, NULL};
190 :
191 : static const CatalogId nilCatalogId = {0, 0};
192 :
193 : /* override for standard extra_float_digits setting */
194 : static bool have_extra_float_digits = false;
195 : static int extra_float_digits;
196 :
197 : /* sorted table of role names */
198 : static RoleNameItem *rolenames = NULL;
199 : static int nrolenames = 0;
200 :
201 : /* sorted table of comments */
202 : static CommentItem *comments = NULL;
203 : static int ncomments = 0;
204 :
205 : /* sorted table of security labels */
206 : static SecLabelItem *seclabels = NULL;
207 : static int nseclabels = 0;
208 :
209 : /* sorted table of pg_class information for binary upgrade */
210 : static BinaryUpgradeClassOidItem *binaryUpgradeClassOids = NULL;
211 : static int nbinaryUpgradeClassOids = 0;
212 :
213 : /* sorted table of sequences */
214 : static SequenceItem *sequences = NULL;
215 : static int nsequences = 0;
216 :
217 : /*
218 : * For binary upgrade, the dump ID of pg_largeobject_metadata is saved for use
219 : * as a dependency for pg_shdepend and any large object comments/seclabels.
220 : */
221 : static DumpId lo_metadata_dumpId;
222 :
223 : /* Maximum number of relations to fetch in a fetchAttributeStats() call. */
224 : #define MAX_ATTR_STATS_RELS 64
225 :
226 : /*
227 : * The default number of rows per INSERT when
228 : * --inserts is specified without --rows-per-insert
229 : */
230 : #define DUMP_DEFAULT_ROWS_PER_INSERT 1
231 :
232 : /*
233 : * Maximum number of large objects to group into a single ArchiveEntry.
234 : * At some point we might want to make this user-controllable, but for now
235 : * a hard-wired setting will suffice.
236 : */
237 : #define MAX_BLOBS_PER_ARCHIVE_ENTRY 1000
238 :
239 : /*
240 : * Macro for producing quoted, schema-qualified name of a dumpable object.
241 : */
242 : #define fmtQualifiedDumpable(obj) \
243 : fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
244 : (obj)->dobj.name)
245 :
246 : static void help(const char *progname);
247 : static void setup_connection(Archive *AH,
248 : const char *dumpencoding, const char *dumpsnapshot,
249 : char *use_role);
250 : static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
251 : static void expand_schema_name_patterns(Archive *fout,
252 : SimpleStringList *patterns,
253 : SimpleOidList *oids,
254 : bool strict_names);
255 : static void expand_extension_name_patterns(Archive *fout,
256 : SimpleStringList *patterns,
257 : SimpleOidList *oids,
258 : bool strict_names);
259 : static void expand_foreign_server_name_patterns(Archive *fout,
260 : SimpleStringList *patterns,
261 : SimpleOidList *oids);
262 : static void expand_table_name_patterns(Archive *fout,
263 : SimpleStringList *patterns,
264 : SimpleOidList *oids,
265 : bool strict_names,
266 : bool with_child_tables);
267 : static void prohibit_crossdb_refs(PGconn *conn, const char *dbname,
268 : const char *pattern);
269 :
270 : static NamespaceInfo *findNamespace(Oid nsoid);
271 : static void dumpTableData(Archive *fout, const TableDataInfo *tdinfo);
272 : static void refreshMatViewData(Archive *fout, const TableDataInfo *tdinfo);
273 : static const char *getRoleName(const char *roleoid_str);
274 : static void collectRoleNames(Archive *fout);
275 : static void getAdditionalACLs(Archive *fout);
276 : static void dumpCommentExtended(Archive *fout, const char *type,
277 : const char *name, const char *namespace,
278 : const char *owner, CatalogId catalogId,
279 : int subid, DumpId dumpId,
280 : const char *initdb_comment);
281 : static inline void dumpComment(Archive *fout, const char *type,
282 : const char *name, const char *namespace,
283 : const char *owner, CatalogId catalogId,
284 : int subid, DumpId dumpId);
285 : static int findComments(Oid classoid, Oid objoid, CommentItem **items);
286 : static void collectComments(Archive *fout);
287 : static void dumpSecLabel(Archive *fout, const char *type, const char *name,
288 : const char *namespace, const char *owner,
289 : CatalogId catalogId, int subid, DumpId dumpId);
290 : static int findSecLabels(Oid classoid, Oid objoid, SecLabelItem **items);
291 : static void collectSecLabels(Archive *fout);
292 : static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
293 : static void dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo);
294 : static void dumpExtension(Archive *fout, const ExtensionInfo *extinfo);
295 : static void dumpType(Archive *fout, const TypeInfo *tyinfo);
296 : static void dumpBaseType(Archive *fout, const TypeInfo *tyinfo);
297 : static void dumpEnumType(Archive *fout, const TypeInfo *tyinfo);
298 : static void dumpRangeType(Archive *fout, const TypeInfo *tyinfo);
299 : static void dumpUndefinedType(Archive *fout, const TypeInfo *tyinfo);
300 : static void dumpDomain(Archive *fout, const TypeInfo *tyinfo);
301 : static void dumpCompositeType(Archive *fout, const TypeInfo *tyinfo);
302 : static void dumpCompositeTypeColComments(Archive *fout, const TypeInfo *tyinfo,
303 : PGresult *res);
304 : static void dumpShellType(Archive *fout, const ShellTypeInfo *stinfo);
305 : static void dumpProcLang(Archive *fout, const ProcLangInfo *plang);
306 : static void dumpFunc(Archive *fout, const FuncInfo *finfo);
307 : static void dumpCast(Archive *fout, const CastInfo *cast);
308 : static void dumpTransform(Archive *fout, const TransformInfo *transform);
309 : static void dumpOpr(Archive *fout, const OprInfo *oprinfo);
310 : static void dumpAccessMethod(Archive *fout, const AccessMethodInfo *aminfo);
311 : static void dumpOpclass(Archive *fout, const OpclassInfo *opcinfo);
312 : static void dumpOpfamily(Archive *fout, const OpfamilyInfo *opfinfo);
313 : static void dumpCollation(Archive *fout, const CollInfo *collinfo);
314 : static void dumpConversion(Archive *fout, const ConvInfo *convinfo);
315 : static void dumpRule(Archive *fout, const RuleInfo *rinfo);
316 : static void dumpAgg(Archive *fout, const AggInfo *agginfo);
317 : static void dumpTrigger(Archive *fout, const TriggerInfo *tginfo);
318 : static void dumpEventTrigger(Archive *fout, const EventTriggerInfo *evtinfo);
319 : static void dumpTable(Archive *fout, const TableInfo *tbinfo);
320 : static void dumpTableSchema(Archive *fout, const TableInfo *tbinfo);
321 : static void dumpTableAttach(Archive *fout, const TableAttachInfo *attachinfo);
322 : static void dumpAttrDef(Archive *fout, const AttrDefInfo *adinfo);
323 : static void collectSequences(Archive *fout);
324 : static void dumpSequence(Archive *fout, const TableInfo *tbinfo);
325 : static void dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo);
326 : static void dumpIndex(Archive *fout, const IndxInfo *indxinfo);
327 : static void dumpIndexAttach(Archive *fout, const IndexAttachInfo *attachinfo);
328 : static void dumpStatisticsExt(Archive *fout, const StatsExtInfo *statsextinfo);
329 : static void dumpStatisticsExtStats(Archive *fout, const StatsExtInfo *statsextinfo);
330 : static void dumpConstraint(Archive *fout, const ConstraintInfo *coninfo);
331 : static void dumpTableConstraintComment(Archive *fout, const ConstraintInfo *coninfo);
332 : static void dumpTSParser(Archive *fout, const TSParserInfo *prsinfo);
333 : static void dumpTSDictionary(Archive *fout, const TSDictInfo *dictinfo);
334 : static void dumpTSTemplate(Archive *fout, const TSTemplateInfo *tmplinfo);
335 : static void dumpTSConfig(Archive *fout, const TSConfigInfo *cfginfo);
336 : static void dumpForeignDataWrapper(Archive *fout, const FdwInfo *fdwinfo);
337 : static void dumpForeignServer(Archive *fout, const ForeignServerInfo *srvinfo);
338 : static void dumpUserMappings(Archive *fout,
339 : const char *servername, const char *namespace,
340 : const char *owner, CatalogId catalogId, DumpId dumpId);
341 : static void dumpDefaultACL(Archive *fout, const DefaultACLInfo *daclinfo);
342 :
343 : static DumpId dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
344 : const char *type, const char *name, const char *subname,
345 : const char *nspname, const char *tag, const char *owner,
346 : const DumpableAcl *dacl);
347 :
348 : static void getDependencies(Archive *fout);
349 : static void BuildArchiveDependencies(Archive *fout);
350 : static void findDumpableDependencies(ArchiveHandle *AH, const DumpableObject *dobj,
351 : DumpId **dependencies, int *nDeps, int *allocDeps);
352 :
353 : static DumpableObject *createBoundaryObjects(void);
354 : static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
355 : DumpableObject *boundaryObjs);
356 :
357 : static void addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx);
358 : static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
359 : static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
360 : static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
361 : static void buildMatViewRefreshDependencies(Archive *fout);
362 : static void getTableDataFKConstraints(void);
363 : static void determineNotNullFlags(Archive *fout, PGresult *res, int r,
364 : TableInfo *tbinfo, int j,
365 : int i_notnull_name,
366 : int i_notnull_comment,
367 : int i_notnull_invalidoid,
368 : int i_notnull_noinherit,
369 : int i_notnull_islocal,
370 : PQExpBuffer *invalidnotnulloids);
371 : static char *format_function_arguments(const FuncInfo *finfo, const char *funcargs,
372 : bool is_agg);
373 : static char *format_function_signature(Archive *fout,
374 : const FuncInfo *finfo, bool honor_quotes);
375 : static char *convertRegProcReference(const char *proc);
376 : static char *getFormattedOperatorName(const char *oproid);
377 : static char *convertTSFunction(Archive *fout, Oid funcOid);
378 : static const char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
379 : static void getLOs(Archive *fout);
380 : static void dumpLO(Archive *fout, const LoInfo *loinfo);
381 : static int dumpLOs(Archive *fout, const void *arg);
382 : static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo);
383 : static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo);
384 : static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo);
385 : static void dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo);
386 : static void dumpSubscriptionTable(Archive *fout, const SubRelInfo *subrinfo);
387 : static void dumpDatabase(Archive *fout);
388 : static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
389 : const char *dbname, Oid dboid);
390 : static void dumpEncoding(Archive *AH);
391 : static void dumpStdStrings(Archive *AH);
392 : static void dumpSearchPath(Archive *AH);
393 : static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
394 : PQExpBuffer upgrade_buffer,
395 : Oid pg_type_oid,
396 : bool force_array_type,
397 : bool include_multirange_type);
398 : static void binary_upgrade_set_type_oids_by_rel(Archive *fout,
399 : PQExpBuffer upgrade_buffer,
400 : const TableInfo *tbinfo);
401 : static void collectBinaryUpgradeClassOids(Archive *fout);
402 : static void binary_upgrade_set_pg_class_oids(Archive *fout,
403 : PQExpBuffer upgrade_buffer,
404 : Oid pg_class_oid);
405 : static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
406 : const DumpableObject *dobj,
407 : const char *objtype,
408 : const char *objname,
409 : const char *objnamespace);
410 : static const char *getAttrName(int attrnum, const TableInfo *tblInfo);
411 : static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
412 : static bool nonemptyReloptions(const char *reloptions);
413 : static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
414 : const char *prefix, Archive *fout);
415 : static char *get_synchronized_snapshot(Archive *fout);
416 : static void set_restrict_relation_kind(Archive *AH, const char *value);
417 : static void setupDumpWorker(Archive *AH);
418 : static TableInfo *getRootTableInfo(const TableInfo *tbinfo);
419 : static bool forcePartitionRootLoad(const TableInfo *tbinfo);
420 : static void read_dump_filters(const char *filename, DumpOptions *dopt);
421 :
422 :
423 : int
424 596 : main(int argc, char **argv)
425 : {
426 : int c;
427 596 : const char *filename = NULL;
428 596 : const char *format = "p";
429 : TableInfo *tblinfo;
430 : int numTables;
431 : DumpableObject **dobjs;
432 : int numObjs;
433 : DumpableObject *boundaryObjs;
434 : int i;
435 : int optindex;
436 : RestoreOptions *ropt;
437 : Archive *fout; /* the script file */
438 596 : bool g_verbose = false;
439 596 : const char *dumpencoding = NULL;
440 596 : const char *dumpsnapshot = NULL;
441 596 : char *use_role = NULL;
442 596 : int numWorkers = 1;
443 596 : int plainText = 0;
444 596 : ArchiveFormat archiveFormat = archUnknown;
445 : ArchiveMode archiveMode;
446 596 : pg_compress_specification compression_spec = {0};
447 596 : char *compression_detail = NULL;
448 596 : char *compression_algorithm_str = "none";
449 596 : char *error_detail = NULL;
450 596 : bool user_compression_defined = false;
451 596 : DataDirSyncMethod sync_method = DATA_DIR_SYNC_METHOD_FSYNC;
452 596 : bool data_only = false;
453 596 : bool schema_only = false;
454 596 : bool statistics_only = false;
455 596 : bool with_statistics = false;
456 596 : bool no_data = false;
457 596 : bool no_schema = false;
458 596 : bool no_statistics = false;
459 :
460 : static DumpOptions dopt;
461 :
462 : static struct option long_options[] = {
463 : {"data-only", no_argument, NULL, 'a'},
464 : {"blobs", no_argument, NULL, 'b'},
465 : {"large-objects", no_argument, NULL, 'b'},
466 : {"no-blobs", no_argument, NULL, 'B'},
467 : {"no-large-objects", no_argument, NULL, 'B'},
468 : {"clean", no_argument, NULL, 'c'},
469 : {"create", no_argument, NULL, 'C'},
470 : {"dbname", required_argument, NULL, 'd'},
471 : {"extension", required_argument, NULL, 'e'},
472 : {"file", required_argument, NULL, 'f'},
473 : {"format", required_argument, NULL, 'F'},
474 : {"host", required_argument, NULL, 'h'},
475 : {"jobs", 1, NULL, 'j'},
476 : {"no-reconnect", no_argument, NULL, 'R'},
477 : {"no-owner", no_argument, NULL, 'O'},
478 : {"port", required_argument, NULL, 'p'},
479 : {"schema", required_argument, NULL, 'n'},
480 : {"exclude-schema", required_argument, NULL, 'N'},
481 : {"schema-only", no_argument, NULL, 's'},
482 : {"superuser", required_argument, NULL, 'S'},
483 : {"table", required_argument, NULL, 't'},
484 : {"exclude-table", required_argument, NULL, 'T'},
485 : {"no-password", no_argument, NULL, 'w'},
486 : {"password", no_argument, NULL, 'W'},
487 : {"username", required_argument, NULL, 'U'},
488 : {"verbose", no_argument, NULL, 'v'},
489 : {"no-privileges", no_argument, NULL, 'x'},
490 : {"no-acl", no_argument, NULL, 'x'},
491 : {"compress", required_argument, NULL, 'Z'},
492 : {"encoding", required_argument, NULL, 'E'},
493 : {"help", no_argument, NULL, '?'},
494 : {"version", no_argument, NULL, 'V'},
495 :
496 : /*
497 : * the following options don't have an equivalent short option letter
498 : */
499 : {"attribute-inserts", no_argument, &dopt.column_inserts, 1},
500 : {"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
501 : {"column-inserts", no_argument, &dopt.column_inserts, 1},
502 : {"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
503 : {"disable-triggers", no_argument, &dopt.disable_triggers, 1},
504 : {"enable-row-security", no_argument, &dopt.enable_row_security, 1},
505 : {"exclude-table-data", required_argument, NULL, 4},
506 : {"extra-float-digits", required_argument, NULL, 8},
507 : {"if-exists", no_argument, &dopt.if_exists, 1},
508 : {"inserts", no_argument, NULL, 9},
509 : {"lock-wait-timeout", required_argument, NULL, 2},
510 : {"no-table-access-method", no_argument, &dopt.outputNoTableAm, 1},
511 : {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
512 : {"quote-all-identifiers", no_argument, "e_all_identifiers, 1},
513 : {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
514 : {"role", required_argument, NULL, 3},
515 : {"section", required_argument, NULL, 5},
516 : {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
517 : {"snapshot", required_argument, NULL, 6},
518 : {"statistics", no_argument, NULL, 22},
519 : {"statistics-only", no_argument, NULL, 18},
520 : {"strict-names", no_argument, &strict_names, 1},
521 : {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
522 : {"no-comments", no_argument, &dopt.no_comments, 1},
523 : {"no-data", no_argument, NULL, 19},
524 : {"no-policies", no_argument, &dopt.no_policies, 1},
525 : {"no-publications", no_argument, &dopt.no_publications, 1},
526 : {"no-schema", no_argument, NULL, 20},
527 : {"no-security-labels", no_argument, &dopt.no_security_labels, 1},
528 : {"no-statistics", no_argument, NULL, 21},
529 : {"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
530 : {"no-toast-compression", no_argument, &dopt.no_toast_compression, 1},
531 : {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
532 : {"no-sync", no_argument, NULL, 7},
533 : {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
534 : {"rows-per-insert", required_argument, NULL, 10},
535 : {"include-foreign-data", required_argument, NULL, 11},
536 : {"table-and-children", required_argument, NULL, 12},
537 : {"exclude-table-and-children", required_argument, NULL, 13},
538 : {"exclude-table-data-and-children", required_argument, NULL, 14},
539 : {"sync-method", required_argument, NULL, 15},
540 : {"filter", required_argument, NULL, 16},
541 : {"exclude-extension", required_argument, NULL, 17},
542 : {"sequence-data", no_argument, &dopt.sequence_data, 1},
543 : {"restrict-key", required_argument, NULL, 25},
544 :
545 : {NULL, 0, NULL, 0}
546 : };
547 :
548 596 : pg_logging_init(argv[0]);
549 596 : pg_logging_set_level(PG_LOG_WARNING);
550 596 : set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
551 :
552 : /*
553 : * Initialize what we need for parallel execution, especially for thread
554 : * support on Windows.
555 : */
556 596 : init_parallel_dump_utils();
557 :
558 596 : progname = get_progname(argv[0]);
559 :
560 596 : if (argc > 1)
561 : {
562 596 : if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
563 : {
564 2 : help(progname);
565 2 : exit_nicely(0);
566 : }
567 594 : if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
568 : {
569 128 : puts("pg_dump (PostgreSQL) " PG_VERSION);
570 128 : exit_nicely(0);
571 : }
572 : }
573 :
574 466 : InitDumpOptions(&dopt);
575 :
576 2636 : while ((c = getopt_long(argc, argv, "abBcCd:e:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxXZ:",
577 2636 : long_options, &optindex)) != -1)
578 : {
579 2186 : switch (c)
580 : {
581 18 : case 'a': /* Dump data only */
582 18 : data_only = true;
583 18 : break;
584 :
585 2 : case 'b': /* Dump LOs */
586 2 : dopt.outputLOs = true;
587 2 : break;
588 :
589 4 : case 'B': /* Don't dump LOs */
590 4 : dopt.dontOutputLOs = true;
591 4 : break;
592 :
593 12 : case 'c': /* clean (i.e., drop) schema prior to create */
594 12 : dopt.outputClean = 1;
595 12 : break;
596 :
597 58 : case 'C': /* Create DB */
598 58 : dopt.outputCreateDB = 1;
599 58 : break;
600 :
601 10 : case 'd': /* database name */
602 10 : dopt.cparams.dbname = pg_strdup(optarg);
603 10 : break;
604 :
605 8 : case 'e': /* include extension(s) */
606 8 : simple_string_list_append(&extension_include_patterns, optarg);
607 8 : dopt.include_everything = false;
608 8 : break;
609 :
610 4 : case 'E': /* Dump encoding */
611 4 : dumpencoding = pg_strdup(optarg);
612 4 : break;
613 :
614 386 : case 'f':
615 386 : filename = pg_strdup(optarg);
616 386 : break;
617 :
618 226 : case 'F':
619 226 : format = pg_strdup(optarg);
620 226 : break;
621 :
622 72 : case 'h': /* server host */
623 72 : dopt.cparams.pghost = pg_strdup(optarg);
624 72 : break;
625 :
626 22 : case 'j': /* number of dump jobs */
627 22 : if (!option_parse_int(optarg, "-j/--jobs", 1,
628 : PG_MAX_JOBS,
629 : &numWorkers))
630 2 : exit_nicely(1);
631 20 : break;
632 :
633 34 : case 'n': /* include schema(s) */
634 34 : simple_string_list_append(&schema_include_patterns, optarg);
635 34 : dopt.include_everything = false;
636 34 : break;
637 :
638 2 : case 'N': /* exclude schema(s) */
639 2 : simple_string_list_append(&schema_exclude_patterns, optarg);
640 2 : break;
641 :
642 4 : case 'O': /* Don't reconnect to match owner */
643 4 : dopt.outputNoOwner = 1;
644 4 : break;
645 :
646 150 : case 'p': /* server port */
647 150 : dopt.cparams.pgport = pg_strdup(optarg);
648 150 : break;
649 :
650 4 : case 'R':
651 : /* no-op, still accepted for backwards compatibility */
652 4 : break;
653 :
654 14 : case 's': /* dump schema only */
655 14 : schema_only = true;
656 14 : break;
657 :
658 2 : case 'S': /* Username for superuser in plain text output */
659 2 : dopt.outputSuperuser = pg_strdup(optarg);
660 2 : break;
661 :
662 16 : case 't': /* include table(s) */
663 16 : simple_string_list_append(&table_include_patterns, optarg);
664 16 : dopt.include_everything = false;
665 16 : break;
666 :
667 8 : case 'T': /* exclude table(s) */
668 8 : simple_string_list_append(&table_exclude_patterns, optarg);
669 8 : break;
670 :
671 76 : case 'U':
672 76 : dopt.cparams.username = pg_strdup(optarg);
673 76 : break;
674 :
675 12 : case 'v': /* verbose */
676 12 : g_verbose = true;
677 12 : pg_logging_increase_verbosity();
678 12 : break;
679 :
680 2 : case 'w':
681 2 : dopt.cparams.promptPassword = TRI_NO;
682 2 : break;
683 :
684 0 : case 'W':
685 0 : dopt.cparams.promptPassword = TRI_YES;
686 0 : break;
687 :
688 4 : case 'x': /* skip ACL dump */
689 4 : dopt.aclsSkip = true;
690 4 : break;
691 :
692 26 : case 'Z': /* Compression */
693 26 : parse_compress_options(optarg, &compression_algorithm_str,
694 : &compression_detail);
695 26 : user_compression_defined = true;
696 26 : break;
697 :
698 270 : case 0:
699 : /* This covers the long options. */
700 270 : break;
701 :
702 4 : case 2: /* lock-wait-timeout */
703 4 : dopt.lockWaitTimeout = pg_strdup(optarg);
704 4 : break;
705 :
706 6 : case 3: /* SET ROLE */
707 6 : use_role = pg_strdup(optarg);
708 6 : break;
709 :
710 2 : case 4: /* exclude table(s) data */
711 2 : simple_string_list_append(&tabledata_exclude_patterns, optarg);
712 2 : break;
713 :
714 12 : case 5: /* section */
715 12 : set_dump_section(optarg, &dopt.dumpSections);
716 12 : break;
717 :
718 0 : case 6: /* snapshot */
719 0 : dumpsnapshot = pg_strdup(optarg);
720 0 : break;
721 :
722 300 : case 7: /* no-sync */
723 300 : dosync = false;
724 300 : break;
725 :
726 2 : case 8:
727 2 : have_extra_float_digits = true;
728 2 : if (!option_parse_int(optarg, "--extra-float-digits", -15, 3,
729 : &extra_float_digits))
730 2 : exit_nicely(1);
731 0 : break;
732 :
733 4 : case 9: /* inserts */
734 :
735 : /*
736 : * dump_inserts also stores --rows-per-insert, careful not to
737 : * overwrite that.
738 : */
739 4 : if (dopt.dump_inserts == 0)
740 4 : dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
741 4 : break;
742 :
743 4 : case 10: /* rows per insert */
744 4 : if (!option_parse_int(optarg, "--rows-per-insert", 1, INT_MAX,
745 : &dopt.dump_inserts))
746 2 : exit_nicely(1);
747 2 : break;
748 :
749 8 : case 11: /* include foreign data */
750 8 : simple_string_list_append(&foreign_servers_include_patterns,
751 : optarg);
752 8 : break;
753 :
754 2 : case 12: /* include table(s) and their children */
755 2 : simple_string_list_append(&table_include_patterns_and_children,
756 : optarg);
757 2 : dopt.include_everything = false;
758 2 : break;
759 :
760 2 : case 13: /* exclude table(s) and their children */
761 2 : simple_string_list_append(&table_exclude_patterns_and_children,
762 : optarg);
763 2 : break;
764 :
765 2 : case 14: /* exclude data of table(s) and children */
766 2 : simple_string_list_append(&tabledata_exclude_patterns_and_children,
767 : optarg);
768 2 : break;
769 :
770 0 : case 15:
771 0 : if (!parse_sync_method(optarg, &sync_method))
772 0 : exit_nicely(1);
773 0 : break;
774 :
775 52 : case 16: /* read object filters from file */
776 52 : read_dump_filters(optarg, &dopt);
777 44 : break;
778 :
779 2 : case 17: /* exclude extension(s) */
780 2 : simple_string_list_append(&extension_exclude_patterns,
781 : optarg);
782 2 : break;
783 :
784 8 : case 18:
785 8 : statistics_only = true;
786 8 : break;
787 :
788 76 : case 19:
789 76 : no_data = true;
790 76 : break;
791 :
792 4 : case 20:
793 4 : no_schema = true;
794 4 : break;
795 :
796 16 : case 21:
797 16 : no_statistics = true;
798 16 : break;
799 :
800 180 : case 22:
801 180 : with_statistics = true;
802 180 : break;
803 :
804 52 : case 25:
805 52 : dopt.restrict_key = pg_strdup(optarg);
806 52 : break;
807 :
808 2 : default:
809 : /* getopt_long already emitted a complaint */
810 2 : pg_log_error_hint("Try \"%s --help\" for more information.", progname);
811 2 : exit_nicely(1);
812 : }
813 : }
814 :
815 : /*
816 : * Non-option argument specifies database name as long as it wasn't
817 : * already specified with -d / --dbname
818 : */
819 450 : if (optind < argc && dopt.cparams.dbname == NULL)
820 378 : dopt.cparams.dbname = argv[optind++];
821 :
822 : /* Complain if any arguments remain */
823 450 : if (optind < argc)
824 : {
825 2 : pg_log_error("too many command-line arguments (first is \"%s\")",
826 : argv[optind]);
827 2 : pg_log_error_hint("Try \"%s --help\" for more information.", progname);
828 2 : exit_nicely(1);
829 : }
830 :
831 : /* --column-inserts implies --inserts */
832 448 : if (dopt.column_inserts && dopt.dump_inserts == 0)
833 2 : dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
834 :
835 : /* reject conflicting "-only" options */
836 448 : if (data_only && schema_only)
837 2 : pg_fatal("options %s and %s cannot be used together",
838 : "-s/--schema-only", "-a/--data-only");
839 446 : if (schema_only && statistics_only)
840 2 : pg_fatal("options %s and %s cannot be used together",
841 : "-s/--schema-only", "--statistics-only");
842 444 : if (data_only && statistics_only)
843 2 : pg_fatal("options %s and %s cannot be used together",
844 : "-a/--data-only", "--statistics-only");
845 :
846 : /* reject conflicting "-only" and "no-" options */
847 442 : if (data_only && no_data)
848 0 : pg_fatal("options %s and %s cannot be used together",
849 : "-a/--data-only", "--no-data");
850 442 : if (schema_only && no_schema)
851 0 : pg_fatal("options %s and %s cannot be used together",
852 : "-s/--schema-only", "--no-schema");
853 442 : if (statistics_only && no_statistics)
854 2 : pg_fatal("options %s and %s cannot be used together",
855 : "--statistics-only", "--no-statistics");
856 :
857 : /* reject conflicting "no-" options */
858 440 : if (with_statistics && no_statistics)
859 0 : pg_fatal("options %s and %s cannot be used together",
860 : "--statistics", "--no-statistics");
861 :
862 : /* reject conflicting "-only" options */
863 440 : if (data_only && with_statistics)
864 0 : pg_fatal("options %s and %s cannot be used together",
865 : "-a/--data-only", "--statistics");
866 440 : if (schema_only && with_statistics)
867 2 : pg_fatal("options %s and %s cannot be used together",
868 : "-s/--schema-only", "--statistics");
869 :
870 438 : if (schema_only && foreign_servers_include_patterns.head != NULL)
871 2 : pg_fatal("options %s and %s cannot be used together",
872 : "-s/--schema-only", "--include-foreign-data");
873 :
874 436 : if (numWorkers > 1 && foreign_servers_include_patterns.head != NULL)
875 2 : pg_fatal("option %s is not supported with parallel backup",
876 : "--include-foreign-data");
877 :
878 434 : if (data_only && dopt.outputClean)
879 2 : pg_fatal("options %s and %s cannot be used together",
880 : "-c/--clean", "-a/--data-only");
881 :
882 432 : if (dopt.if_exists && !dopt.outputClean)
883 2 : pg_fatal("option %s requires option %s",
884 : "--if-exists", "-c/--clean");
885 :
886 : /*
887 : * Set derivative flags. Ambiguous or nonsensical combinations, e.g.
888 : * "--schema-only --no-schema", will have already caused an error in one
889 : * of the checks above.
890 : */
891 430 : dopt.dumpData = ((dopt.dumpData && !schema_only && !statistics_only) ||
892 860 : data_only) && !no_data;
893 430 : dopt.dumpSchema = ((dopt.dumpSchema && !data_only && !statistics_only) ||
894 860 : schema_only) && !no_schema;
895 430 : dopt.dumpStatistics = ((dopt.dumpStatistics && !schema_only && !data_only) ||
896 860 : (statistics_only || with_statistics)) && !no_statistics;
897 :
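/*
 * Worked example of the derivations above: with --statistics-only and no
 * other switches, dumpData and dumpSchema both come out false and
 * dumpStatistics true; with -a/--data-only alone, only dumpData remains
 * true.
 */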
898 :
899 : /*
900 : * --inserts are already implied above if --column-inserts or
901 : * --rows-per-insert were specified.
902 : */
903 430 : if (dopt.do_nothing && dopt.dump_inserts == 0)
904 2 : pg_fatal("option %s requires option %s, %s, or %s",
905 : "--on-conflict-do-nothing",
906 : "--inserts", "--rows-per-insert", "--column-inserts");
907 :
908 : /* Identify archive format to emit */
909 428 : archiveFormat = parseArchiveFormat(format, &archiveMode);
910 :
911 : /* archiveFormat specific setup */
912 426 : if (archiveFormat == archNull)
913 : {
914 308 : plainText = 1;
915 :
916 : /*
917 : * If you don't provide a restrict key, one will be appointed for you.
918 : */
919 308 : if (!dopt.restrict_key)
920 256 : dopt.restrict_key = generate_restrict_key();
921 308 : if (!dopt.restrict_key)
922 0 : pg_fatal("could not generate restrict key");
923 308 : if (!valid_restrict_key(dopt.restrict_key))
924 0 : pg_fatal("invalid restrict key");
925 : }
926 118 : else if (dopt.restrict_key)
927 0 : pg_fatal("option %s can only be used with %s",
928 : "--restrict-key", "--format=plain");
929 :
930 : /*
931 : * Custom and directory formats are compressed by default with gzip when
932 : * available, not the others. If gzip is not available, no compression is
933 : * done by default.
934 : */
935 426 : if ((archiveFormat == archCustom || archiveFormat == archDirectory) &&
936 112 : !user_compression_defined)
937 : {
938 : #ifdef HAVE_LIBZ
939 100 : compression_algorithm_str = "gzip";
940 : #else
941 : compression_algorithm_str = "none";
942 : #endif
943 : }
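/*
 * Illustrative command lines for the defaulting above ("mydb" and "dumpdir"
 * are placeholder names; available methods depend on the build):
 *
 *     pg_dump -Fc mydb                        gzip-compressed if built with zlib
 *     pg_dump -Fc -Z none mydb                explicitly uncompressed
 *     pg_dump -Fd -Z zstd:5 -f dumpdir mydb   directory format, zstd level 5
 */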
944 :
945 : /*
946 : * Compression options
947 : */
948 426 : if (!parse_compress_algorithm(compression_algorithm_str,
949 : &compression_algorithm))
950 2 : pg_fatal("unrecognized compression algorithm: \"%s\"",
951 : compression_algorithm_str);
952 :
953 424 : parse_compress_specification(compression_algorithm, compression_detail,
954 : &compression_spec);
955 424 : error_detail = validate_compress_specification(&compression_spec);
956 424 : if (error_detail != NULL)
957 6 : pg_fatal("invalid compression specification: %s",
958 : error_detail);
959 :
960 418 : error_detail = supports_compression(compression_spec);
961 418 : if (error_detail != NULL)
962 0 : pg_fatal("%s", error_detail);
963 :
964 : /*
965 : * Disable support for zstd workers for now - these are based on
966 : * threading, and it's unclear how it interacts with parallel dumps on
967 : * platforms where that relies on threads too (e.g. Windows).
968 : */
969 418 : if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS)
970 0 : pg_log_warning("compression option \"%s\" is not currently supported by pg_dump",
971 : "workers");
972 :
973 : /*
974 : * If emitting an archive format, we always want to emit a DATABASE item,
975 : * in case --create is specified at pg_restore time.
976 : */
977 418 : if (!plainText)
978 118 : dopt.outputCreateDB = 1;
979 :
980 : /* Parallel backup only in the directory archive format so far */
981 418 : if (archiveFormat != archDirectory && numWorkers > 1)
982 2 : pg_fatal("parallel backup only supported by the directory format");
983 :
984 : /* Open the output file */
985 416 : fout = CreateArchive(filename, archiveFormat, compression_spec,
986 : dosync, archiveMode, setupDumpWorker, sync_method);
987 :
988 : /* Make dump options accessible right away */
989 414 : SetArchiveOptions(fout, &dopt, NULL);
990 :
991 : /* Register the cleanup hook */
992 414 : on_exit_close_archive(fout);
993 :
994 : /* Let the archiver know how noisy to be */
995 414 : fout->verbose = g_verbose;
996 :
997 :
998 : /*
999 : * We allow the server to be back to 9.2, and up to any minor release of
1000 : * our own major version. (See also version check in pg_dumpall.c.)
1001 : */
1002 414 : fout->minRemoteVersion = 90200;
1003 414 : fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
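/*
 * For example, with PG_VERSION_NUM 180001 the expression above yields
 * 180099, i.e. any minor release of major version 18 is accepted.
 */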
1004 :
1005 414 : fout->numWorkers = numWorkers;
1006 :
1007 : /*
1008 : * Open the database using the Archiver, so it knows about it. Errors mean
1009 : * death.
1010 : */
1011 414 : ConnectDatabaseAhx(fout, &dopt.cparams, false);
1012 410 : setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
1013 :
1014 : /*
1015 : * On hot standbys, never try to dump unlogged table data, since it will
1016 : * just throw an error.
1017 : */
1018 410 : if (fout->isStandby)
1019 8 : dopt.no_unlogged_table_data = true;
1020 :
1021 : /*
1022 : * Find the last built-in OID, if needed (prior to 8.1)
1023 : *
1024 : * With 8.1 and above, we can just use FirstNormalObjectId - 1.
1025 : */
1026 410 : g_last_builtin_oid = FirstNormalObjectId - 1;
1027 :
1028 410 : pg_log_info("last built-in OID is %u", g_last_builtin_oid);
1029 :
1030 : /* Expand schema selection patterns into OID lists */
1031 410 : if (schema_include_patterns.head != NULL)
1032 : {
1033 36 : expand_schema_name_patterns(fout, &schema_include_patterns,
1034 : &schema_include_oids,
1035 : strict_names);
1036 24 : if (schema_include_oids.head == NULL)
1037 2 : pg_fatal("no matching schemas were found");
1038 : }
1039 396 : expand_schema_name_patterns(fout, &schema_exclude_patterns,
1040 : &schema_exclude_oids,
1041 : false);
1042 : /* non-matching exclusion patterns aren't an error */
1043 :
1044 : /* Expand table selection patterns into OID lists */
1045 396 : expand_table_name_patterns(fout, &table_include_patterns,
1046 : &table_include_oids,
1047 : strict_names, false);
1048 386 : expand_table_name_patterns(fout, &table_include_patterns_and_children,
1049 : &table_include_oids,
1050 : strict_names, true);
1051 386 : if ((table_include_patterns.head != NULL ||
1052 364 : table_include_patterns_and_children.head != NULL) &&
1053 26 : table_include_oids.head == NULL)
1054 4 : pg_fatal("no matching tables were found");
1055 :
1056 382 : expand_table_name_patterns(fout, &table_exclude_patterns,
1057 : &table_exclude_oids,
1058 : false, false);
1059 382 : expand_table_name_patterns(fout, &table_exclude_patterns_and_children,
1060 : &table_exclude_oids,
1061 : false, true);
1062 :
1063 382 : expand_table_name_patterns(fout, &tabledata_exclude_patterns,
1064 : &tabledata_exclude_oids,
1065 : false, false);
1066 382 : expand_table_name_patterns(fout, &tabledata_exclude_patterns_and_children,
1067 : &tabledata_exclude_oids,
1068 : false, true);
1069 :
1070 382 : expand_foreign_server_name_patterns(fout, &foreign_servers_include_patterns,
1071 : &foreign_servers_include_oids);
1072 :
1073 : /* non-matching exclusion patterns aren't an error */
1074 :
1075 : /* Expand extension selection patterns into OID lists */
1076 380 : if (extension_include_patterns.head != NULL)
1077 : {
1078 10 : expand_extension_name_patterns(fout, &extension_include_patterns,
1079 : &extension_include_oids,
1080 : strict_names);
1081 10 : if (extension_include_oids.head == NULL)
1082 2 : pg_fatal("no matching extensions were found");
1083 : }
1084 378 : expand_extension_name_patterns(fout, &extension_exclude_patterns,
1085 : &extension_exclude_oids,
1086 : false);
1087 : /* non-matching exclusion patterns aren't an error */
1088 :
1089 : /*
1090 : * Dumping LOs is the default for dumps where an inclusion switch is not
1091 : * used (an "include everything" dump). -B can be used to exclude LOs
1092 : * from those dumps. -b can be used to include LOs even when an inclusion
1093 : * switch is used.
1094 : *
1095 : * -s means "schema only" and LOs are data, not schema, so we never
1096 : * include LOs when -s is used.
1097 : */
1098 378 : if (dopt.include_everything && dopt.dumpData && !dopt.dontOutputLOs)
1099 244 : dopt.outputLOs = true;
1100 :
1101 : /*
1102 : * Collect role names so we can map object owner OIDs to names.
1103 : */
1104 378 : collectRoleNames(fout);
1105 :
1106 : /*
1107 : * Now scan the database and create DumpableObject structs for all the
1108 : * objects we intend to dump.
1109 : */
1110 378 : tblinfo = getSchemaData(fout, &numTables);
1111 :
1112 376 : if (dopt.dumpData)
1113 : {
1114 292 : getTableData(&dopt, tblinfo, numTables, 0);
1115 292 : buildMatViewRefreshDependencies(fout);
1116 292 : if (!dopt.dumpSchema)
1117 14 : getTableDataFKConstraints();
1118 : }
1119 :
1120 376 : if (!dopt.dumpData && dopt.sequence_data)
1121 68 : getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
1122 :
1123 : /*
1124 : * For binary upgrade mode, dump pg_largeobject_metadata and the
1125 : * associated pg_shdepend rows. This is faster to restore than the
1126 : * equivalent set of large object commands. We can only do this for
1127 : * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
1128 : * was created WITH OIDS, so the OID column is hidden and won't be dumped.
1129 : */
1130 376 : if (dopt.binary_upgrade && fout->remoteVersion >= 120000)
1131 : {
1132 76 : TableInfo *lo_metadata = findTableByOid(LargeObjectMetadataRelationId);
1133 76 : TableInfo *shdepend = findTableByOid(SharedDependRelationId);
1134 :
1135 76 : makeTableDataInfo(&dopt, lo_metadata);
1136 76 : makeTableDataInfo(&dopt, shdepend);
1137 :
1138 : /*
1139 : * Save pg_largeobject_metadata's dump ID for use as a dependency for
1140 : * pg_shdepend and any large object comments/seclabels.
1141 : */
1142 76 : lo_metadata_dumpId = lo_metadata->dataObj->dobj.dumpId;
1143 76 : addObjectDependency(&shdepend->dataObj->dobj, lo_metadata_dumpId);
1144 :
1145 : /*
1146 : * Only dump large object shdepend rows for this database.
1147 : */
1148 76 : shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass "
1149 : "AND dbid = (SELECT oid FROM pg_database "
1150 : " WHERE datname = current_database())";
1151 :
1152 : /*
1153 : * If upgrading from v16 or newer, only dump large objects with
1154 : * comments/seclabels. For these upgrades, pg_upgrade can copy/link
1155 : * pg_largeobject_metadata's files (which is usually faster) but we
1156 : * still need to dump LOs with comments/seclabels here so that the
1157 : * subsequent COMMENT and SECURITY LABEL commands work. pg_upgrade
1158 : * can't copy/link the files from older versions because aclitem
1159 : * (needed by pg_largeobject_metadata.lomacl) changed its storage
1160 : * format in v16.
1161 : */
1162 76 : if (fout->remoteVersion >= 160000)
1163 76 : lo_metadata->dataObj->filtercond = "WHERE oid IN "
1164 : "(SELECT objoid FROM pg_description "
1165 : "WHERE classoid = " CppAsString2(LargeObjectRelationId) " "
1166 : "UNION SELECT objoid FROM pg_seclabel "
1167 : "WHERE classoid = " CppAsString2(LargeObjectRelationId) ")";
1168 : }
1169 :
1170 : /*
1171 : * In binary-upgrade mode, we do not have to worry about the actual LO
1172 : * data or the associated metadata that resides in the pg_largeobject and
1173 : * pg_largeobject_metadata tables, respectively.
1174 : *
1175 : * However, we do need to collect LO information as there may be comments
1176 : * or other information on LOs that we do need to dump out.
1177 : */
1178 376 : if (dopt.outputLOs || dopt.binary_upgrade)
1179 320 : getLOs(fout);
1180 :
1181 : /*
1182 : * Collect dependency data to assist in ordering the objects.
1183 : */
1184 376 : getDependencies(fout);
1185 :
1186 : /*
1187 : * Collect ACLs, comments, and security labels, if wanted.
1188 : */
1189 376 : if (!dopt.aclsSkip)
1190 372 : getAdditionalACLs(fout);
1191 376 : if (!dopt.no_comments)
1192 376 : collectComments(fout);
1193 376 : if (!dopt.no_security_labels)
1194 376 : collectSecLabels(fout);
1195 :
1196 : /* For binary upgrade mode, collect required pg_class information. */
1197 376 : if (dopt.binary_upgrade)
1198 76 : collectBinaryUpgradeClassOids(fout);
1199 :
1200 : /* Collect sequence information. */
1201 376 : collectSequences(fout);
1202 :
1203 : /* Lastly, create dummy objects to represent the section boundaries */
1204 376 : boundaryObjs = createBoundaryObjects();
1205 :
1206 : /* Get pointers to all the known DumpableObjects */
1207 376 : getDumpableObjects(&dobjs, &numObjs);
1208 :
1209 : /*
1210 : * Add dummy dependencies to enforce the dump section ordering.
1211 : */
1212 376 : addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
1213 :
1214 : /*
1215 : * Sort the objects into a safe dump order (no forward references).
1216 : *
1217 : * We rely on dependency information to help us determine a safe order, so
1218 : * the initial sort is mostly for cosmetic purposes: we sort by name to
1219 : * ensure that logically identical schemas will dump identically.
1220 : */
1221 376 : sortDumpableObjectsByTypeName(dobjs, numObjs);
1222 :
1223 376 : sortDumpableObjects(dobjs, numObjs,
1224 376 : boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
1225 :
1226 : /*
1227 : * Create archive TOC entries for all the objects to be dumped, in a safe
1228 : * order.
1229 : */
1230 :
1231 : /*
1232 : * First the special entries for ENCODING, STDSTRINGS, and SEARCHPATH.
1233 : */
1234 376 : dumpEncoding(fout);
1235 376 : dumpStdStrings(fout);
1236 376 : dumpSearchPath(fout);
1237 :
1238 : /* The database items are always next, unless we don't want them at all */
1239 376 : if (dopt.outputCreateDB)
1240 174 : dumpDatabase(fout);
1241 :
1242 : /* Now the rearrangeable objects. */
1243 1402260 : for (i = 0; i < numObjs; i++)
1244 1401884 : dumpDumpableObject(fout, dobjs[i]);
1245 :
1246 : /*
1247 : * Set up options info to ensure we dump what we want.
1248 : */
1249 376 : ropt = NewRestoreOptions();
1250 376 : ropt->filename = filename;
1251 :
1252 : /* if you change this list, see dumpOptionsFromRestoreOptions */
1253 376 : ropt->cparams.dbname = dopt.cparams.dbname ? pg_strdup(dopt.cparams.dbname) : NULL;
1254 376 : ropt->cparams.pgport = dopt.cparams.pgport ? pg_strdup(dopt.cparams.pgport) : NULL;
1255 376 : ropt->cparams.pghost = dopt.cparams.pghost ? pg_strdup(dopt.cparams.pghost) : NULL;
1256 376 : ropt->cparams.username = dopt.cparams.username ? pg_strdup(dopt.cparams.username) : NULL;
1257 376 : ropt->cparams.promptPassword = dopt.cparams.promptPassword;
1258 376 : ropt->dropSchema = dopt.outputClean;
1259 376 : ropt->dumpData = dopt.dumpData;
1260 376 : ropt->dumpSchema = dopt.dumpSchema;
1261 376 : ropt->dumpStatistics = dopt.dumpStatistics;
1262 376 : ropt->if_exists = dopt.if_exists;
1263 376 : ropt->column_inserts = dopt.column_inserts;
1264 376 : ropt->dumpSections = dopt.dumpSections;
1265 376 : ropt->aclsSkip = dopt.aclsSkip;
1266 376 : ropt->superuser = dopt.outputSuperuser;
1267 376 : ropt->createDB = dopt.outputCreateDB;
1268 376 : ropt->noOwner = dopt.outputNoOwner;
1269 376 : ropt->noTableAm = dopt.outputNoTableAm;
1270 376 : ropt->noTablespace = dopt.outputNoTablespaces;
1271 376 : ropt->disable_triggers = dopt.disable_triggers;
1272 376 : ropt->use_setsessauth = dopt.use_setsessauth;
1273 376 : ropt->disable_dollar_quoting = dopt.disable_dollar_quoting;
1274 376 : ropt->dump_inserts = dopt.dump_inserts;
1275 376 : ropt->no_comments = dopt.no_comments;
1276 376 : ropt->no_policies = dopt.no_policies;
1277 376 : ropt->no_publications = dopt.no_publications;
1278 376 : ropt->no_security_labels = dopt.no_security_labels;
1279 376 : ropt->no_subscriptions = dopt.no_subscriptions;
1280 376 : ropt->lockWaitTimeout = dopt.lockWaitTimeout;
1281 376 : ropt->include_everything = dopt.include_everything;
1282 376 : ropt->enable_row_security = dopt.enable_row_security;
1283 376 : ropt->sequence_data = dopt.sequence_data;
1284 376 : ropt->binary_upgrade = dopt.binary_upgrade;
1285 376 : ropt->restrict_key = dopt.restrict_key ? pg_strdup(dopt.restrict_key) : NULL;
1286 :
1287 376 : ropt->compression_spec = compression_spec;
1288 :
1289 376 : ropt->suppressDumpWarnings = true; /* We've already shown them */
1290 :
1291 376 : SetArchiveOptions(fout, &dopt, ropt);
1292 :
1293 : /* Mark which entries should be output */
1294 376 : ProcessArchiveRestoreOptions(fout);
1295 :
1296 : /*
1297 : * The archive's TOC entries are now marked as to which ones will actually
1298 : * be output, so we can set up their dependency lists properly. This isn't
1299 : * necessary for plain-text output, though.
1300 : */
1301 376 : if (!plainText)
1302 116 : BuildArchiveDependencies(fout);
1303 :
1304 : /*
1305 : * And finally we can do the actual output.
1306 : *
1307 : * Note: for non-plain-text output formats, the output file is written
1308 : * inside CloseArchive(). This is, um, bizarre; but not worth changing
1309 : * right now.
1310 : */
1311 376 : if (plainText)
1312 260 : RestoreArchive(fout);
1313 :
1314 374 : CloseArchive(fout);
1315 :
1316 374 : exit_nicely(0);
1317 : }
1318 :
1319 :
1320 : static void
1321 2 : help(const char *progname)
1322 : {
1323 2 : printf(_("%s exports a PostgreSQL database as an SQL script or to other formats.\n\n"), progname);
1324 2 : printf(_("Usage:\n"));
1325 2 : printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
1326 :
1327 2 : printf(_("\nGeneral options:\n"));
1328 2 : printf(_(" -f, --file=FILENAME output file or directory name\n"));
1329 2 : printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
1330 : " plain text (default))\n"));
1331 2 : printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
1332 2 : printf(_(" -v, --verbose verbose mode\n"));
1333 2 : printf(_(" -V, --version output version information, then exit\n"));
1334 2 : printf(_(" -Z, --compress=METHOD[:DETAIL]\n"
1335 : " compress as specified\n"));
1336 2 : printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
1337 2 : printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
1338 2 : printf(_(" --sync-method=METHOD set method for syncing files to disk\n"));
1339 2 : printf(_(" -?, --help show this help, then exit\n"));
1340 :
1341 2 : printf(_("\nOptions controlling the output content:\n"));
1342 2 : printf(_(" -a, --data-only dump only the data, not the schema or statistics\n"));
1343 2 : printf(_(" -b, --large-objects include large objects in dump\n"));
1344 2 : printf(_(" --blobs (same as --large-objects, deprecated)\n"));
1345 2 : printf(_(" -B, --no-large-objects exclude large objects in dump\n"));
1346 2 : printf(_(" --no-blobs (same as --no-large-objects, deprecated)\n"));
1347 2 : printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
1348 2 : printf(_(" -C, --create include commands to create database in dump\n"));
1349 2 : printf(_(" -e, --extension=PATTERN dump the specified extension(s) only\n"));
1350 2 : printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
1351 2 : printf(_(" -n, --schema=PATTERN dump the specified schema(s) only\n"));
1352 2 : printf(_(" -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
1353 2 : printf(_(" -O, --no-owner skip restoration of object ownership in\n"
1354 : " plain-text format\n"));
1355 2 : printf(_(" -s, --schema-only dump only the schema, no data or statistics\n"));
1356 2 : printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
1357 2 : printf(_(" -t, --table=PATTERN dump only the specified table(s)\n"));
1358 2 : printf(_(" -T, --exclude-table=PATTERN do NOT dump the specified table(s)\n"));
1359 2 : printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
1360 2 : printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
1361 2 : printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
1362 2 : printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
1363 2 : printf(_(" --disable-triggers disable triggers during data-only restore\n"));
1364 2 : printf(_(" --enable-row-security enable row security (dump only content user has\n"
1365 : " access to)\n"));
1366 2 : printf(_(" --exclude-extension=PATTERN do NOT dump the specified extension(s)\n"));
1367 2 : printf(_(" --exclude-table-and-children=PATTERN\n"
1368 : " do NOT dump the specified table(s), including\n"
1369 : " child and partition tables\n"));
1370 2 : printf(_(" --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
1371 2 : printf(_(" --exclude-table-data-and-children=PATTERN\n"
1372 : " do NOT dump data for the specified table(s),\n"
1373 : " including child and partition tables\n"));
1374 2 : printf(_(" --extra-float-digits=NUM override default setting for extra_float_digits\n"));
1375 2 : printf(_(" --filter=FILENAME include or exclude objects and data from dump\n"
1376 : " based on expressions in FILENAME\n"));
1377 2 : printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
1378 2 : printf(_(" --include-foreign-data=PATTERN\n"
1379 : " include data of foreign tables on foreign\n"
1380 : " servers matching PATTERN\n"));
1381 2 : printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
1382 2 : printf(_(" --load-via-partition-root load partitions via the root table\n"));
1383 2 : printf(_(" --no-comments do not dump comment commands\n"));
1384 2 : printf(_(" --no-data do not dump data\n"));
1385 2 : printf(_(" --no-policies do not dump row security policies\n"));
1386 2 : printf(_(" --no-publications do not dump publications\n"));
1387 2 : printf(_(" --no-schema do not dump schema\n"));
1388 2 : printf(_(" --no-security-labels do not dump security label assignments\n"));
1389 2 : printf(_(" --no-statistics do not dump statistics\n"));
1390 2 : printf(_(" --no-subscriptions do not dump subscriptions\n"));
1391 2 : printf(_(" --no-table-access-method do not dump table access methods\n"));
1392 2 : printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
1393 2 : printf(_(" --no-toast-compression do not dump TOAST compression methods\n"));
1394 2 : printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
1395 2 : printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
1396 2 : printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
1397 2 : printf(_(" --restrict-key=RESTRICT_KEY use provided string as psql \\restrict key\n"));
1398 2 : printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
1399 2 : printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
1400 2 : printf(_(" --sequence-data include sequence data in dump\n"));
1401 2 : printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
1402 2 : printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
1403 2 : printf(_(" --statistics dump the statistics\n"));
1404 2 : printf(_(" --statistics-only dump only the statistics, not schema or data\n"));
1405 2 : printf(_(" --strict-names require table and/or schema include patterns to\n"
1406 : " match at least one entity each\n"));
1407 2 : printf(_(" --table-and-children=PATTERN dump only the specified table(s), including\n"
1408 : " child and partition tables\n"));
1409 2 : printf(_(" --use-set-session-authorization\n"
1410 : " use SET SESSION AUTHORIZATION commands instead of\n"
1411 : " ALTER OWNER commands to set ownership\n"));
1412 :
1413 2 : printf(_("\nConnection options:\n"));
1414 2 : printf(_(" -d, --dbname=DBNAME database to dump\n"));
1415 2 : printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
1416 2 : printf(_(" -p, --port=PORT database server port number\n"));
1417 2 : printf(_(" -U, --username=NAME connect as specified database user\n"));
1418 2 : printf(_(" -w, --no-password never prompt for password\n"));
1419 2 : printf(_(" -W, --password force password prompt (should happen automatically)\n"));
1420 2 : printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
1421 :
1422 2 : printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
1423 : "variable value is used.\n\n"));
1424 2 : printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
1425 2 : printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
1426 2 : }
1427 :
1428 : static void
1429 442 : setup_connection(Archive *AH, const char *dumpencoding,
1430 : const char *dumpsnapshot, char *use_role)
1431 : {
1432 442 : DumpOptions *dopt = AH->dopt;
1433 442 : PGconn *conn = GetConnection(AH);
1434 :
1435 442 : PQclear(ExecuteSqlQueryForSingleRow(AH, ALWAYS_SECURE_SEARCH_PATH_SQL));
1436 :
1437 : /*
1438 : * Set the client encoding if requested.
1439 : */
1440 442 : if (dumpencoding)
1441 : {
1442 36 : if (PQsetClientEncoding(conn, dumpencoding) < 0)
1443 0 : pg_fatal("invalid client encoding \"%s\" specified",
1444 : dumpencoding);
1445 : }
1446 :
1447 : /*
1448 : * Force standard_conforming_strings on, just in case we are dumping from
1449 : * an old server that has it disabled. Without this, literals in views,
1450 : * expressions, etc, would be incorrect for modern servers.
1451 : */
1452 442 : ExecuteSqlStatement(AH, "SET standard_conforming_strings = on");
1453 :
1454 : /*
1455 : * And reflect that to AH->std_strings. You might think that we should
1456 : * just delete that variable and the code that checks it, but that would
1457 : * be problematic for pg_restore, which at least for now should still cope
1458 : * with archives containing the other setting (cf. processStdStringsEntry
1459 : * in pg_backup_archiver.c).
1460 : */
1461 442 : AH->std_strings = true;
1462 :
1463 : /*
1464 : * Get the active encoding, so we know how to escape strings.
1465 : */
1466 442 : AH->encoding = PQclientEncoding(conn);
1467 442 : setFmtEncoding(AH->encoding);
1468 :
1469 : /*
1470 : * Set the role if requested. In a parallel dump worker, we'll be passed
1471 : * use_role == NULL, but AH->use_role is already set (if user specified it
1472 : * originally) and we should use that.
1473 : */
1474 442 : if (!use_role && AH->use_role)
1475 4 : use_role = AH->use_role;
1476 :
1477 : /* Set the role if requested */
1478 442 : if (use_role)
1479 : {
1480 10 : PQExpBuffer query = createPQExpBuffer();
1481 :
1482 10 : appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
1483 10 : ExecuteSqlStatement(AH, query->data);
1484 10 : destroyPQExpBuffer(query);
1485 :
1486 : /* save it for possible later use by parallel workers */
1487 10 : if (!AH->use_role)
1488 6 : AH->use_role = pg_strdup(use_role);
1489 : }
1490 :
1491 : /* Set the datestyle to ISO to ensure the dump's portability */
1492 442 : ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
1493 :
1494 : /* Likewise, avoid using sql_standard intervalstyle */
1495 442 : ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
1496 :
1497 : /*
1498 : * Use an explicitly specified extra_float_digits if it has been provided.
1499 : * Otherwise, set extra_float_digits so that we can dump float data
1500 : * exactly (given correctly implemented float I/O code, anyway).
1501 : */
1502 442 : if (have_extra_float_digits)
1503 : {
1504 0 : PQExpBuffer q = createPQExpBuffer();
1505 :
1506 0 : appendPQExpBuffer(q, "SET extra_float_digits TO %d",
1507 : extra_float_digits);
1508 0 : ExecuteSqlStatement(AH, q->data);
1509 0 : destroyPQExpBuffer(q);
1510 : }
1511 : else
1512 442 : ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
1513 :
1514 : /*
1515 : * Disable synchronized scanning, to prevent unpredictable changes in row
1516 : * ordering across a dump and reload.
1517 : */
1518 442 : ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
1519 :
1520 : /*
1521 : * Disable timeouts if supported.
1522 : */
1523 442 : ExecuteSqlStatement(AH, "SET statement_timeout = 0");
1524 442 : if (AH->remoteVersion >= 90300)
1525 442 : ExecuteSqlStatement(AH, "SET lock_timeout = 0");
1526 442 : if (AH->remoteVersion >= 90600)
1527 442 : ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
1528 442 : if (AH->remoteVersion >= 170000)
1529 442 : ExecuteSqlStatement(AH, "SET transaction_timeout = 0");
1530 :
1531 : /*
1532 : * Quote all identifiers, if requested.
1533 : */
1534 442 : if (quote_all_identifiers)
1535 72 : ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
1536 :
1537 : /*
1538 : * Adjust row-security mode, if supported.
1539 : */
1540 442 : if (AH->remoteVersion >= 90500)
1541 : {
1542 442 : if (dopt->enable_row_security)
1543 0 : ExecuteSqlStatement(AH, "SET row_security = on");
1544 : else
1545 442 : ExecuteSqlStatement(AH, "SET row_security = off");
1546 : }
1547 :
1548 : /*
1549 : * For security reasons, we restrict the expansion of non-system views and
1550 : * access to foreign tables during the pg_dump process. This restriction
1551 : * is adjusted when dumping foreign table data.
1552 : */
1553 442 : set_restrict_relation_kind(AH, "view, foreign-table");
1554 :
1555 : /*
1556 : * Initialize prepared-query state to "nothing prepared". We do this here
1557 : * so that a parallel dump worker will have its own state.
1558 : */
1559 442 : AH->is_prepared = (bool *) pg_malloc0(NUM_PREP_QUERIES * sizeof(bool));
1560 :
1561 : /*
1562 : * Start transaction-snapshot mode transaction to dump consistent data.
1563 : */
1564 442 : ExecuteSqlStatement(AH, "BEGIN");
1565 :
1566 : /*
1567 : * To support the combination of serializable_deferrable with the jobs
1568 : * option we use REPEATABLE READ for the worker connections that are
1569 : * passed a snapshot. As long as the snapshot is acquired in a
1570 : * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
1571 : * REPEATABLE READ transaction provides the appropriate integrity
1572 : * guarantees. This is a kluge, but safe for back-patching.
1573 : */
1574 442 : if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
1575 0 : ExecuteSqlStatement(AH,
1576 : "SET TRANSACTION ISOLATION LEVEL "
1577 : "SERIALIZABLE, READ ONLY, DEFERRABLE");
1578 : else
1579 442 : ExecuteSqlStatement(AH,
1580 : "SET TRANSACTION ISOLATION LEVEL "
1581 : "REPEATABLE READ, READ ONLY");
1582 :
1583 : /*
1584 : * If user specified a snapshot to use, select that. In a parallel dump
1585 : * worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
1586 : * is already set (if the server can handle it) and we should use that.
1587 : */
1588 442 : if (dumpsnapshot)
1589 0 : AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
1590 :
1591 442 : if (AH->sync_snapshot_id)
1592 : {
1593 32 : PQExpBuffer query = createPQExpBuffer();
1594 :
1595 32 : appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
1596 32 : appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
1597 32 : ExecuteSqlStatement(AH, query->data);
1598 32 : destroyPQExpBuffer(query);
1599 : }
1600 410 : else if (AH->numWorkers > 1)
1601 : {
1602 16 : if (AH->isStandby && AH->remoteVersion < 100000)
1603 0 : pg_fatal("parallel dumps from standby servers are not supported by this server version");
1604 16 : AH->sync_snapshot_id = get_synchronized_snapshot(AH);
1605 : }
1606 442 : }
1607 :
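          : /*
          :  * Illustrative summary (not part of pg_dump itself): against a current
          :  * server, the session setup above amounts to roughly the following
          :  * sequence of commands; the role name and float-digits value are
          :  * examples only:
          :  *
          :  *   SET standard_conforming_strings = on;
          :  *   SET ROLE some_role;                          -- only with --role
          :  *   SET DATESTYLE = ISO;
          :  *   SET INTERVALSTYLE = POSTGRES;
          :  *   SET extra_float_digits TO 3;                 -- or --extra-float-digits=N
          :  *   SET synchronize_seqscans TO off;
          :  *   SET statement_timeout = 0;
          :  *   SET lock_timeout = 0;
          :  *   SET idle_in_transaction_session_timeout = 0;
          :  *   SET transaction_timeout = 0;                 -- v17 and later
          :  *   SET row_security = off;                      -- or on, with --enable-row-security
          :  *   BEGIN;
          :  *   SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY;
          :  */
          :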
1608 : /* Set up connection for a parallel worker process */
1609 : static void
1610 32 : setupDumpWorker(Archive *AH)
1611 : {
1612 : /*
1613 : * We want to re-select all the same values the leader connection is
1614 : * using. We'll have inherited directly-usable values in
1615 : * AH->sync_snapshot_id and AH->use_role, but we need to translate the
1616 : * inherited encoding value back to a string to pass to setup_connection.
1617 : */
1618 32 : setup_connection(AH,
1619 : pg_encoding_to_char(AH->encoding),
1620 : NULL,
1621 : NULL);
1622 32 : }
1623 :
1624 : static char *
1625 16 : get_synchronized_snapshot(Archive *fout)
1626 : {
1627 16 : char *query = "SELECT pg_catalog.pg_export_snapshot()";
1628 : char *result;
1629 : PGresult *res;
1630 :
1631 16 : res = ExecuteSqlQueryForSingleRow(fout, query);
1632 16 : result = pg_strdup(PQgetvalue(res, 0, 0));
1633 16 : PQclear(res);
1634 :
1635 16 : return result;
1636 : }
1637 :
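          : /*
          :  * Illustrative sketch (the snapshot identifier is an example): the
          :  * leader connection exports its snapshot with
          :  *
          :  *   SELECT pg_catalog.pg_export_snapshot();    -- returns e.g. '00000004-0000002A-1'
          :  *
          :  * and each parallel worker, back in setup_connection() above, adopts it
          :  * inside its own REPEATABLE READ, READ ONLY transaction with
          :  *
          :  *   SET TRANSACTION SNAPSHOT '00000004-0000002A-1';
          :  *
          :  * so that all connections of a parallel dump see the same database state.
          :  */
          :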
1638 : static ArchiveFormat
1639 428 : parseArchiveFormat(const char *format, ArchiveMode *mode)
1640 : {
1641 : ArchiveFormat archiveFormat;
1642 :
1643 428 : *mode = archModeWrite;
1644 :
1645 428 : if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
1646 : {
1647 : /* This is used by pg_dumpall, and is not documented */
1648 98 : archiveFormat = archNull;
1649 98 : *mode = archModeAppend;
1650 : }
1651 330 : else if (pg_strcasecmp(format, "c") == 0)
1652 0 : archiveFormat = archCustom;
1653 330 : else if (pg_strcasecmp(format, "custom") == 0)
1654 92 : archiveFormat = archCustom;
1655 238 : else if (pg_strcasecmp(format, "d") == 0)
1656 0 : archiveFormat = archDirectory;
1657 238 : else if (pg_strcasecmp(format, "directory") == 0)
1658 20 : archiveFormat = archDirectory;
1659 218 : else if (pg_strcasecmp(format, "p") == 0)
1660 204 : archiveFormat = archNull;
1661 14 : else if (pg_strcasecmp(format, "plain") == 0)
1662 6 : archiveFormat = archNull;
1663 8 : else if (pg_strcasecmp(format, "t") == 0)
1664 0 : archiveFormat = archTar;
1665 8 : else if (pg_strcasecmp(format, "tar") == 0)
1666 6 : archiveFormat = archTar;
1667 : else
1668 2 : pg_fatal("invalid output format \"%s\" specified", format);
1669 426 : return archiveFormat;
1670 : }
1671 :
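          : /*
          :  * Usage note (illustrative command lines; "mydb" and the file names are
          :  * examples): the format letters accepted above correspond to
          :  *
          :  *   pg_dump -F p mydb > mydb.sql        -- plain SQL script (the default)
          :  *   pg_dump -F c -f mydb.dump mydb      -- custom-format archive
          :  *   pg_dump -F d -f mydb.dir mydb       -- directory archive (usable with -j)
          :  *   pg_dump -F t -f mydb.tar mydb       -- tar archive
          :  *
          :  * The "a"/"append" spelling is reserved for pg_dumpall, as noted above.
          :  */
          :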
1672 : /*
1673 : * Find the OIDs of all schemas matching the given list of patterns,
1674 : * and append them to the given OID list.
1675 : */
1676 : static void
1677 432 : expand_schema_name_patterns(Archive *fout,
1678 : SimpleStringList *patterns,
1679 : SimpleOidList *oids,
1680 : bool strict_names)
1681 : {
1682 : PQExpBuffer query;
1683 : PGresult *res;
1684 : SimpleStringListCell *cell;
1685 : int i;
1686 :
1687 432 : if (patterns->head == NULL)
1688 390 : return; /* nothing to do */
1689 :
1690 42 : query = createPQExpBuffer();
1691 :
1692 : /*
1693              :  * The loop below runs multiple SELECTs, which might sometimes result
1694              :  * in duplicate entries in the OID list, but we don't care.
1695 : */
1696 :
1697 72 : for (cell = patterns->head; cell; cell = cell->next)
1698 : {
1699 : PQExpBufferData dbbuf;
1700 : int dotcnt;
1701 :
1702 42 : appendPQExpBufferStr(query,
1703 : "SELECT oid FROM pg_catalog.pg_namespace n\n");
1704 42 : initPQExpBuffer(&dbbuf);
1705 42 : processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1706 : false, NULL, "n.nspname", NULL, NULL, &dbbuf,
1707 : &dotcnt);
1708 42 : if (dotcnt > 1)
1709 4 : pg_fatal("improper qualified name (too many dotted names): %s",
1710 : cell->val);
1711 38 : else if (dotcnt == 1)
1712 6 : prohibit_crossdb_refs(GetConnection(fout), dbbuf.data, cell->val);
1713 32 : termPQExpBuffer(&dbbuf);
1714 :
1715 32 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1716 32 : if (strict_names && PQntuples(res) == 0)
1717 2 : pg_fatal("no matching schemas were found for pattern \"%s\"", cell->val);
1718 :
1719 58 : for (i = 0; i < PQntuples(res); i++)
1720 : {
1721 28 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1722 : }
1723 :
1724 30 : PQclear(res);
1725 30 : resetPQExpBuffer(query);
1726 : }
1727 :
1728 30 : destroyPQExpBuffer(query);
1729 : }
1730 :
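          : /*
          :  * Illustrative sketch (an assumption about processSQLNamePattern's
          :  * output, not copied from a live session): for a pattern such as
          :  * "staging*", the query issued by the loop above looks approximately like
          :  *
          :  *   SELECT oid FROM pg_catalog.pg_namespace n
          :  *   WHERE n.nspname OPERATOR(pg_catalog.~) '^(staging.*)$' COLLATE pg_catalog.default;
          :  *
          :  * i.e. the shell-style pattern is turned into an anchored regular
          :  * expression before matching, and each matching schema OID is appended
          :  * to the include or exclude list.
          :  */
          :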
1731 : /*
1732 : * Find the OIDs of all extensions matching the given list of patterns,
1733 : * and append them to the given OID list.
1734 : */
1735 : static void
1736 388 : expand_extension_name_patterns(Archive *fout,
1737 : SimpleStringList *patterns,
1738 : SimpleOidList *oids,
1739 : bool strict_names)
1740 : {
1741 : PQExpBuffer query;
1742 : PGresult *res;
1743 : SimpleStringListCell *cell;
1744 : int i;
1745 :
1746 388 : if (patterns->head == NULL)
1747 374 : return; /* nothing to do */
1748 :
1749 14 : query = createPQExpBuffer();
1750 :
1751 : /*
1752              :  * The loop below runs multiple SELECTs, which might sometimes result
1753              :  * in duplicate entries in the OID list, but we don't care.
1754 : */
1755 28 : for (cell = patterns->head; cell; cell = cell->next)
1756 : {
1757 : int dotcnt;
1758 :
1759 14 : appendPQExpBufferStr(query,
1760 : "SELECT oid FROM pg_catalog.pg_extension e\n");
1761 14 : processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1762 : false, NULL, "e.extname", NULL, NULL, NULL,
1763 : &dotcnt);
1764 14 : if (dotcnt > 0)
1765 0 : pg_fatal("improper qualified name (too many dotted names): %s",
1766 : cell->val);
1767 :
1768 14 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1769 14 : if (strict_names && PQntuples(res) == 0)
1770 0 : pg_fatal("no matching extensions were found for pattern \"%s\"", cell->val);
1771 :
1772 26 : for (i = 0; i < PQntuples(res); i++)
1773 : {
1774 12 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1775 : }
1776 :
1777 14 : PQclear(res);
1778 14 : resetPQExpBuffer(query);
1779 : }
1780 :
1781 14 : destroyPQExpBuffer(query);
1782 : }
1783 :
1784 : /*
1785 : * Find the OIDs of all foreign servers matching the given list of patterns,
1786 : * and append them to the given OID list.
1787 : */
1788 : static void
1789 382 : expand_foreign_server_name_patterns(Archive *fout,
1790 : SimpleStringList *patterns,
1791 : SimpleOidList *oids)
1792 : {
1793 : PQExpBuffer query;
1794 : PGresult *res;
1795 : SimpleStringListCell *cell;
1796 : int i;
1797 :
1798 382 : if (patterns->head == NULL)
1799 376 : return; /* nothing to do */
1800 :
1801 6 : query = createPQExpBuffer();
1802 :
1803 : /*
1804              :  * The loop below runs multiple SELECTs, which might sometimes result
1805              :  * in duplicate entries in the OID list, but we don't care.
1806 : */
1807 :
1808 10 : for (cell = patterns->head; cell; cell = cell->next)
1809 : {
1810 : int dotcnt;
1811 :
1812 6 : appendPQExpBufferStr(query,
1813 : "SELECT oid FROM pg_catalog.pg_foreign_server s\n");
1814 6 : processSQLNamePattern(GetConnection(fout), query, cell->val, false,
1815 : false, NULL, "s.srvname", NULL, NULL, NULL,
1816 : &dotcnt);
1817 6 : if (dotcnt > 0)
1818 0 : pg_fatal("improper qualified name (too many dotted names): %s",
1819 : cell->val);
1820 :
1821 6 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1822 6 : if (PQntuples(res) == 0)
1823 2 : pg_fatal("no matching foreign servers were found for pattern \"%s\"", cell->val);
1824 :
1825 8 : for (i = 0; i < PQntuples(res); i++)
1826 4 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1827 :
1828 4 : PQclear(res);
1829 4 : resetPQExpBuffer(query);
1830 : }
1831 :
1832 4 : destroyPQExpBuffer(query);
1833 : }
1834 :
1835 : /*
1836 : * Find the OIDs of all tables matching the given list of patterns,
1837 : * and append them to the given OID list. See also expand_dbname_patterns()
1838 : * in pg_dumpall.c
1839 : */
1840 : static void
1841 2310 : expand_table_name_patterns(Archive *fout,
1842 : SimpleStringList *patterns, SimpleOidList *oids,
1843 : bool strict_names, bool with_child_tables)
1844 : {
1845 : PQExpBuffer query;
1846 : PGresult *res;
1847 : SimpleStringListCell *cell;
1848 : int i;
1849 :
1850 2310 : if (patterns->head == NULL)
1851 2252 : return; /* nothing to do */
1852 :
1853 58 : query = createPQExpBuffer();
1854 :
1855 : /*
1856              :  * The loop below might sometimes add duplicate entries to the OID
1857              :  * list, but we don't care.
1858 : */
1859 :
1860 118 : for (cell = patterns->head; cell; cell = cell->next)
1861 : {
1862 : PQExpBufferData dbbuf;
1863 : int dotcnt;
1864 :
1865 : /*
1866 : * Query must remain ABSOLUTELY devoid of unqualified names. This
1867 : * would be unnecessary given a pg_table_is_visible() variant taking a
1868 : * search_path argument.
1869 : *
1870 : * For with_child_tables, we start with the basic query's results and
1871 : * recursively search the inheritance tree to add child tables.
1872 : */
1873 70 : if (with_child_tables)
1874 : {
1875 12 : appendPQExpBufferStr(query, "WITH RECURSIVE partition_tree (relid) AS (\n");
1876 : }
1877 :
1878 70 : appendPQExpBuffer(query,
1879 : "SELECT c.oid"
1880 : "\nFROM pg_catalog.pg_class c"
1881 : "\n LEFT JOIN pg_catalog.pg_namespace n"
1882 : "\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
1883 : "\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
1884 : "\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
1885 : RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
1886 : RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
1887 : RELKIND_PARTITIONED_TABLE);
1888 70 : initPQExpBuffer(&dbbuf);
1889 70 : processSQLNamePattern(GetConnection(fout), query, cell->val, true,
1890 : false, "n.nspname", "c.relname", NULL,
1891 : "pg_catalog.pg_table_is_visible(c.oid)", &dbbuf,
1892 : &dotcnt);
1893 70 : if (dotcnt > 2)
1894 2 : pg_fatal("improper relation name (too many dotted names): %s",
1895 : cell->val);
1896 68 : else if (dotcnt == 2)
1897 4 : prohibit_crossdb_refs(GetConnection(fout), dbbuf.data, cell->val);
1898 64 : termPQExpBuffer(&dbbuf);
1899 :
1900 64 : if (with_child_tables)
1901 : {
1902 12 : appendPQExpBufferStr(query, "UNION"
1903 : "\nSELECT i.inhrelid"
1904 : "\nFROM partition_tree p"
1905 : "\n JOIN pg_catalog.pg_inherits i"
1906 : "\n ON p.relid OPERATOR(pg_catalog.=) i.inhparent"
1907 : "\n)"
1908 : "\nSELECT relid FROM partition_tree");
1909 : }
1910 :
1911 64 : ExecuteSqlStatement(fout, "RESET search_path");
1912 64 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
1913 64 : PQclear(ExecuteSqlQueryForSingleRow(fout,
1914 : ALWAYS_SECURE_SEARCH_PATH_SQL));
1915 64 : if (strict_names && PQntuples(res) == 0)
1916 4 : pg_fatal("no matching tables were found for pattern \"%s\"", cell->val);
1917 :
1918 148 : for (i = 0; i < PQntuples(res); i++)
1919 : {
1920 88 : simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
1921 : }
1922 :
1923 60 : PQclear(res);
1924 60 : resetPQExpBuffer(query);
1925 : }
1926 :
1927 48 : destroyPQExpBuffer(query);
1928 : }
1929 :
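          : /*
          :  * Illustrative reconstruction of the query assembled piecewise above for
          :  * the --table-and-children case; the pattern "measurement" is an example,
          :  * and the exact pattern clause comes from processSQLNamePattern:
          :  *
          :  *   WITH RECURSIVE partition_tree (relid) AS (
          :  *     SELECT c.oid
          :  *       FROM pg_catalog.pg_class c
          :  *            LEFT JOIN pg_catalog.pg_namespace n
          :  *              ON n.oid OPERATOR(pg_catalog.=) c.relnamespace
          :  *      WHERE c.relkind OPERATOR(pg_catalog.=) ANY (array['r', 'S', 'v', 'm', 'f', 'p'])
          :  *        AND c.relname OPERATOR(pg_catalog.~) '^(measurement)$'
          :  *        AND pg_catalog.pg_table_is_visible(c.oid)
          :  *     UNION
          :  *     SELECT i.inhrelid
          :  *       FROM partition_tree p
          :  *            JOIN pg_catalog.pg_inherits i
          :  *              ON p.relid OPERATOR(pg_catalog.=) i.inhparent
          :  *   )
          :  *   SELECT relid FROM partition_tree;
          :  *
          :  * Without with_child_tables, only the inner SELECT is sent.
          :  */
          :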
1930 : /*
1931 : * Verifies that the connected database name matches the given database name,
1932 : * and if not, dies with an error about the given pattern.
1933 : *
1934 : * The 'dbname' argument should be a literal name parsed from 'pattern'.
1935 : */
1936 : static void
1937 10 : prohibit_crossdb_refs(PGconn *conn, const char *dbname, const char *pattern)
1938 : {
1939 : const char *db;
1940 :
1941 10 : db = PQdb(conn);
1942 10 : if (db == NULL)
1943 0 : pg_fatal("You are currently not connected to a database.");
1944 :
1945 10 : if (strcmp(db, dbname) != 0)
1946 10 : pg_fatal("cross-database references are not implemented: %s",
1947 : pattern);
1948 0 : }
1949 :
1950 : /*
1951 : * checkExtensionMembership
1952 : * Determine whether object is an extension member, and if so,
1953 : * record an appropriate dependency and set the object's dump flag.
1954 : *
1955 : * It's important to call this for each object that could be an extension
1956 : * member. Generally, we integrate this with determining the object's
1957 : * to-be-dumped-ness, since extension membership overrides other rules for that.
1958 : *
1959 : * Returns true if object is an extension member, else false.
1960 : */
1961 : static bool
1962 1193764 : checkExtensionMembership(DumpableObject *dobj, Archive *fout)
1963 : {
1964 1193764 : ExtensionInfo *ext = findOwningExtension(dobj->catId);
1965 :
1966 1193764 : if (ext == NULL)
1967 1192150 : return false;
1968 :
1969 1614 : dobj->ext_member = true;
1970 :
1971 : /* Record dependency so that getDependencies needn't deal with that */
1972 1614 : addObjectDependency(dobj, ext->dobj.dumpId);
1973 :
1974 : /*
1975 : * In 9.6 and above, mark the member object to have any non-initial ACLs
1976 : * dumped. (Any initial ACLs will be removed later, using data from
1977 : * pg_init_privs, so that we'll dump only the delta from the extension's
1978 : * initial setup.)
1979 : *
1980 : * Prior to 9.6, we do not include any extension member components.
1981 : *
1982 : * In binary upgrades, we still dump all components of the members
1983 : * individually, since the idea is to exactly reproduce the database
1984 : * contents rather than replace the extension contents with something
1985 : * different.
1986 : *
1987 : * Note: it might be interesting someday to implement storage and delta
1988 : * dumping of extension members' RLS policies and/or security labels.
1989 : * However there is a pitfall for RLS policies: trying to dump them
1990 : * requires getting a lock on their tables, and the calling user might not
1991 : * have privileges for that. We need no lock to examine a table's ACLs,
1992 : * so the current feature doesn't have a problem of that sort.
1993 : */
1994 1614 : if (fout->dopt->binary_upgrade)
1995 354 : dobj->dump = ext->dobj.dump;
1996 : else
1997 : {
1998 1260 : if (fout->remoteVersion < 90600)
1999 0 : dobj->dump = DUMP_COMPONENT_NONE;
2000 : else
2001 1260 : dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL);
2002 : }
2003 :
2004 1614 : return true;
2005 : }
2006 :
2007 : /*
2008 : * selectDumpableNamespace: policy-setting subroutine
2009 : * Mark a namespace as to be dumped or not
2010 : */
2011 : static void
2012 2900 : selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
2013 : {
2014 : /*
2015 : * DUMP_COMPONENT_DEFINITION typically implies a CREATE SCHEMA statement
2016 : * and (for --clean) a DROP SCHEMA statement. (In the absence of
2017 : * DUMP_COMPONENT_DEFINITION, this value is irrelevant.)
2018 : */
2019 2900 : nsinfo->create = true;
2020 :
2021 : /*
2022 : * If specific tables are being dumped, do not dump any complete
2023 : * namespaces. If specific namespaces are being dumped, dump just those
2024 : * namespaces. Otherwise, dump all non-system namespaces.
2025 : */
2026 2900 : if (table_include_oids.head != NULL)
2027 100 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
2028 2800 : else if (schema_include_oids.head != NULL)
2029 374 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
2030 374 : simple_oid_list_member(&schema_include_oids,
2031 : nsinfo->dobj.catId.oid) ?
2032 374 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2033 2426 : else if (fout->remoteVersion >= 90600 &&
2034 2426 : strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
2035 : {
2036 : /*
2037 : * In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
2038 : * they are interesting (and not the original ACLs which were set at
2039 : * initdb time, see pg_init_privs).
2040 : */
2041 334 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
2042 : }
2043 2092 : else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
2044 1014 : strcmp(nsinfo->dobj.name, "information_schema") == 0)
2045 : {
2046 : /* Other system schemas don't get dumped */
2047 1412 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
2048 : }
2049 680 : else if (strcmp(nsinfo->dobj.name, "public") == 0)
2050 : {
2051 : /*
2052 : * The public schema is a strange beast that sits in a sort of
2053 : * no-mans-land between being a system object and a user object.
2054 : * CREATE SCHEMA would fail, so its DUMP_COMPONENT_DEFINITION is just
2055 : * a comment and an indication of ownership. If the owner is the
2056 : * default, omit that superfluous DUMP_COMPONENT_DEFINITION. Before
2057 : * v15, the default owner was BOOTSTRAP_SUPERUSERID.
2058 : */
2059 326 : nsinfo->create = false;
2060 326 : nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
2061 326 : if (nsinfo->nspowner == ROLE_PG_DATABASE_OWNER)
2062 242 : nsinfo->dobj.dump &= ~DUMP_COMPONENT_DEFINITION;
2063 326 : nsinfo->dobj.dump_contains = DUMP_COMPONENT_ALL;
2064 :
2065 : /*
2066 : * Also, make like it has a comment even if it doesn't; this is so
2067 : * that we'll emit a command to drop the comment, if appropriate.
2068 : * (Without this, we'd not call dumpCommentExtended for it.)
2069 : */
2070 326 : nsinfo->dobj.components |= DUMP_COMPONENT_COMMENT;
2071 : }
2072 : else
2073 354 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
2074 :
2075 : /*
2076 : * In any case, a namespace can be excluded by an exclusion switch
2077 : */
2078 3936 : if (nsinfo->dobj.dump_contains &&
2079 1036 : simple_oid_list_member(&schema_exclude_oids,
2080 : nsinfo->dobj.catId.oid))
2081 6 : nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
2082 :
2083 : /*
2084 : * If the schema belongs to an extension, allow extension membership to
2085 : * override the dump decision for the schema itself. However, this does
2086 : * not change dump_contains, so this won't change what we do with objects
2087 : * within the schema. (If they belong to the extension, they'll get
2088 : * suppressed by it, otherwise not.)
2089 : */
2090 2900 : (void) checkExtensionMembership(&nsinfo->dobj, fout);
2091 2900 : }
2092 :
2093 : /*
2094 : * selectDumpableTable: policy-setting subroutine
2095 : * Mark a table as to be dumped or not
2096 : */
2097 : static void
2098 99148 : selectDumpableTable(TableInfo *tbinfo, Archive *fout)
2099 : {
2100 99148 : if (checkExtensionMembership(&tbinfo->dobj, fout))
2101 450 : return; /* extension membership overrides all else */
2102 :
2103 : /*
2104 : * If specific tables are being dumped, dump just those tables; else, dump
2105 : * according to the parent namespace's dump flag.
2106 : */
2107 98698 : if (table_include_oids.head != NULL)
2108 10376 : tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
2109 : tbinfo->dobj.catId.oid) ?
2110 5188 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2111 : else
2112 93510 : tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
2113 :
2114 : /*
2115 : * In any case, a table can be excluded by an exclusion switch
2116 : */
2117 161504 : if (tbinfo->dobj.dump &&
2118 62806 : simple_oid_list_member(&table_exclude_oids,
2119 : tbinfo->dobj.catId.oid))
2120 24 : tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
2121 : }
2122 :
2123 : /*
2124 : * selectDumpableType: policy-setting subroutine
2125 : * Mark a type as to be dumped or not
2126 : *
2127 : * If it's a table's rowtype or an autogenerated array type, we also apply a
2128 : * special type code to facilitate sorting into the desired order. (We don't
2129 : * want to consider those to be ordinary types because that would bring tables
2130 : * up into the datatype part of the dump order.) We still set the object's
2131 : * dump flag; that's not going to cause the dummy type to be dumped, but we
2132 : * need it so that casts involving such types will be dumped correctly -- see
2133 : * dumpCast. This means the flag should be set the same as for the underlying
2134 : * object (the table or base type).
2135 : */
2136 : static void
2137 273084 : selectDumpableType(TypeInfo *tyinfo, Archive *fout)
2138 : {
2139 : /* skip complex types, except for standalone composite types */
2140 273084 : if (OidIsValid(tyinfo->typrelid) &&
2141 97696 : tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
2142 : {
2143 97332 : TableInfo *tytable = findTableByOid(tyinfo->typrelid);
2144 :
2145 97332 : tyinfo->dobj.objType = DO_DUMMY_TYPE;
2146 97332 : if (tytable != NULL)
2147 97332 : tyinfo->dobj.dump = tytable->dobj.dump;
2148 : else
2149 0 : tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
2150 97332 : return;
2151 : }
2152 :
2153 : /* skip auto-generated array and multirange types */
2154 175752 : if (tyinfo->isArray || tyinfo->isMultirange)
2155 : {
2156 133558 : tyinfo->dobj.objType = DO_DUMMY_TYPE;
2157 :
2158 : /*
2159 : * Fall through to set the dump flag; we assume that the subsequent
2160 : * rules will do the same thing as they would for the array's base
2161 : * type or multirange's range type. (We cannot reliably look up the
2162 : * base type here, since getTypes may not have processed it yet.)
2163 : */
2164 : }
2165 :
2166 175752 : if (checkExtensionMembership(&tyinfo->dobj, fout))
2167 300 : return; /* extension membership overrides all else */
2168 :
2169 : /* Dump based on if the contents of the namespace are being dumped */
2170 175452 : tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
2171 : }
2172 :
2173 : /*
2174 : * selectDumpableDefaultACL: policy-setting subroutine
2175 : * Mark a default ACL as to be dumped or not
2176 : *
2177 : * For per-schema default ACLs, dump if the schema is to be dumped.
2178 : * Otherwise dump if we are dumping "everything". Note that dumpSchema
2179 : * and aclsSkip are checked separately.
2180 : */
2181 : static void
2182 388 : selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
2183 : {
2184 : /* Default ACLs can't be extension members */
2185 :
2186 388 : if (dinfo->dobj.namespace)
2187 : /* default ACLs are considered part of the namespace */
2188 180 : dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
2189 : else
2190 208 : dinfo->dobj.dump = dopt->include_everything ?
2191 208 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2192 388 : }
2193 :
2194 : /*
2195 : * selectDumpableCast: policy-setting subroutine
2196 : * Mark a cast as to be dumped or not
2197 : *
2198 : * Casts do not belong to any particular namespace (since they haven't got
2199 : * names), nor do they have identifiable owners. To distinguish user-defined
2200 : * casts from built-in ones, we must resort to checking whether the cast's
2201 : * OID is in the range reserved for initdb.
2202 : */
2203 : static void
2204 90790 : selectDumpableCast(CastInfo *cast, Archive *fout)
2205 : {
2206 90790 : if (checkExtensionMembership(&cast->dobj, fout))
2207 0 : return; /* extension membership overrides all else */
2208 :
2209 : /*
2210 : * This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
2211 : * support ACLs currently.
2212 : */
2213 90790 : if (cast->dobj.catId.oid <= g_last_builtin_oid)
2214 90616 : cast->dobj.dump = DUMP_COMPONENT_NONE;
2215 : else
2216 174 : cast->dobj.dump = fout->dopt->include_everything ?
2217 174 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2218 : }
2219 :
2220 : /*
2221 : * selectDumpableProcLang: policy-setting subroutine
2222 : * Mark a procedural language as to be dumped or not
2223 : *
2224 : * Procedural languages do not belong to any particular namespace. To
2225 : * identify built-in languages, we must resort to checking whether the
2226 : * language's OID is in the range reserved for initdb.
2227 : */
2228 : static void
2229 466 : selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
2230 : {
2231 466 : if (checkExtensionMembership(&plang->dobj, fout))
2232 376 : return; /* extension membership overrides all else */
2233 :
2234 : /*
2235 : * Only include procedural languages when we are dumping everything.
2236 : *
2237 : * For from-initdb procedural languages, only include ACLs, as we do for
2238 : * the pg_catalog namespace. We need this because procedural languages do
2239 : * not live in any namespace.
2240 : */
2241 90 : if (!fout->dopt->include_everything)
2242 16 : plang->dobj.dump = DUMP_COMPONENT_NONE;
2243 : else
2244 : {
2245 74 : if (plang->dobj.catId.oid <= g_last_builtin_oid)
2246 0 : plang->dobj.dump = fout->remoteVersion < 90600 ?
2247 0 : DUMP_COMPONENT_NONE : DUMP_COMPONENT_ACL;
2248 : else
2249 74 : plang->dobj.dump = DUMP_COMPONENT_ALL;
2250 : }
2251 : }
2252 :
2253 : /*
2254 : * selectDumpableAccessMethod: policy-setting subroutine
2255 : * Mark an access method as to be dumped or not
2256 : *
2257 : * Access methods do not belong to any particular namespace. To identify
2258 : * built-in access methods, we must resort to checking whether the
2259 : * method's OID is in the range reserved for initdb.
2260 : */
2261 : static void
2262 2876 : selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
2263 : {
2264 : /* see getAccessMethods() comment about v9.6. */
2265 2876 : if (fout->remoteVersion < 90600)
2266 : {
2267 0 : method->dobj.dump = DUMP_COMPONENT_NONE;
2268 0 : return;
2269 : }
2270 :
2271 2876 : if (checkExtensionMembership(&method->dobj, fout))
2272 50 : return; /* extension membership overrides all else */
2273 :
2274 : /*
2275 : * This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
2276 : * they do not support ACLs currently.
2277 : */
2278 2826 : if (method->dobj.catId.oid <= g_last_builtin_oid)
2279 2632 : method->dobj.dump = DUMP_COMPONENT_NONE;
2280 : else
2281 194 : method->dobj.dump = fout->dopt->include_everything ?
2282 194 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2283 : }
2284 :
2285 : /*
2286 : * selectDumpableExtension: policy-setting subroutine
2287 : * Mark an extension as to be dumped or not
2288 : *
2289 : * Built-in extensions should be skipped except for checking ACLs, since we
2290 : * assume those will already be installed in the target database. We identify
2291 : * such extensions by their having OIDs in the range reserved for initdb.
2292 : * We dump all user-added extensions by default. No extensions are dumped
2293 : * if include_everything is false (i.e., a --schema or --table switch was
2294 : * given), except if --extension specifies a list of extensions to dump.
2295 : */
2296 : static void
2297 438 : selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
2298 : {
2299 : /*
2300 : * Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
2301 : * change permissions on their member objects, if they wish to, and have
2302 : * those changes preserved.
2303 : */
2304 438 : if (extinfo->dobj.catId.oid <= g_last_builtin_oid)
2305 378 : extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
2306 : else
2307 : {
2308 : /* check if there is a list of extensions to dump */
2309 60 : if (extension_include_oids.head != NULL)
2310 8 : extinfo->dobj.dump = extinfo->dobj.dump_contains =
2311 8 : simple_oid_list_member(&extension_include_oids,
2312 : extinfo->dobj.catId.oid) ?
2313 8 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2314 : else
2315 52 : extinfo->dobj.dump = extinfo->dobj.dump_contains =
2316 52 : dopt->include_everything ?
2317 52 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2318 :
2319 : /* check that the extension is not explicitly excluded */
2320 112 : if (extinfo->dobj.dump &&
2321 52 : simple_oid_list_member(&extension_exclude_oids,
2322 : extinfo->dobj.catId.oid))
2323 4 : extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_NONE;
2324 : }
2325 438 : }
2326 :
2327 : /*
2328 : * selectDumpablePublicationObject: policy-setting subroutine
2329 : * Mark a publication object as to be dumped or not
2330 : *
2331 : * A publication can have schemas and tables which have schemas, but those are
2332 : * ignored in decision making, because publications are only dumped when we are
2333 : * dumping everything.
2334 : */
2335 : static void
2336 950 : selectDumpablePublicationObject(DumpableObject *dobj, Archive *fout)
2337 : {
2338 950 : if (checkExtensionMembership(dobj, fout))
2339 0 : return; /* extension membership overrides all else */
2340 :
2341 950 : dobj->dump = fout->dopt->include_everything ?
2342 950 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2343 : }
2344 :
2345 : /*
2346 : * selectDumpableStatisticsObject: policy-setting subroutine
2347 : * Mark an extended statistics object as to be dumped or not
2348 : *
2349 : * We dump an extended statistics object if the schema it's in and the table
2350 : * it's for are being dumped. (This'll need more thought if statistics
2351 : * objects ever support cross-table stats.)
2352 : */
2353 : static void
2354 416 : selectDumpableStatisticsObject(StatsExtInfo *sobj, Archive *fout)
2355 : {
2356 416 : if (checkExtensionMembership(&sobj->dobj, fout))
2357 0 : return; /* extension membership overrides all else */
2358 :
2359 416 : sobj->dobj.dump = sobj->dobj.namespace->dobj.dump_contains;
2360 416 : if (sobj->stattable == NULL ||
2361 416 : !(sobj->stattable->dobj.dump & DUMP_COMPONENT_DEFINITION))
2362 70 : sobj->dobj.dump = DUMP_COMPONENT_NONE;
2363 : }
2364 :
2365 : /*
2366 : * selectDumpableObject: policy-setting subroutine
2367 : * Mark a generic dumpable object as to be dumped or not
2368 : *
2369 : * Use this only for object types without a special-case routine above.
2370 : */
2371 : static void
2372 820466 : selectDumpableObject(DumpableObject *dobj, Archive *fout)
2373 : {
2374 820466 : if (checkExtensionMembership(dobj, fout))
2375 388 : return; /* extension membership overrides all else */
2376 :
2377 : /*
2378 : * Default policy is to dump if parent namespace is dumpable, or for
2379 : * non-namespace-associated items, dump if we're dumping "everything".
2380 : */
2381 820078 : if (dobj->namespace)
2382 818620 : dobj->dump = dobj->namespace->dobj.dump_contains;
2383 : else
2384 1458 : dobj->dump = fout->dopt->include_everything ?
2385 1458 : DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
2386 : }
2387 :
2388 : /*
2389 : * Dump a table's contents for loading using the COPY command
2390 : * - this routine is called by the Archiver when it wants the table
2391 : * to be dumped.
2392 : */
2393 : static int
2394 8190 : dumpTableData_copy(Archive *fout, const void *dcontext)
2395 : {
2396 8190 : const TableDataInfo *tdinfo = dcontext;
2397 8190 : const TableInfo *tbinfo = tdinfo->tdtable;
2398 8190 : const char *classname = tbinfo->dobj.name;
2399 8190 : PQExpBuffer q = createPQExpBuffer();
2400 :
2401 : /*
2402 : * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
2403 : * which uses it already.
2404 : */
2405 8190 : PQExpBuffer clistBuf = createPQExpBuffer();
2406 8190 : PGconn *conn = GetConnection(fout);
2407 : PGresult *res;
2408 : int ret;
2409 : char *copybuf;
2410 : const char *column_list;
2411 :
2412 8190 : pg_log_info("dumping contents of table \"%s.%s\"",
2413 : tbinfo->dobj.namespace->dobj.name, classname);
2414 :
2415 : /*
2416 : * Specify the column list explicitly so that we have no possibility of
2417 : * retrieving data in the wrong column order. (The default column
2418 : * ordering of COPY will not be what we want in certain corner cases
2419 : * involving ADD COLUMN and inheritance.)
2420 : */
2421 8190 : column_list = fmtCopyColumnList(tbinfo, clistBuf);
2422 :
2423 : /*
2424 : * Use COPY (SELECT ...) TO when dumping a foreign table's data, and when
2425 : * a filter condition was specified. For other cases a simple COPY
2426 : * suffices.
2427 : */
2428 8190 : if (tdinfo->filtercond || tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2429 : {
2430              :  /* Temporarily allow access to foreign tables so their data can be dumped */
2431 154 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2432 2 : set_restrict_relation_kind(fout, "view");
2433 :
2434 154 : appendPQExpBufferStr(q, "COPY (SELECT ");
2435 : /* klugery to get rid of parens in column list */
2436 154 : if (strlen(column_list) > 2)
2437 : {
2438 154 : appendPQExpBufferStr(q, column_list + 1);
2439 154 : q->data[q->len - 1] = ' ';
2440 : }
2441 : else
2442 0 : appendPQExpBufferStr(q, "* ");
2443 :
2444 308 : appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
2445 154 : fmtQualifiedDumpable(tbinfo),
2446 154 : tdinfo->filtercond ? tdinfo->filtercond : "");
2447 : }
2448 : else
2449 : {
2450 8036 : appendPQExpBuffer(q, "COPY %s %s TO stdout;",
2451 8036 : fmtQualifiedDumpable(tbinfo),
2452 : column_list);
2453 : }
2454 8190 : res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
2455 8188 : PQclear(res);
2456 8188 : destroyPQExpBuffer(clistBuf);
2457 :
2458 : for (;;)
2459 3627520 :  ret = PQgetCopyData(conn, &copybuf, 0);
2460 3627520 : ret = PQgetCopyData(conn, ©buf, 0);
2461 :
2462 3627520 : if (ret < 0)
2463 8188 : break; /* done or error */
2464 :
2465 3619332 : if (copybuf)
2466 : {
2467 3619332 : WriteData(fout, copybuf, ret);
2468 3619332 : PQfreemem(copybuf);
2469 : }
2470 :
2471 : /* ----------
2472 : * THROTTLE:
2473 : *
2474 : * There was considerable discussion in late July, 2000 regarding
2475 : * slowing down pg_dump when backing up large tables. Users with both
2476 : * slow & fast (multi-processor) machines experienced performance
2477 : * degradation when doing a backup.
2478 : *
2479 : * Initial attempts based on sleeping for a number of ms for each ms
2480 : * of work were deemed too complex, then a simple 'sleep in each loop'
2481 : * implementation was suggested. The latter failed because the loop
2482 : * was too tight. Finally, the following was implemented:
2483 : *
2484 : * If throttle is non-zero, then
2485 : * See how long since the last sleep.
2486 : * Work out how long to sleep (based on ratio).
2487 : * If sleep is more than 100ms, then
2488 : * sleep
2489 : * reset timer
2490 : * EndIf
2491 : * EndIf
2492 : *
2493 : * where the throttle value was the number of ms to sleep per ms of
2494 : * work. The calculation was done in each loop.
2495 : *
2496 : * Most of the hard work is done in the backend, and this solution
2497 : * still did not work particularly well: on slow machines, the ratio
2498 : * was 50:1, and on medium paced machines, 1:1, and on fast
2499 : * multi-processor machines, it had little or no effect, for reasons
2500 : * that were unclear.
2501 : *
2502 : * Further discussion ensued, and the proposal was dropped.
2503 : *
2504 : * For those people who want this feature, it can be implemented using
2505 : * gettimeofday in each loop, calculating the time since last sleep,
2506 : * multiplying that by the sleep ratio, then if the result is more
2507 : * than a preset 'minimum sleep time' (say 100ms), call the 'select'
2508 : * function to sleep for a subsecond period ie.
2509 : *
2510 : * select(0, NULL, NULL, NULL, &tvi);
2511 : *
2512 : * This will return after the interval specified in the structure tvi.
2513 : * Finally, call gettimeofday again to save the 'last sleep time'.
2514 : * ----------
2515 : */
2516 : }
2517 8188 : archprintf(fout, "\\.\n\n\n");
2518 :
2519 8188 : if (ret == -2)
2520 : {
2521 : /* copy data transfer failed */
2522 0 : pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
2523 0 : pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
2524 0 : pg_log_error_detail("Command was: %s", q->data);
2525 0 : exit_nicely(1);
2526 : }
2527 :
2528 : /* Check command status and return to normal libpq state */
2529 8188 : res = PQgetResult(conn);
2530 8188 : if (PQresultStatus(res) != PGRES_COMMAND_OK)
2531 : {
2532 0 : pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
2533 0 : pg_log_error_detail("Error message from server: %s", PQerrorMessage(conn));
2534 0 : pg_log_error_detail("Command was: %s", q->data);
2535 0 : exit_nicely(1);
2536 : }
2537 8188 : PQclear(res);
2538 :
2539 : /* Do this to ensure we've pumped libpq back to idle state */
2540 8188 : if (PQgetResult(conn) != NULL)
2541 0 : pg_log_warning("unexpected extra results during COPY of table \"%s\"",
2542 : classname);
2543 :
2544 8188 : destroyPQExpBuffer(q);
2545 :
2546              :  /* Revert to the relation-kind restriction applied in setup_connection() */
2547 8188 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2548 0 : set_restrict_relation_kind(fout, "view, foreign-table");
2549 :
2550 8188 : return 1;
2551 : }
2552 :
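          : /*
          :  * Illustrative sketch of the commands involved (table and column names
          :  * are examples): for an ordinary table the function above runs
          :  *
          :  *   COPY public.orders (id, customer, total) TO stdout;
          :  *
          :  * whereas a foreign table or a table-data filter condition leads to
          :  *
          :  *   COPY (SELECT id, customer, total FROM public.orders WHERE total > 0) TO stdout;
          :  *
          :  * The rows stream into the archive and are terminated with "\.", matching
          :  * the "COPY ... FROM stdin;" statement that dumpTableData() below stores
          :  * for the restore side.
          :  */
          :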
2553 : /*
2554 : * Dump table data using INSERT commands.
2555 : *
2556 : * Caution: when we restore from an archive file direct to database, the
2557 : * INSERT commands emitted by this function have to be parsed by
2558 : * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
2559 : * E'' strings, or dollar-quoted strings. So don't emit anything like that.
2560 : */
2561 : static int
2562 166 : dumpTableData_insert(Archive *fout, const void *dcontext)
2563 : {
2564 166 : const TableDataInfo *tdinfo = dcontext;
2565 166 : const TableInfo *tbinfo = tdinfo->tdtable;
2566 166 : DumpOptions *dopt = fout->dopt;
2567 166 : PQExpBuffer q = createPQExpBuffer();
2568 166 : PQExpBuffer insertStmt = NULL;
2569 : char *attgenerated;
2570 : PGresult *res;
2571 : int nfields,
2572 : i;
2573 166 : int rows_per_statement = dopt->dump_inserts;
2574 166 : int rows_this_statement = 0;
2575 :
2576              :  /* Temporarily allow access to foreign tables so their data can be dumped */
2577 166 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2578 0 : set_restrict_relation_kind(fout, "view");
2579 :
2580 : /*
2581 : * If we're going to emit INSERTs with column names, the most efficient
2582 : * way to deal with generated columns is to exclude them entirely. For
2583 : * INSERTs without column names, we have to emit DEFAULT rather than the
2584 : * actual column value --- but we can save a few cycles by fetching nulls
2585 : * rather than the uninteresting-to-us value.
2586 : */
2587 166 : attgenerated = (char *) pg_malloc(tbinfo->numatts * sizeof(char));
2588 166 : appendPQExpBufferStr(q, "DECLARE _pg_dump_cursor CURSOR FOR SELECT ");
2589 166 : nfields = 0;
2590 514 : for (i = 0; i < tbinfo->numatts; i++)
2591 : {
2592 348 : if (tbinfo->attisdropped[i])
2593 4 : continue;
2594 344 : if (tbinfo->attgenerated[i] && dopt->column_inserts)
2595 16 : continue;
2596 328 : if (nfields > 0)
2597 176 : appendPQExpBufferStr(q, ", ");
2598 328 : if (tbinfo->attgenerated[i])
2599 16 : appendPQExpBufferStr(q, "NULL");
2600 : else
2601 312 : appendPQExpBufferStr(q, fmtId(tbinfo->attnames[i]));
2602 328 : attgenerated[nfields] = tbinfo->attgenerated[i];
2603 328 : nfields++;
2604 : }
2605 : /* Servers before 9.4 will complain about zero-column SELECT */
2606 166 : if (nfields == 0)
2607 14 : appendPQExpBufferStr(q, "NULL");
2608 166 : appendPQExpBuffer(q, " FROM ONLY %s",
2609 166 : fmtQualifiedDumpable(tbinfo));
2610 166 : if (tdinfo->filtercond)
2611 0 : appendPQExpBuffer(q, " %s", tdinfo->filtercond);
2612 :
2613 166 : ExecuteSqlStatement(fout, q->data);
2614 :
2615 : while (1)
2616 : {
2617 270 : res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
2618 : PGRES_TUPLES_OK);
2619 :
2620 : /* cross-check field count, allowing for dummy NULL if any */
2621 270 : if (nfields != PQnfields(res) &&
2622 20 : !(nfields == 0 && PQnfields(res) == 1))
2623 0 : pg_fatal("wrong number of fields retrieved from table \"%s\"",
2624 : tbinfo->dobj.name);
2625 :
2626 : /*
2627 : * First time through, we build as much of the INSERT statement as
2628 : * possible in "insertStmt", which we can then just print for each
2629 : * statement. If the table happens to have zero dumpable columns then
2630 : * this will be a complete statement, otherwise it will end in
2631 : * "VALUES" and be ready to have the row's column values printed.
2632 : */
2633 270 : if (insertStmt == NULL)
2634 : {
2635 : const TableInfo *targettab;
2636 :
2637 166 : insertStmt = createPQExpBuffer();
2638 :
2639 : /*
2640 : * When load-via-partition-root is set or forced, get the root
2641 : * table name for the partition table, so that we can reload data
2642 : * through the root table.
2643 : */
2644 166 : if (tbinfo->ispartition &&
2645 96 : (dopt->load_via_partition_root ||
2646 48 : forcePartitionRootLoad(tbinfo)))
2647 14 : targettab = getRootTableInfo(tbinfo);
2648 : else
2649 152 : targettab = tbinfo;
2650 :
2651 166 : appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
2652 166 : fmtQualifiedDumpable(targettab));
2653 :
2654 : /* corner case for zero-column table */
2655 166 : if (nfields == 0)
2656 : {
2657 14 : appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
2658 : }
2659 : else
2660 : {
2661 : /* append the list of column names if required */
2662 152 : if (dopt->column_inserts)
2663 : {
2664 68 : appendPQExpBufferChar(insertStmt, '(');
2665 206 : for (int field = 0; field < nfields; field++)
2666 : {
2667 138 : if (field > 0)
2668 70 : appendPQExpBufferStr(insertStmt, ", ");
2669 138 : appendPQExpBufferStr(insertStmt,
2670 138 : fmtId(PQfname(res, field)));
2671 : }
2672 68 : appendPQExpBufferStr(insertStmt, ") ");
2673 : }
2674 :
2675 152 : if (tbinfo->needs_override)
2676 4 : appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
2677 :
2678 152 : appendPQExpBufferStr(insertStmt, "VALUES");
2679 : }
2680 : }
2681 :
2682 7208 : for (int tuple = 0; tuple < PQntuples(res); tuple++)
2683 : {
2684 : /* Write the INSERT if not in the middle of a multi-row INSERT. */
2685 6938 : if (rows_this_statement == 0)
2686 6926 : archputs(insertStmt->data, fout);
2687 :
2688 : /*
2689 : * If it is zero-column table then we've already written the
2690 : * complete statement, which will mean we've disobeyed
2691 : * --rows-per-insert when it's set greater than 1. We do support
2692 : * a way to make this multi-row with: SELECT UNION ALL SELECT
2693 : * UNION ALL ... but that's non-standard so we should avoid it
2694 : * given that using INSERTs is mostly only ever needed for
2695 : * cross-database exports.
2696 : */
2697 6938 : if (nfields == 0)
2698 12 : continue;
2699 :
2700 : /* Emit a row heading */
2701 6926 : if (rows_per_statement == 1)
2702 6908 : archputs(" (", fout);
2703 18 : else if (rows_this_statement > 0)
2704 12 : archputs(",\n\t(", fout);
2705 : else
2706 6 : archputs("\n\t(", fout);
2707 :
2708 20890 : for (int field = 0; field < nfields; field++)
2709 : {
2710 13964 : if (field > 0)
2711 7038 : archputs(", ", fout);
2712 13964 : if (attgenerated[field])
2713 : {
2714 4 : archputs("DEFAULT", fout);
2715 4 : continue;
2716 : }
2717 13960 : if (PQgetisnull(res, tuple, field))
2718 : {
2719 166 : archputs("NULL", fout);
2720 166 : continue;
2721 : }
2722 :
2723 : /* XXX This code is partially duplicated in ruleutils.c */
2724 13794 : switch (PQftype(res, field))
2725 : {
2726 9738 : case INT2OID:
2727 : case INT4OID:
2728 : case INT8OID:
2729 : case OIDOID:
2730 : case FLOAT4OID:
2731 : case FLOAT8OID:
2732 : case NUMERICOID:
2733 : {
2734 : /*
2735 : * These types are printed without quotes unless
2736 : * they contain values that aren't accepted by the
2737 : * scanner unquoted (e.g., 'NaN'). Note that
2738 : * strtod() and friends might accept NaN, so we
2739 : * can't use that to test.
2740 : *
2741 : * In reality we only need to defend against
2742 : * infinity and NaN, so we need not get too crazy
2743 : * about pattern matching here.
2744 : */
2745 9738 : const char *s = PQgetvalue(res, tuple, field);
2746 :
2747 9738 : if (strspn(s, "0123456789 +-eE.") == strlen(s))
2748 9734 : archputs(s, fout);
2749 : else
2750 4 : archprintf(fout, "'%s'", s);
2751 : }
2752 9738 : break;
2753 :
2754 4 : case BITOID:
2755 : case VARBITOID:
2756 4 : archprintf(fout, "B'%s'",
2757 : PQgetvalue(res, tuple, field));
2758 4 : break;
2759 :
2760 8 : case BOOLOID:
2761 8 : if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
2762 4 : archputs("true", fout);
2763 : else
2764 4 : archputs("false", fout);
2765 8 : break;
2766 :
2767 4044 : default:
2768 : /* All other types are printed as string literals. */
2769 4044 : resetPQExpBuffer(q);
2770 4044 : appendStringLiteralAH(q,
2771 : PQgetvalue(res, tuple, field),
2772 : fout);
2773 4044 : archputs(q->data, fout);
2774 4044 : break;
2775 : }
2776 : }
2777 :
2778 : /* Terminate the row ... */
2779 6926 : archputs(")", fout);
2780 :
2781 : /* ... and the statement, if the target no. of rows is reached */
2782 6926 : if (++rows_this_statement >= rows_per_statement)
2783 : {
2784 6912 : if (dopt->do_nothing)
2785 0 : archputs(" ON CONFLICT DO NOTHING;\n", fout);
2786 : else
2787 6912 : archputs(";\n", fout);
2788 : /* Reset the row counter */
2789 6912 : rows_this_statement = 0;
2790 : }
2791 : }
2792 :
2793 270 : if (PQntuples(res) <= 0)
2794 : {
2795 166 : PQclear(res);
2796 166 : break;
2797 : }
2798 104 : PQclear(res);
2799 : }
2800 :
2801 : /* Terminate any statements that didn't make the row count. */
2802 166 : if (rows_this_statement > 0)
2803 : {
2804 2 : if (dopt->do_nothing)
2805 0 : archputs(" ON CONFLICT DO NOTHING;\n", fout);
2806 : else
2807 2 : archputs(";\n", fout);
2808 : }
2809 :
2810 166 : archputs("\n\n", fout);
2811 :
2812 166 : ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
2813 :
2814 166 : destroyPQExpBuffer(q);
2815 166 : if (insertStmt != NULL)
2816 166 : destroyPQExpBuffer(insertStmt);
2817 166 : free(attgenerated);
2818 :
2819              :  /* Revert to the relation-kind restriction applied in setup_connection() */
2820 166 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
2821 0 : set_restrict_relation_kind(fout, "view, foreign-table");
2822 :
2823 166 : return 1;
2824 : }
2825 :
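          : /*
          :  * Illustrative sketch of the emitted SQL (table, columns and values are
          :  * examples): with --column-inserts and --rows-per-insert=2 the output is
          :  * shaped like
          :  *
          :  *   INSERT INTO public.orders (id, total) VALUES
          :  *       (1, 9.50),
          :  *       (2, 12.00);
          :  *
          :  * --on-conflict-do-nothing appends " ON CONFLICT DO NOTHING" before the
          :  * semicolon, and generated columns are either dropped from the column
          :  * list or written as DEFAULT, as handled above.
          :  */
          :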
2826 : /*
2827 : * getRootTableInfo:
2828 : * get the root TableInfo for the given partition table.
2829 : */
2830 : static TableInfo *
2831 158 : getRootTableInfo(const TableInfo *tbinfo)
2832 : {
2833 : TableInfo *parentTbinfo;
2834 :
2835 : Assert(tbinfo->ispartition);
2836 : Assert(tbinfo->numParents == 1);
2837 :
2838 158 : parentTbinfo = tbinfo->parents[0];
2839 158 : while (parentTbinfo->ispartition)
2840 : {
2841 : Assert(parentTbinfo->numParents == 1);
2842 0 : parentTbinfo = parentTbinfo->parents[0];
2843 : }
2844 :
2845 158 : return parentTbinfo;
2846 : }
2847 :
2848 : /*
2849 : * forcePartitionRootLoad
2850 : * Check if we must force load_via_partition_root for this partition.
2851 : *
2852 : * This is required if any level of ancestral partitioned table has an
2853 : * unsafe partitioning scheme.
2854 : */
2855 : static bool
2856 2104 : forcePartitionRootLoad(const TableInfo *tbinfo)
2857 : {
2858 : TableInfo *parentTbinfo;
2859 :
2860 : Assert(tbinfo->ispartition);
2861 : Assert(tbinfo->numParents == 1);
2862 :
2863 2104 : parentTbinfo = tbinfo->parents[0];
2864 2104 : if (parentTbinfo->unsafe_partitions)
2865 158 : return true;
2866 2386 : while (parentTbinfo->ispartition)
2867 : {
2868 : Assert(parentTbinfo->numParents == 1);
2869 440 : parentTbinfo = parentTbinfo->parents[0];
2870 440 : if (parentTbinfo->unsafe_partitions)
2871 0 : return true;
2872 : }
2873 :
2874 1946 : return false;
2875 : }
2876 :
2877 : /*
2878 : * dumpTableData -
2879 : * dump the contents of a single table
2880 : *
2881 : * Actually, this just makes an ArchiveEntry for the table contents.
2882 : */
2883 : static void
2884 8520 : dumpTableData(Archive *fout, const TableDataInfo *tdinfo)
2885 : {
2886 8520 : DumpOptions *dopt = fout->dopt;
2887 8520 : const TableInfo *tbinfo = tdinfo->tdtable;
2888 8520 : PQExpBuffer copyBuf = createPQExpBuffer();
2889 8520 : PQExpBuffer clistBuf = createPQExpBuffer();
2890 : DataDumperPtr dumpFn;
2891 8520 : char *tdDefn = NULL;
2892 : char *copyStmt;
2893 : const char *copyFrom;
2894 :
2895 : /* We had better have loaded per-column details about this table */
2896 : Assert(tbinfo->interesting);
2897 :
2898 : /*
2899 : * When load-via-partition-root is set or forced, get the root table name
2900 : * for the partition table, so that we can reload data through the root
2901 : * table. Then construct a comment to be inserted into the TOC entry's
2902 : * defn field, so that such cases can be identified reliably.
2903 : */
2904 8520 : if (tbinfo->ispartition &&
2905 4112 : (dopt->load_via_partition_root ||
2906 2056 : forcePartitionRootLoad(tbinfo)))
2907 144 : {
2908 : const TableInfo *parentTbinfo;
2909 : char *sanitized;
2910 :
2911 144 : parentTbinfo = getRootTableInfo(tbinfo);
2912 144 : copyFrom = fmtQualifiedDumpable(parentTbinfo);
2913 144 : sanitized = sanitize_line(copyFrom, true);
2914 144 : printfPQExpBuffer(copyBuf, "-- load via partition root %s",
2915 : sanitized);
2916 144 : free(sanitized);
2917 144 : tdDefn = pg_strdup(copyBuf->data);
2918 : }
2919 : else
2920 8376 : copyFrom = fmtQualifiedDumpable(tbinfo);
2921 :
2922 8520 : if (dopt->dump_inserts == 0)
2923 : {
2924 : /* Dump/restore using COPY */
2925 8354 : dumpFn = dumpTableData_copy;
2926 : /* must use two steps here because fmtId is nonreentrant */
2927 8354 : printfPQExpBuffer(copyBuf, "COPY %s ",
2928 : copyFrom);
2929 8354 : appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
2930 : fmtCopyColumnList(tbinfo, clistBuf));
2931 8354 : copyStmt = copyBuf->data;
2932 : }
2933 : else
2934 : {
2935 : /* Restore using INSERT */
2936 166 : dumpFn = dumpTableData_insert;
2937 166 : copyStmt = NULL;
2938 : }
2939 :
2940 : /*
2941 : * Note: although the TableDataInfo is a full DumpableObject, we treat its
2942 : * dependency on its table as "special" and pass it to ArchiveEntry now.
2943 : * See comments for BuildArchiveDependencies.
2944 : */
2945 8520 : if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
2946 : {
2947 : TocEntry *te;
2948 :
2949 8520 : te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
2950 8520 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
2951 : .namespace = tbinfo->dobj.namespace->dobj.name,
2952 : .owner = tbinfo->rolname,
2953 : .description = "TABLE DATA",
2954 : .section = SECTION_DATA,
2955 : .createStmt = tdDefn,
2956 : .copyStmt = copyStmt,
2957 : .deps = &(tbinfo->dobj.dumpId),
2958 : .nDeps = 1,
2959 : .dumpFn = dumpFn,
2960 : .dumpArg = tdinfo));
2961 :
2962 : /*
2963 : * Set the TocEntry's dataLength in case we are doing a parallel dump
2964 : * and want to order dump jobs by table size. We choose to measure
2965 : * dataLength in table pages (including TOAST pages) during dump, so
2966 : * no scaling is needed.
2967 : *
2968 : * However, relpages is declared as "integer" in pg_class, and hence
2969 : * also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
2970 : * Cast so that we get the right interpretation of table sizes
2971 : * exceeding INT_MAX pages.
2972 : */
2973 8520 : te->dataLength = (BlockNumber) tbinfo->relpages;
2974 8520 : te->dataLength += (BlockNumber) tbinfo->toastpages;
2975 :
2976 : /*
2977 : * If pgoff_t is only 32 bits wide, the above refinement is useless,
2978 : * and instead we'd better worry about integer overflow. Clamp to
2979 : * INT_MAX if the correct result exceeds that.
2980 : */
2981 : if (sizeof(te->dataLength) == 4 &&
2982 : (tbinfo->relpages < 0 || tbinfo->toastpages < 0 ||
2983 : te->dataLength < 0))
2984 : te->dataLength = INT_MAX;
2985 : }
2986 :
2987 8520 : destroyPQExpBuffer(copyBuf);
2988 8520 : destroyPQExpBuffer(clistBuf);
2989 8520 : }
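/*
 * Illustrative sketch (not part of pg_dump.c) of the dataLength arithmetic
 * above.  relpages/toastpages are stored as signed "integer" but really hold
 * BlockNumber values, so a table larger than INT_MAX pages reads back as a
 * negative int; casting to an unsigned 32-bit type before the addition
 * recovers the intended magnitude.  If the accumulator type were only 32 bits
 * wide, the result would instead be clamped to INT_MAX.  pgoff_t is assumed
 * to be a signed integer type, as in the real code.
 */
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>

typedef int64_t pgoff_t_sketch;	/* stand-in for te->dataLength's type */

static pgoff_t_sketch
estimate_data_length(int relpages, int toastpages)
{
	pgoff_t_sketch len;

	len = (uint32_t) relpages;	/* reinterpret as BlockNumber */
	len += (uint32_t) toastpages;

	if (sizeof(len) == 4 &&
		(relpages < 0 || toastpages < 0 || len < 0))
		len = INT_MAX;
	return len;
}

int
main(void)
{
	/* 0x80000000 pages reads back from pg_class as INT_MIN */
	printf("%" PRId64 " pages\n", estimate_data_length(INT_MIN, 0));
	return 0;
}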
2990 :
2991 : /*
2992 : * refreshMatViewData -
2993 : * load or refresh the contents of a single materialized view
2994 : *
2995 : * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
2996 : * statement.
2997 : */
2998 : static void
2999 690 : refreshMatViewData(Archive *fout, const TableDataInfo *tdinfo)
3000 : {
3001 690 : TableInfo *tbinfo = tdinfo->tdtable;
3002 : PQExpBuffer q;
3003 :
3004 : /* If the materialized view is not flagged as populated, skip this. */
3005 690 : if (!tbinfo->relispopulated)
3006 136 : return;
3007 :
3008 554 : q = createPQExpBuffer();
3009 :
3010 554 : appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
3011 554 : fmtQualifiedDumpable(tbinfo));
3012 :
3013 554 : if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
3014 554 : ArchiveEntry(fout,
3015 : tdinfo->dobj.catId, /* catalog ID */
3016 554 : tdinfo->dobj.dumpId, /* dump ID */
3017 554 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
3018 : .namespace = tbinfo->dobj.namespace->dobj.name,
3019 : .owner = tbinfo->rolname,
3020 : .description = "MATERIALIZED VIEW DATA",
3021 : .section = SECTION_POST_DATA,
3022 : .createStmt = q->data,
3023 : .deps = tdinfo->dobj.dependencies,
3024 : .nDeps = tdinfo->dobj.nDeps));
3025 :
3026 554 : destroyPQExpBuffer(q);
3027 : }
3028 :
3029 : /*
3030 : * getTableData -
3031 : * set up dumpable objects representing the contents of tables
3032 : */
3033 : static void
3034 360 : getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
3035 : {
3036 : int i;
3037 :
3038 95522 : for (i = 0; i < numTables; i++)
3039 : {
3040 95162 : if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
3041 1866 : (!relkind || tblinfo[i].relkind == relkind))
3042 11852 : makeTableDataInfo(dopt, &(tblinfo[i]));
3043 : }
3044 360 : }
3045 :
3046 : /*
3047 : * Make a dumpable object for the data of this specific table
3048 : *
3049 : * Note: we make a TableDataInfo if and only if we are going to dump the
3050 : * table data; the "dump" field in such objects isn't very interesting.
3051 : */
3052 : static void
3053 12082 : makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo)
3054 : {
3055 : TableDataInfo *tdinfo;
3056 :
3057 : /*
3058 : * Nothing to do if we already decided to dump this table's data. This
3059 : * will happen for extension "config" tables.
3060 : */
3061 12082 : if (tbinfo->dataObj != NULL)
3062 2 : return;
3063 :
3064 : /* Skip VIEWs (no data to dump) */
3065 12080 : if (tbinfo->relkind == RELKIND_VIEW)
3066 968 : return;
3067 : /* Skip FOREIGN TABLEs (no data to dump) unless requested explicitly */
3068 11112 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
3069 76 : (foreign_servers_include_oids.head == NULL ||
3070 8 : !simple_oid_list_member(&foreign_servers_include_oids,
3071 : tbinfo->foreign_server)))
3072 74 : return;
3073 : /* Skip partitioned tables (data in partitions) */
3074 11038 : if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
3075 990 : return;
3076 :
3077 : /* Don't dump data in unlogged tables, if so requested */
3078 10048 : if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
3079 82 : dopt->no_unlogged_table_data)
3080 36 : return;
3081 :
3082 : /* Check that the data is not explicitly excluded */
3083 10012 : if (simple_oid_list_member(&tabledata_exclude_oids,
3084 : tbinfo->dobj.catId.oid))
3085 16 : return;
3086 :
3087 : /* OK, let's dump it */
3088 9996 : tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
3089 :
3090 9996 : if (tbinfo->relkind == RELKIND_MATVIEW)
3091 690 : tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
3092 9306 : else if (tbinfo->relkind == RELKIND_SEQUENCE)
3093 786 : tdinfo->dobj.objType = DO_SEQUENCE_SET;
3094 : else
3095 8520 : tdinfo->dobj.objType = DO_TABLE_DATA;
3096 :
3097 : /*
3098 : * Note: use tableoid 0 so that this object won't be mistaken for
3099 : * something that pg_depend entries apply to.
3100 : */
3101 9996 : tdinfo->dobj.catId.tableoid = 0;
3102 9996 : tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
3103 9996 : AssignDumpId(&tdinfo->dobj);
3104 9996 : tdinfo->dobj.name = tbinfo->dobj.name;
3105 9996 : tdinfo->dobj.namespace = tbinfo->dobj.namespace;
3106 9996 : tdinfo->tdtable = tbinfo;
3107 9996 : tdinfo->filtercond = NULL; /* might get set later */
3108 9996 : addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
3109 :
3110 : /* A TableDataInfo contains data, of course */
3111 9996 : tdinfo->dobj.components |= DUMP_COMPONENT_DATA;
3112 :
3113 9996 : tbinfo->dataObj = tdinfo;
3114 :
3115 : /*
3116 : * Materialized view statistics must be restored after the data, because
3117 : * REFRESH MATERIALIZED VIEW replaces the storage and resets the stats.
3118 : *
3119 : * The dependency is added here, rather than where the statistics object
3120 : * is created, because the statistics object already exists by this point.
3121 : */
3122 9996 : if (tbinfo->relkind == RELKIND_MATVIEW && tbinfo->stats != NULL)
3123 : {
3124 536 : tbinfo->stats->section = SECTION_POST_DATA;
3125 536 : addObjectDependency(&tbinfo->stats->dobj, tdinfo->dobj.dumpId);
3126 : }
3127 :
3128 : /* Make sure that we'll collect per-column info for this table. */
3129 9996 : tbinfo->interesting = true;
3130 : }
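/*
 * Illustrative sketch (not part of pg_dump.c) of the filtering logic in
 * makeTableDataInfo() above: decide whether a relation's data is dumped at
 * all, given its relkind and persistence.  Relkind letters match pg_class
 * ('r' table, 'v' view, 'f' foreign table, 'p' partitioned table); the helper
 * name is hypothetical, and the foreign-server and exclude-list checks of the
 * real code are omitted.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
wants_table_data(char relkind, char relpersistence, bool no_unlogged_data)
{
	if (relkind == 'v')
		return false;			/* views have no storage */
	if (relkind == 'f')
		return false;			/* foreign tables: only with --include-foreign-data */
	if (relkind == 'p')
		return false;			/* data lives in the leaf partitions */
	if (relpersistence == 'u' && no_unlogged_data)
		return false;			/* --no-unlogged-table-data */
	return true;
}

int
main(void)
{
	printf("plain table: %d\n", wants_table_data('r', 'p', false));
	printf("partitioned: %d\n", wants_table_data('p', 'p', false));
	printf("unlogged, excluded: %d\n", wants_table_data('r', 'u', true));
	return 0;
}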
3131 :
3132 : /*
3133 : * The refresh for a materialized view must be dependent on the refresh for
3134 : * any materialized view that this one is dependent on.
3135 : *
3136 : * This must be called after all the objects are created, but before they are
3137 : * sorted.
3138 : */
3139 : static void
3140 292 : buildMatViewRefreshDependencies(Archive *fout)
3141 : {
3142 : PQExpBuffer query;
3143 : PGresult *res;
3144 : int ntups,
3145 : i;
3146 : int i_classid,
3147 : i_objid,
3148 : i_refobjid;
3149 :
3150 : /* No Mat Views before 9.3. */
3151 292 : if (fout->remoteVersion < 90300)
3152 0 : return;
3153 :
3154 292 : query = createPQExpBuffer();
3155 :
3156 292 : appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
3157 : "( "
3158 : "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
3159 : "FROM pg_depend d1 "
3160 : "JOIN pg_class c1 ON c1.oid = d1.objid "
3161 : "AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
3162 : " JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
3163 : "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
3164 : "AND d2.objid = r1.oid "
3165 : "AND d2.refobjid <> d1.objid "
3166 : "JOIN pg_class c2 ON c2.oid = d2.refobjid "
3167 : "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
3168 : CppAsString2(RELKIND_VIEW) ") "
3169 : "WHERE d1.classid = 'pg_class'::regclass "
3170 : "UNION "
3171 : "SELECT w.objid, d3.refobjid, c3.relkind "
3172 : "FROM w "
3173 : "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
3174 : "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
3175 : "AND d3.objid = r3.oid "
3176 : "AND d3.refobjid <> w.refobjid "
3177 : "JOIN pg_class c3 ON c3.oid = d3.refobjid "
3178 : "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
3179 : CppAsString2(RELKIND_VIEW) ") "
3180 : ") "
3181 : "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
3182 : "FROM w "
3183 : "WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
3184 :
3185 292 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
3186 :
3187 292 : ntups = PQntuples(res);
3188 :
3189 292 : i_classid = PQfnumber(res, "classid");
3190 292 : i_objid = PQfnumber(res, "objid");
3191 292 : i_refobjid = PQfnumber(res, "refobjid");
3192 :
3193 820 : for (i = 0; i < ntups; i++)
3194 : {
3195 : CatalogId objId;
3196 : CatalogId refobjId;
3197 : DumpableObject *dobj;
3198 : DumpableObject *refdobj;
3199 : TableInfo *tbinfo;
3200 : TableInfo *reftbinfo;
3201 :
3202 528 : objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
3203 528 : objId.oid = atooid(PQgetvalue(res, i, i_objid));
3204 528 : refobjId.tableoid = objId.tableoid;
3205 528 : refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
3206 :
3207 528 : dobj = findObjectByCatalogId(objId);
3208 528 : if (dobj == NULL)
3209 96 : continue;
3210 :
3211 : Assert(dobj->objType == DO_TABLE);
3212 528 : tbinfo = (TableInfo *) dobj;
3213 : Assert(tbinfo->relkind == RELKIND_MATVIEW);
3214 528 : dobj = (DumpableObject *) tbinfo->dataObj;
3215 528 : if (dobj == NULL)
3216 96 : continue;
3217 : Assert(dobj->objType == DO_REFRESH_MATVIEW);
3218 :
3219 432 : refdobj = findObjectByCatalogId(refobjId);
3220 432 : if (refdobj == NULL)
3221 0 : continue;
3222 :
3223 : Assert(refdobj->objType == DO_TABLE);
3224 432 : reftbinfo = (TableInfo *) refdobj;
3225 : Assert(reftbinfo->relkind == RELKIND_MATVIEW);
3226 432 : refdobj = (DumpableObject *) reftbinfo->dataObj;
3227 432 : if (refdobj == NULL)
3228 0 : continue;
3229 : Assert(refdobj->objType == DO_REFRESH_MATVIEW);
3230 :
3231 432 : addObjectDependency(dobj, refdobj->dumpId);
3232 :
3233 432 : if (!reftbinfo->relispopulated)
3234 68 : tbinfo->relispopulated = false;
3235 : }
3236 :
3237 292 : PQclear(res);
3238 :
3239 292 : destroyPQExpBuffer(query);
3240 : }
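/*
 * Illustrative sketch (not part of pg_dump.c) of what the loop above does
 * with each (objid, refobjid) pair returned by the recursive query: the
 * dependent matview's REFRESH must wait for the referenced matview's REFRESH,
 * and if the referenced matview is known to be unpopulated, the dependent one
 * is treated as unpopulated too, so its REFRESH is skipped as well.  The type
 * and helper below are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	const char *name;
	bool		populated;
	int			ndeps;
	int			deps[8];		/* indexes of matviews refreshed first */
} MatViewSketch;

static void
add_refresh_dependency(MatViewSketch *mv, int dependent, int referenced)
{
	mv[dependent].deps[mv[dependent].ndeps++] = referenced;
	if (!mv[referenced].populated)
		mv[dependent].populated = false;
}

int
main(void)
{
	MatViewSketch mv[] = {
		{"base_mv", false, 0, {0}},	/* never refreshed with data */
		{"top_mv", true, 0, {0}},	/* SELECTs from base_mv */
	};

	add_refresh_dependency(mv, 1, 0);
	printf("%s: populated=%d, depends on %s\n",
		   mv[1].name, mv[1].populated, mv[mv[1].deps[0]].name);
	return 0;
}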
3241 :
3242 : /*
3243 : * getTableDataFKConstraints -
3244 : * add dump-order dependencies reflecting foreign key constraints
3245 : *
3246 : * This code is executed only in a data-only dump --- in schema+data dumps
3247 : * we handle foreign key issues by not creating the FK constraints until
3248 : * after the data is loaded. In a data-only dump, however, we want to
3249 : * order the table data objects in such a way that a table's referenced
3250 : * tables are restored first. (In the presence of circular references or
3251 : * self-references this may be impossible; we'll detect and complain about
3252 : * that during the dependency sorting step.)
3253 : */
3254 : static void
3255 14 : getTableDataFKConstraints(void)
3256 : {
3257 : DumpableObject **dobjs;
3258 : int numObjs;
3259 : int i;
3260 :
3261 : /* Search through all the dumpable objects for FK constraints */
3262 14 : getDumpableObjects(&dobjs, &numObjs);
3263 51888 : for (i = 0; i < numObjs; i++)
3264 : {
3265 51874 : if (dobjs[i]->objType == DO_FK_CONSTRAINT)
3266 : {
3267 16 : ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
3268 : TableInfo *ftable;
3269 :
3270 : /* Not interesting unless both tables are to be dumped */
3271 16 : if (cinfo->contable == NULL ||
3272 16 : cinfo->contable->dataObj == NULL)
3273 8 : continue;
3274 8 : ftable = findTableByOid(cinfo->confrelid);
3275 8 : if (ftable == NULL ||
3276 8 : ftable->dataObj == NULL)
3277 0 : continue;
3278 :
3279 : /*
3280 : * Okay, make referencing table's TABLE_DATA object depend on the
3281 : * referenced table's TABLE_DATA object.
3282 : */
3283 8 : addObjectDependency(&cinfo->contable->dataObj->dobj,
3284 8 : ftable->dataObj->dobj.dumpId);
3285 : }
3286 : }
3287 14 : free(dobjs);
3288 14 : }
3289 :
3290 :
3291 : /*
3292 : * dumpDatabase:
3293 : * dump the database definition
3294 : */
3295 : static void
3296 174 : dumpDatabase(Archive *fout)
3297 : {
3298 174 : DumpOptions *dopt = fout->dopt;
3299 174 : PQExpBuffer dbQry = createPQExpBuffer();
3300 174 : PQExpBuffer delQry = createPQExpBuffer();
3301 174 : PQExpBuffer creaQry = createPQExpBuffer();
3302 174 : PQExpBuffer labelq = createPQExpBuffer();
3303 174 : PGconn *conn = GetConnection(fout);
3304 : PGresult *res;
3305 : int i_tableoid,
3306 : i_oid,
3307 : i_datname,
3308 : i_datdba,
3309 : i_encoding,
3310 : i_datlocprovider,
3311 : i_collate,
3312 : i_ctype,
3313 : i_datlocale,
3314 : i_daticurules,
3315 : i_frozenxid,
3316 : i_minmxid,
3317 : i_datacl,
3318 : i_acldefault,
3319 : i_datistemplate,
3320 : i_datconnlimit,
3321 : i_datcollversion,
3322 : i_tablespace;
3323 : CatalogId dbCatId;
3324 : DumpId dbDumpId;
3325 : DumpableAcl dbdacl;
3326 : const char *datname,
3327 : *dba,
3328 : *encoding,
3329 : *datlocprovider,
3330 : *collate,
3331 : *ctype,
3332 : *locale,
3333 : *icurules,
3334 : *datistemplate,
3335 : *datconnlimit,
3336 : *tablespace;
3337 : uint32 frozenxid,
3338 : minmxid;
3339 : char *qdatname;
3340 :
3341 174 : pg_log_info("saving database definition");
3342 :
3343 : /*
3344 : * Fetch the database-level properties for this database.
3345 : */
3346 174 : appendPQExpBufferStr(dbQry, "SELECT tableoid, oid, datname, "
3347 : "datdba, "
3348 : "pg_encoding_to_char(encoding) AS encoding, "
3349 : "datcollate, datctype, datfrozenxid, "
3350 : "datacl, acldefault('d', datdba) AS acldefault, "
3351 : "datistemplate, datconnlimit, ");
3352 174 : if (fout->remoteVersion >= 90300)
3353 174 : appendPQExpBufferStr(dbQry, "datminmxid, ");
3354 : else
3355 0 : appendPQExpBufferStr(dbQry, "0 AS datminmxid, ");
3356 174 : if (fout->remoteVersion >= 170000)
3357 174 : appendPQExpBufferStr(dbQry, "datlocprovider, datlocale, datcollversion, ");
3358 0 : else if (fout->remoteVersion >= 150000)
3359 0 : appendPQExpBufferStr(dbQry, "datlocprovider, daticulocale AS datlocale, datcollversion, ");
3360 : else
3361 0 : appendPQExpBufferStr(dbQry, "'c' AS datlocprovider, NULL AS datlocale, NULL AS datcollversion, ");
3362 174 : if (fout->remoteVersion >= 160000)
3363 174 : appendPQExpBufferStr(dbQry, "daticurules, ");
3364 : else
3365 0 : appendPQExpBufferStr(dbQry, "NULL AS daticurules, ");
3366 174 : appendPQExpBufferStr(dbQry,
3367 : "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
3368 : "shobj_description(oid, 'pg_database') AS description "
3369 : "FROM pg_database "
3370 : "WHERE datname = current_database()");
3371 :
3372 174 : res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
3373 :
3374 174 : i_tableoid = PQfnumber(res, "tableoid");
3375 174 : i_oid = PQfnumber(res, "oid");
3376 174 : i_datname = PQfnumber(res, "datname");
3377 174 : i_datdba = PQfnumber(res, "datdba");
3378 174 : i_encoding = PQfnumber(res, "encoding");
3379 174 : i_datlocprovider = PQfnumber(res, "datlocprovider");
3380 174 : i_collate = PQfnumber(res, "datcollate");
3381 174 : i_ctype = PQfnumber(res, "datctype");
3382 174 : i_datlocale = PQfnumber(res, "datlocale");
3383 174 : i_daticurules = PQfnumber(res, "daticurules");
3384 174 : i_frozenxid = PQfnumber(res, "datfrozenxid");
3385 174 : i_minmxid = PQfnumber(res, "datminmxid");
3386 174 : i_datacl = PQfnumber(res, "datacl");
3387 174 : i_acldefault = PQfnumber(res, "acldefault");
3388 174 : i_datistemplate = PQfnumber(res, "datistemplate");
3389 174 : i_datconnlimit = PQfnumber(res, "datconnlimit");
3390 174 : i_datcollversion = PQfnumber(res, "datcollversion");
3391 174 : i_tablespace = PQfnumber(res, "tablespace");
3392 :
3393 174 : dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
3394 174 : dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
3395 174 : datname = PQgetvalue(res, 0, i_datname);
3396 174 : dba = getRoleName(PQgetvalue(res, 0, i_datdba));
3397 174 : encoding = PQgetvalue(res, 0, i_encoding);
3398 174 : datlocprovider = PQgetvalue(res, 0, i_datlocprovider);
3399 174 : collate = PQgetvalue(res, 0, i_collate);
3400 174 : ctype = PQgetvalue(res, 0, i_ctype);
3401 174 : if (!PQgetisnull(res, 0, i_datlocale))
3402 28 : locale = PQgetvalue(res, 0, i_datlocale);
3403 : else
3404 146 : locale = NULL;
3405 174 : if (!PQgetisnull(res, 0, i_daticurules))
3406 0 : icurules = PQgetvalue(res, 0, i_daticurules);
3407 : else
3408 174 : icurules = NULL;
3409 174 : frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
3410 174 : minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
3411 174 : dbdacl.acl = PQgetvalue(res, 0, i_datacl);
3412 174 : dbdacl.acldefault = PQgetvalue(res, 0, i_acldefault);
3413 174 : datistemplate = PQgetvalue(res, 0, i_datistemplate);
3414 174 : datconnlimit = PQgetvalue(res, 0, i_datconnlimit);
3415 174 : tablespace = PQgetvalue(res, 0, i_tablespace);
3416 :
3417 174 : qdatname = pg_strdup(fmtId(datname));
3418 :
3419 : /*
3420 : * Prepare the CREATE DATABASE command. We must specify OID (if we want
3421 : * to preserve that), as well as the encoding, locale, and tablespace
3422 : * since those can't be altered later. Other DB properties are left to
3423 : * the DATABASE PROPERTIES entry, so that they can be applied after
3424 : * reconnecting to the target DB.
3425 : *
3426 : * For binary upgrade, we use the FILE_COPY strategy because testing has
3427 : * shown it to be faster. When the server is in binary upgrade mode, it
3428 : * will also skip the checkpoints this strategy ordinarily performs.
3429 : */
3430 174 : if (dopt->binary_upgrade)
3431 : {
3432 74 : appendPQExpBuffer(creaQry,
3433 : "CREATE DATABASE %s WITH TEMPLATE = template0 "
3434 : "OID = %u STRATEGY = FILE_COPY",
3435 : qdatname, dbCatId.oid);
3436 : }
3437 : else
3438 : {
3439 100 : appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
3440 : qdatname);
3441 : }
3442 174 : if (strlen(encoding) > 0)
3443 : {
3444 174 : appendPQExpBufferStr(creaQry, " ENCODING = ");
3445 174 : appendStringLiteralAH(creaQry, encoding, fout);
3446 : }
3447 :
3448 174 : appendPQExpBufferStr(creaQry, " LOCALE_PROVIDER = ");
3449 174 : if (datlocprovider[0] == 'b')
3450 28 : appendPQExpBufferStr(creaQry, "builtin");
3451 146 : else if (datlocprovider[0] == 'c')
3452 146 : appendPQExpBufferStr(creaQry, "libc");
3453 0 : else if (datlocprovider[0] == 'i')
3454 0 : appendPQExpBufferStr(creaQry, "icu");
3455 : else
3456 0 : pg_fatal("unrecognized locale provider: %s",
3457 : datlocprovider);
3458 :
3459 174 : if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
3460 : {
3461 174 : appendPQExpBufferStr(creaQry, " LOCALE = ");
3462 174 : appendStringLiteralAH(creaQry, collate, fout);
3463 : }
3464 : else
3465 : {
3466 0 : if (strlen(collate) > 0)
3467 : {
3468 0 : appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
3469 0 : appendStringLiteralAH(creaQry, collate, fout);
3470 : }
3471 0 : if (strlen(ctype) > 0)
3472 : {
3473 0 : appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
3474 0 : appendStringLiteralAH(creaQry, ctype, fout);
3475 : }
3476 : }
3477 174 : if (locale)
3478 : {
3479 28 : if (datlocprovider[0] == 'b')
3480 28 : appendPQExpBufferStr(creaQry, " BUILTIN_LOCALE = ");
3481 : else
3482 0 : appendPQExpBufferStr(creaQry, " ICU_LOCALE = ");
3483 :
3484 28 : appendStringLiteralAH(creaQry, locale, fout);
3485 : }
3486 :
3487 174 : if (icurules)
3488 : {
3489 0 : appendPQExpBufferStr(creaQry, " ICU_RULES = ");
3490 0 : appendStringLiteralAH(creaQry, icurules, fout);
3491 : }
3492 :
3493 : /*
3494 : * For binary upgrade, carry over the collation version. For normal
3495 : * dump/restore, omit the version, so that it is computed upon restore.
3496 : */
3497 174 : if (dopt->binary_upgrade)
3498 : {
3499 74 : if (!PQgetisnull(res, 0, i_datcollversion))
3500 : {
3501 74 : appendPQExpBufferStr(creaQry, " COLLATION_VERSION = ");
3502 74 : appendStringLiteralAH(creaQry,
3503 : PQgetvalue(res, 0, i_datcollversion),
3504 : fout);
3505 : }
3506 : }
3507 :
3508 : /*
3509 : * Note: looking at dopt->outputNoTablespaces here is completely the wrong
3510 : * thing; the decision whether to specify a tablespace should be left till
3511 : * pg_restore, so that pg_restore --no-tablespaces applies. Ideally we'd
3512 : * label the DATABASE entry with the tablespace and let the normal
3513 : * tablespace selection logic work ... but CREATE DATABASE doesn't pay
3514 : * attention to default_tablespace, so that won't work.
3515 : */
3516 174 : if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0 &&
3517 10 : !dopt->outputNoTablespaces)
3518 10 : appendPQExpBuffer(creaQry, " TABLESPACE = %s",
3519 : fmtId(tablespace));
3520 174 : appendPQExpBufferStr(creaQry, ";\n");
3521 :
3522 174 : appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
3523 : qdatname);
3524 :
3525 174 : dbDumpId = createDumpId();
3526 :
3527 174 : ArchiveEntry(fout,
3528 : dbCatId, /* catalog ID */
3529 : dbDumpId, /* dump ID */
3530 174 : ARCHIVE_OPTS(.tag = datname,
3531 : .owner = dba,
3532 : .description = "DATABASE",
3533 : .section = SECTION_PRE_DATA,
3534 : .createStmt = creaQry->data,
3535 : .dropStmt = delQry->data));
3536 :
3537 : /* Compute correct tag for archive entry */
3538 174 : appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
3539 :
3540 : /* Dump DB comment if any */
3541 : {
3542 : /*
3543 : * 8.2 and up keep comments on shared objects in a shared table, so we
3544 : * cannot use the dumpComment() code used for other database objects.
3545 : * Be careful that the ArchiveEntry parameters match that function.
3546 : */
3547 174 : char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
3548 :
3549 174 : if (comment && *comment && !dopt->no_comments)
3550 : {
3551 84 : resetPQExpBuffer(dbQry);
3552 :
3553 : /*
3554 : * Generates a warning when loaded into a differently-named
3555 : * database.
3556 : */
3557 84 : appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", qdatname);
3558 84 : appendStringLiteralAH(dbQry, comment, fout);
3559 84 : appendPQExpBufferStr(dbQry, ";\n");
3560 :
3561 84 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3562 84 : ARCHIVE_OPTS(.tag = labelq->data,
3563 : .owner = dba,
3564 : .description = "COMMENT",
3565 : .section = SECTION_NONE,
3566 : .createStmt = dbQry->data,
3567 : .deps = &dbDumpId,
3568 : .nDeps = 1));
3569 : }
3570 : }
3571 :
3572 : /* Dump DB security label, if enabled */
3573 174 : if (!dopt->no_security_labels)
3574 : {
3575 : PGresult *shres;
3576 : PQExpBuffer seclabelQry;
3577 :
3578 174 : seclabelQry = createPQExpBuffer();
3579 :
3580 174 : buildShSecLabelQuery("pg_database", dbCatId.oid, seclabelQry);
3581 174 : shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
3582 174 : resetPQExpBuffer(seclabelQry);
3583 174 : emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
3584 174 : if (seclabelQry->len > 0)
3585 0 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3586 0 : ARCHIVE_OPTS(.tag = labelq->data,
3587 : .owner = dba,
3588 : .description = "SECURITY LABEL",
3589 : .section = SECTION_NONE,
3590 : .createStmt = seclabelQry->data,
3591 : .deps = &dbDumpId,
3592 : .nDeps = 1));
3593 174 : destroyPQExpBuffer(seclabelQry);
3594 174 : PQclear(shres);
3595 : }
3596 :
3597 : /*
3598 : * Dump ACL if any. Note that we do not support initial privileges
3599 : * (pg_init_privs) on databases.
3600 : */
3601 174 : dbdacl.privtype = 0;
3602 174 : dbdacl.initprivs = NULL;
3603 :
3604 174 : dumpACL(fout, dbDumpId, InvalidDumpId, "DATABASE",
3605 : qdatname, NULL, NULL,
3606 : NULL, dba, &dbdacl);
3607 :
3608 : /*
3609 : * Now construct a DATABASE PROPERTIES archive entry to restore any
3610 : * non-default database-level properties. (The reason this must be
3611 : * separate is that we cannot put any additional commands into the TOC
3612 : * entry that has CREATE DATABASE. pg_restore would execute such a group
3613 : * in an implicit transaction block, and the backend won't allow CREATE
3614 : * DATABASE in that context.)
3615 : */
3616 174 : resetPQExpBuffer(creaQry);
3617 174 : resetPQExpBuffer(delQry);
3618 :
3619 174 : if (strlen(datconnlimit) > 0 && strcmp(datconnlimit, "-1") != 0)
3620 0 : appendPQExpBuffer(creaQry, "ALTER DATABASE %s CONNECTION LIMIT = %s;\n",
3621 : qdatname, datconnlimit);
3622 :
3623 174 : if (strcmp(datistemplate, "t") == 0)
3624 : {
3625 22 : appendPQExpBuffer(creaQry, "ALTER DATABASE %s IS_TEMPLATE = true;\n",
3626 : qdatname);
3627 :
3628 : /*
3629 : * The backend won't accept DROP DATABASE on a template database. We
3630 : * can deal with that by removing the template marking before the DROP
3631 : * gets issued. We'd prefer to use ALTER DATABASE IF EXISTS here, but
3632 : * since no such command is currently supported, fake it with a direct
3633 : * UPDATE on pg_database.
3634 : */
3635 22 : appendPQExpBufferStr(delQry, "UPDATE pg_catalog.pg_database "
3636 : "SET datistemplate = false WHERE datname = ");
3637 22 : appendStringLiteralAH(delQry, datname, fout);
3638 22 : appendPQExpBufferStr(delQry, ";\n");
3639 : }
3640 :
3641 : /*
3642 : * We do not restore pg_database.dathasloginevt because it is set
3643 : * automatically on login event trigger creation.
3644 : */
3645 :
3646 : /* Add database-specific SET options */
3647 174 : dumpDatabaseConfig(fout, creaQry, datname, dbCatId.oid);
3648 :
3649 : /*
3650 : * We stick this binary-upgrade query into the DATABASE PROPERTIES archive
3651 : * entry, too, for lack of a better place.
3652 : */
3653 174 : if (dopt->binary_upgrade)
3654 : {
3655 74 : appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
3656 74 : appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
3657 : "SET datfrozenxid = '%u', datminmxid = '%u'\n"
3658 : "WHERE datname = ",
3659 : frozenxid, minmxid);
3660 74 : appendStringLiteralAH(creaQry, datname, fout);
3661 74 : appendPQExpBufferStr(creaQry, ";\n");
3662 : }
3663 :
3664 174 : if (creaQry->len > 0)
3665 82 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3666 82 : ARCHIVE_OPTS(.tag = datname,
3667 : .owner = dba,
3668 : .description = "DATABASE PROPERTIES",
3669 : .section = SECTION_PRE_DATA,
3670 : .createStmt = creaQry->data,
3671 : .dropStmt = delQry->data,
3672 : .deps = &dbDumpId));
3673 :
3674 : /*
3675 : * pg_largeobject comes from the old system intact, so set its
3676 : * relfrozenxids, relminmxids and relfilenode.
3677 : *
3678 : * pg_largeobject_metadata also comes from the old system intact for
3679 : * upgrades from v16 and newer, so set its relfrozenxids, relminmxids, and
3680 : * relfilenode, too. pg_upgrade can't copy/link the files from older
3681 : * versions because aclitem (needed by pg_largeobject_metadata.lomacl)
3682 : * changed its storage format in v16.
3683 : */
3684 174 : if (dopt->binary_upgrade)
3685 : {
3686 : PGresult *lo_res;
3687 74 : PQExpBuffer loFrozenQry = createPQExpBuffer();
3688 74 : PQExpBuffer loOutQry = createPQExpBuffer();
3689 74 : PQExpBuffer lomOutQry = createPQExpBuffer();
3690 74 : PQExpBuffer loHorizonQry = createPQExpBuffer();
3691 74 : PQExpBuffer lomHorizonQry = createPQExpBuffer();
3692 : int ii_relfrozenxid,
3693 : ii_relfilenode,
3694 : ii_oid,
3695 : ii_relminmxid;
3696 :
3697 74 : if (fout->remoteVersion >= 90300)
3698 74 : appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid, relfilenode, oid\n"
3699 : "FROM pg_catalog.pg_class\n"
3700 : "WHERE oid IN (%u, %u, %u, %u);\n",
3701 : LargeObjectRelationId, LargeObjectLOidPNIndexId,
3702 : LargeObjectMetadataRelationId, LargeObjectMetadataOidIndexId);
3703 : else
3704 0 : appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid, relfilenode, oid\n"
3705 : "FROM pg_catalog.pg_class\n"
3706 : "WHERE oid IN (%u, %u);\n",
3707 : LargeObjectRelationId, LargeObjectLOidPNIndexId);
3708 :
3709 74 : lo_res = ExecuteSqlQuery(fout, loFrozenQry->data, PGRES_TUPLES_OK);
3710 :
3711 74 : ii_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
3712 74 : ii_relminmxid = PQfnumber(lo_res, "relminmxid");
3713 74 : ii_relfilenode = PQfnumber(lo_res, "relfilenode");
3714 74 : ii_oid = PQfnumber(lo_res, "oid");
3715 :
3716 74 : appendPQExpBufferStr(loHorizonQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
3717 74 : appendPQExpBufferStr(lomHorizonQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
3718 74 : appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
3719 74 : appendPQExpBufferStr(lomOutQry, "\n-- For binary upgrade, preserve pg_largeobject_metadata and index relfilenodes\n");
3720 370 : for (int i = 0; i < PQntuples(lo_res); ++i)
3721 : {
3722 : Oid oid;
3723 : RelFileNumber relfilenumber;
3724 : PQExpBuffer horizonQry;
3725 : PQExpBuffer outQry;
3726 :
3727 296 : oid = atooid(PQgetvalue(lo_res, i, ii_oid));
3728 296 : relfilenumber = atooid(PQgetvalue(lo_res, i, ii_relfilenode));
3729 :
3730 296 : if (oid == LargeObjectRelationId ||
3731 : oid == LargeObjectLOidPNIndexId)
3732 : {
3733 148 : horizonQry = loHorizonQry;
3734 148 : outQry = loOutQry;
3735 : }
3736 : else
3737 : {
3738 148 : horizonQry = lomHorizonQry;
3739 148 : outQry = lomOutQry;
3740 : }
3741 :
3742 296 : appendPQExpBuffer(horizonQry, "UPDATE pg_catalog.pg_class\n"
3743 : "SET relfrozenxid = '%u', relminmxid = '%u'\n"
3744 : "WHERE oid = %u;\n",
3745 296 : atooid(PQgetvalue(lo_res, i, ii_relfrozenxid)),
3746 296 : atooid(PQgetvalue(lo_res, i, ii_relminmxid)),
3747 296 : atooid(PQgetvalue(lo_res, i, ii_oid)));
3748 :
3749 296 : if (oid == LargeObjectRelationId ||
3750 : oid == LargeObjectMetadataRelationId)
3751 148 : appendPQExpBuffer(outQry,
3752 : "SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
3753 : relfilenumber);
3754 148 : else if (oid == LargeObjectLOidPNIndexId ||
3755 : oid == LargeObjectMetadataOidIndexId)
3756 148 : appendPQExpBuffer(outQry,
3757 : "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
3758 : relfilenumber);
3759 : }
3760 :
3761 74 : appendPQExpBufferStr(loOutQry,
3762 : "TRUNCATE pg_catalog.pg_largeobject;\n");
3763 74 : appendPQExpBufferStr(lomOutQry,
3764 : "TRUNCATE pg_catalog.pg_largeobject_metadata;\n");
3765 :
3766 74 : appendPQExpBufferStr(loOutQry, loHorizonQry->data);
3767 74 : appendPQExpBufferStr(lomOutQry, lomHorizonQry->data);
3768 :
3769 74 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3770 74 : ARCHIVE_OPTS(.tag = "pg_largeobject",
3771 : .description = "pg_largeobject",
3772 : .section = SECTION_PRE_DATA,
3773 : .createStmt = loOutQry->data));
3774 :
3775 74 : if (fout->remoteVersion >= 160000)
3776 74 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
3777 74 : ARCHIVE_OPTS(.tag = "pg_largeobject_metadata",
3778 : .description = "pg_largeobject_metadata",
3779 : .section = SECTION_PRE_DATA,
3780 : .createStmt = lomOutQry->data));
3781 :
3782 74 : PQclear(lo_res);
3783 :
3784 74 : destroyPQExpBuffer(loFrozenQry);
3785 74 : destroyPQExpBuffer(loHorizonQry);
3786 74 : destroyPQExpBuffer(lomHorizonQry);
3787 74 : destroyPQExpBuffer(loOutQry);
3788 74 : destroyPQExpBuffer(lomOutQry);
3789 : }
3790 :
3791 174 : PQclear(res);
3792 :
3793 174 : free(qdatname);
3794 174 : destroyPQExpBuffer(dbQry);
3795 174 : destroyPQExpBuffer(delQry);
3796 174 : destroyPQExpBuffer(creaQry);
3797 174 : destroyPQExpBuffer(labelq);
3798 174 : }
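/*
 * Illustrative sketch (not part of pg_dump.c) of the CREATE DATABASE command
 * assembled above: only the properties that cannot be changed after creation
 * (encoding, locale provider, locale, tablespace, and the OID in binary
 * upgrade mode) go into this statement; everything else is deferred to the
 * separate DATABASE PROPERTIES entry.  Quoting is deliberately simplified
 * here and the builder name is hypothetical.
 */
#include <stdio.h>

static void
build_create_database(char *buf, size_t sz, const char *dbname,
					  const char *encoding, char locprovider,
					  const char *locale)
{
	const char *provider =
		(locprovider == 'b') ? "builtin" :
		(locprovider == 'i') ? "icu" : "libc";

	snprintf(buf, sz,
			 "CREATE DATABASE %s WITH TEMPLATE = template0 "
			 "ENCODING = '%s' LOCALE_PROVIDER = %s LOCALE = '%s';",
			 dbname, encoding, provider, locale);
}

int
main(void)
{
	char		buf[256];

	build_create_database(buf, sizeof(buf), "app_db", "UTF8", 'b', "C.UTF-8");
	puts(buf);
	return 0;
}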
3799 :
3800 : /*
3801 : * Collect any database-specific or role-and-database-specific SET options
3802 : * for this database, and append them to outbuf.
3803 : */
3804 : static void
3805 174 : dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
3806 : const char *dbname, Oid dboid)
3807 : {
3808 174 : PGconn *conn = GetConnection(AH);
3809 174 : PQExpBuffer buf = createPQExpBuffer();
3810 : PGresult *res;
3811 :
3812 : /* First collect database-specific options */
3813 174 : printfPQExpBuffer(buf, "SELECT unnest(setconfig) FROM pg_db_role_setting "
3814 : "WHERE setrole = 0 AND setdatabase = '%u'::oid",
3815 : dboid);
3816 :
3817 174 : res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3818 :
3819 234 : for (int i = 0; i < PQntuples(res); i++)
3820 60 : makeAlterConfigCommand(conn, PQgetvalue(res, i, 0),
3821 : "DATABASE", dbname, NULL, NULL,
3822 : outbuf);
3823 :
3824 174 : PQclear(res);
3825 :
3826 : /* Now look for role-and-database-specific options */
3827 174 : printfPQExpBuffer(buf, "SELECT rolname, unnest(setconfig) "
3828 : "FROM pg_db_role_setting s, pg_roles r "
3829 : "WHERE setrole = r.oid AND setdatabase = '%u'::oid",
3830 : dboid);
3831 :
3832 174 : res = ExecuteSqlQuery(AH, buf->data, PGRES_TUPLES_OK);
3833 :
3834 174 : for (int i = 0; i < PQntuples(res); i++)
3835 0 : makeAlterConfigCommand(conn, PQgetvalue(res, i, 1),
3836 0 : "ROLE", PQgetvalue(res, i, 0),
3837 : "DATABASE", dbname,
3838 : outbuf);
3839 :
3840 174 : PQclear(res);
3841 :
3842 174 : destroyPQExpBuffer(buf);
3843 174 : }
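/*
 * Illustrative sketch (not part of pg_dump.c): each pg_db_role_setting
 * "setconfig" element is a single "name=value" string, and the real code
 * hands it to makeAlterConfigCommand() to produce an ALTER DATABASE ... SET
 * statement.  This simplified splitter ignores the quoting rules the real
 * function applies; the helper name is hypothetical.
 */
#include <stdio.h>
#include <string.h>

static void
print_alter_database_set(const char *dbname, const char *setconfig_item)
{
	const char *eq = strchr(setconfig_item, '=');

	if (eq == NULL)
		return;					/* malformed entry, ignore in this sketch */
	printf("ALTER DATABASE %s SET %.*s TO '%s';\n",
		   dbname, (int) (eq - setconfig_item), setconfig_item, eq + 1);
}

int
main(void)
{
	print_alter_database_set("app_db", "work_mem=64MB");
	print_alter_database_set("app_db", "search_path=app, public");
	return 0;
}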
3844 :
3845 : /*
3846 : * dumpEncoding: put the correct encoding into the archive
3847 : */
3848 : static void
3849 376 : dumpEncoding(Archive *AH)
3850 : {
3851 376 : const char *encname = pg_encoding_to_char(AH->encoding);
3852 376 : PQExpBuffer qry = createPQExpBuffer();
3853 :
3854 376 : pg_log_info("saving encoding = %s", encname);
3855 :
3856 376 : appendPQExpBufferStr(qry, "SET client_encoding = ");
3857 376 : appendStringLiteralAH(qry, encname, AH);
3858 376 : appendPQExpBufferStr(qry, ";\n");
3859 :
3860 376 : ArchiveEntry(AH, nilCatalogId, createDumpId(),
3861 376 : ARCHIVE_OPTS(.tag = "ENCODING",
3862 : .description = "ENCODING",
3863 : .section = SECTION_PRE_DATA,
3864 : .createStmt = qry->data));
3865 :
3866 376 : destroyPQExpBuffer(qry);
3867 376 : }
3868 :
3869 :
3870 : /*
3871 : * dumpStdStrings: put the correct escape string behavior into the archive
3872 : */
3873 : static void
3874 376 : dumpStdStrings(Archive *AH)
3875 : {
3876 376 : const char *stdstrings = AH->std_strings ? "on" : "off";
3877 376 : PQExpBuffer qry = createPQExpBuffer();
3878 :
3879 376 : pg_log_info("saving \"standard_conforming_strings = %s\"",
3880 : stdstrings);
3881 :
3882 376 : appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
3883 : stdstrings);
3884 :
3885 376 : ArchiveEntry(AH, nilCatalogId, createDumpId(),
3886 376 : ARCHIVE_OPTS(.tag = "STDSTRINGS",
3887 : .description = "STDSTRINGS",
3888 : .section = SECTION_PRE_DATA,
3889 : .createStmt = qry->data));
3890 :
3891 376 : destroyPQExpBuffer(qry);
3892 376 : }
3893 :
3894 : /*
3895 : * dumpSearchPath: record the active search_path in the archive
3896 : */
3897 : static void
3898 376 : dumpSearchPath(Archive *AH)
3899 : {
3900 376 : PQExpBuffer qry = createPQExpBuffer();
3901 376 : PQExpBuffer path = createPQExpBuffer();
3902 : PGresult *res;
3903 376 : char **schemanames = NULL;
3904 376 : int nschemanames = 0;
3905 : int i;
3906 :
3907 : /*
3908 : * We use the result of current_schemas(), not the search_path GUC,
3909 : * because that might contain wildcards such as "$user", which won't
3910 : * necessarily have the same value during restore. Also, this way avoids
3911 : * listing schemas that may appear in search_path but not actually exist,
3912 : * which seems like a prudent exclusion.
3913 : */
3914 376 : res = ExecuteSqlQueryForSingleRow(AH,
3915 : "SELECT pg_catalog.current_schemas(false)");
3916 :
3917 376 : if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
3918 0 : pg_fatal("could not parse result of current_schemas()");
3919 :
3920 : /*
3921 : * We use set_config(), not a simple "SET search_path" command, because
3922 : * the latter has less-clean behavior if the search path is empty. While
3923 : * that's likely to get fixed at some point, it seems like a good idea to
3924 : * be as backwards-compatible as possible in what we put into archives.
3925 : */
3926 376 : for (i = 0; i < nschemanames; i++)
3927 : {
3928 0 : if (i > 0)
3929 0 : appendPQExpBufferStr(path, ", ");
3930 0 : appendPQExpBufferStr(path, fmtId(schemanames[i]));
3931 : }
3932 :
3933 376 : appendPQExpBufferStr(qry, "SELECT pg_catalog.set_config('search_path', ");
3934 376 : appendStringLiteralAH(qry, path->data, AH);
3935 376 : appendPQExpBufferStr(qry, ", false);\n");
3936 :
3937 376 : pg_log_info("saving \"search_path = %s\"", path->data);
3938 :
3939 376 : ArchiveEntry(AH, nilCatalogId, createDumpId(),
3940 376 : ARCHIVE_OPTS(.tag = "SEARCHPATH",
3941 : .description = "SEARCHPATH",
3942 : .section = SECTION_PRE_DATA,
3943 : .createStmt = qry->data));
3944 :
3945 : /* Also save it in AH->searchpath, in case we're doing plain text dump */
3946 376 : AH->searchpath = pg_strdup(qry->data);
3947 :
3948 376 : free(schemanames);
3949 376 : PQclear(res);
3950 376 : destroyPQExpBuffer(qry);
3951 376 : destroyPQExpBuffer(path);
3952 376 : }
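/*
 * Illustrative sketch (not part of pg_dump.c) of the statement dumpSearchPath()
 * emits: the schemas reported by current_schemas(false) are joined into one
 * comma-separated list and installed with set_config(), which behaves sanely
 * even when the list is empty.  Identifier quoting is omitted here; the
 * helper name is hypothetical.
 */
#include <stdio.h>

static void
print_search_path_stmt(const char *const *schemas, int nschemas)
{
	printf("SELECT pg_catalog.set_config('search_path', '");
	for (int i = 0; i < nschemas; i++)
		printf("%s%s", i > 0 ? ", " : "", schemas[i]);
	printf("', false);\n");
}

int
main(void)
{
	const char *const schemas[] = {"app", "public"};

	print_search_path_stmt(schemas, 2);
	print_search_path_stmt(NULL, 0);	/* empty path is still valid */
	return 0;
}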
3953 :
3954 :
3955 : /*
3956 : * getLOs:
3957 : * Collect schema-level data about large objects
3958 : */
3959 : static void
3960 320 : getLOs(Archive *fout)
3961 : {
3962 320 : DumpOptions *dopt = fout->dopt;
3963 320 : PQExpBuffer loQry = createPQExpBuffer();
3964 : PGresult *res;
3965 : int ntups;
3966 : int i;
3967 : int n;
3968 : int i_oid;
3969 : int i_lomowner;
3970 : int i_lomacl;
3971 : int i_acldefault;
3972 :
3973 320 : pg_log_info("reading large objects");
3974 :
3975 : /*
3976 : * Fetch LO OIDs and owner/ACL data. Order the data so that all the blobs
3977 : * with the same owner/ACL appear together.
3978 : */
3979 320 : appendPQExpBufferStr(loQry,
3980 : "SELECT oid, lomowner, lomacl, "
3981 : "acldefault('L', lomowner) AS acldefault "
3982 : "FROM pg_largeobject_metadata "
3983 : "ORDER BY lomowner, lomacl::pg_catalog.text, oid");
3984 :
3985 320 : res = ExecuteSqlQuery(fout, loQry->data, PGRES_TUPLES_OK);
3986 :
3987 320 : i_oid = PQfnumber(res, "oid");
3988 320 : i_lomowner = PQfnumber(res, "lomowner");
3989 320 : i_lomacl = PQfnumber(res, "lomacl");
3990 320 : i_acldefault = PQfnumber(res, "acldefault");
3991 :
3992 320 : ntups = PQntuples(res);
3993 :
3994 : /*
3995 : * Group the blobs into suitably-sized groups that have the same owner and
3996 : * ACL setting, and build a metadata and a data DumpableObject for each
3997 : * group. (If we supported initprivs for blobs, we'd have to insist that
3998 : * groups also share initprivs settings, since the DumpableObject only has
3999 : * room for one.) i is the index of the first tuple in the current group,
4000 : * and n is the number of tuples we include in the group.
4001 : */
4002 500 : for (i = 0; i < ntups; i += n)
4003 : {
4004 180 : Oid thisoid = atooid(PQgetvalue(res, i, i_oid));
4005 180 : char *thisowner = PQgetvalue(res, i, i_lomowner);
4006 180 : char *thisacl = PQgetvalue(res, i, i_lomacl);
4007 : LoInfo *loinfo;
4008 : DumpableObject *lodata;
4009 : char namebuf[64];
4010 :
4011 : /* Scan to find first tuple not to be included in group */
4012 180 : n = 1;
4013 210 : while (n < MAX_BLOBS_PER_ARCHIVE_ENTRY && i + n < ntups)
4014 : {
4015 108 : if (strcmp(thisowner, PQgetvalue(res, i + n, i_lomowner)) != 0 ||
4016 98 : strcmp(thisacl, PQgetvalue(res, i + n, i_lomacl)) != 0)
4017 : break;
4018 30 : n++;
4019 : }
4020 :
4021 : /* Build the metadata DumpableObject */
4022 180 : loinfo = (LoInfo *) pg_malloc(offsetof(LoInfo, looids) + n * sizeof(Oid));
4023 :
4024 180 : loinfo->dobj.objType = DO_LARGE_OBJECT;
4025 180 : loinfo->dobj.catId.tableoid = LargeObjectRelationId;
4026 180 : loinfo->dobj.catId.oid = thisoid;
4027 180 : AssignDumpId(&loinfo->dobj);
4028 :
4029 180 : if (n > 1)
4030 20 : snprintf(namebuf, sizeof(namebuf), "%u..%u", thisoid,
4031 20 : atooid(PQgetvalue(res, i + n - 1, i_oid)));
4032 : else
4033 160 : snprintf(namebuf, sizeof(namebuf), "%u", thisoid);
4034 180 : loinfo->dobj.name = pg_strdup(namebuf);
4035 180 : loinfo->dacl.acl = pg_strdup(thisacl);
4036 180 : loinfo->dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
4037 180 : loinfo->dacl.privtype = 0;
4038 180 : loinfo->dacl.initprivs = NULL;
4039 180 : loinfo->rolname = getRoleName(thisowner);
4040 180 : loinfo->numlos = n;
4041 180 : loinfo->looids[0] = thisoid;
4042 : /* Collect OIDs of the remaining blobs in this group */
4043 210 : for (int k = 1; k < n; k++)
4044 : {
4045 : CatalogId extraID;
4046 :
4047 30 : loinfo->looids[k] = atooid(PQgetvalue(res, i + k, i_oid));
4048 :
4049 : /* Make sure we can look up loinfo by any of the blobs' OIDs */
4050 30 : extraID.tableoid = LargeObjectRelationId;
4051 30 : extraID.oid = loinfo->looids[k];
4052 30 : recordAdditionalCatalogID(extraID, &loinfo->dobj);
4053 : }
4054 :
4055 : /* LOs have data */
4056 180 : loinfo->dobj.components |= DUMP_COMPONENT_DATA;
4057 :
4058 : /* Mark whether LO group has a non-empty ACL */
4059 180 : if (!PQgetisnull(res, i, i_lomacl))
4060 78 : loinfo->dobj.components |= DUMP_COMPONENT_ACL;
4061 :
4062 : /*
4063 : * In binary-upgrade mode for LOs, we do *not* dump out the LO data,
4064 : * as it will be copied by pg_upgrade, which simply copies the
4065 : * pg_largeobject table. We *do* however dump out anything but the
4066 : * data, as pg_upgrade copies just pg_largeobject, but not
4067 : * pg_largeobject_metadata, after the dump is restored. In versions
4068 : * before v12, this is done via proper large object commands. In
4069 : * newer versions, we dump the content of pg_largeobject_metadata and
4070 : * any associated pg_shdepend rows, which is faster to restore. (On
4071 : * <v12, pg_largeobject_metadata was created WITH OIDS, so the OID
4072 : * column is hidden and won't be dumped.)
4073 : */
4074 180 : if (dopt->binary_upgrade)
4075 : {
4076 26 : if (fout->remoteVersion >= 120000)
4077 : {
4078 : /*
4079 : * We should've saved pg_largeobject_metadata's dump ID before
4080 : * this point.
4081 : */
4082 : Assert(lo_metadata_dumpId);
4083 :
4084 26 : loinfo->dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL | DUMP_COMPONENT_DEFINITION);
4085 :
4086 : /*
4087 : * Mark the large object as dependent on
4088 : * pg_largeobject_metadata so that any large object
4089 : * comments/seclabels are dumped after it.
4090 : */
4091 26 : loinfo->dobj.dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
4092 26 : loinfo->dobj.dependencies[0] = lo_metadata_dumpId;
4093 26 : loinfo->dobj.nDeps = loinfo->dobj.allocDeps = 1;
4094 : }
4095 : else
4096 0 : loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
4097 : }
4098 :
4099 : /*
4100 : * Create a "BLOBS" data item for the group, too. This is just a
4101 : * placeholder for sorting; it carries no data now.
4102 : */
4103 180 : lodata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
4104 180 : lodata->objType = DO_LARGE_OBJECT_DATA;
4105 180 : lodata->catId = nilCatalogId;
4106 180 : AssignDumpId(lodata);
4107 180 : lodata->name = pg_strdup(namebuf);
4108 180 : lodata->components |= DUMP_COMPONENT_DATA;
4109 : /* Set up explicit dependency from data to metadata */
4110 180 : lodata->dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
4111 180 : lodata->dependencies[0] = loinfo->dobj.dumpId;
4112 180 : lodata->nDeps = lodata->allocDeps = 1;
4113 : }
4114 :
4115 320 : PQclear(res);
4116 320 : destroyPQExpBuffer(loQry);
4117 320 : }
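/*
 * Illustrative sketch (not part of pg_dump.c) of the grouping loop in getLOs()
 * above: the query returns blobs sorted by (owner, ACL, oid), and consecutive
 * rows that share owner and ACL are batched into one metadata entry, capped at
 * a maximum group size.  The cap value and row type below are stand-ins for
 * MAX_BLOBS_PER_ARCHIVE_ENTRY and the real query result.
 */
#include <stdio.h>
#include <string.h>

#define GROUP_CAP 3				/* stand-in for MAX_BLOBS_PER_ARCHIVE_ENTRY */

typedef struct
{
	unsigned int oid;
	const char *owner;
	const char *acl;
} BlobRow;

int
main(void)
{
	BlobRow		rows[] = {
		{1001, "alice", ""}, {1002, "alice", ""}, {1003, "alice", ""},
		{1004, "alice", ""}, {1005, "bob", "=r"},
	};
	int			ntups = 5;

	for (int i = 0; i < ntups;)
	{
		int			n = 1;

		while (n < GROUP_CAP && i + n < ntups &&
			   strcmp(rows[i].owner, rows[i + n].owner) == 0 &&
			   strcmp(rows[i].acl, rows[i + n].acl) == 0)
			n++;
		printf("group: %u..%u (owner %s, %d blobs)\n",
			   rows[i].oid, rows[i + n - 1].oid, rows[i].owner, n);
		i += n;
	}
	return 0;
}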
4118 :
4119 : /*
4120 : * dumpLO
4121 : *
4122 : * dump the definition (metadata) of the given large object group
4123 : */
4124 : static void
4125 168 : dumpLO(Archive *fout, const LoInfo *loinfo)
4126 : {
4127 168 : PQExpBuffer cquery = createPQExpBuffer();
4128 :
4129 : /*
4130 : * The "definition" is just a newline-separated list of OIDs. We need to
4131 : * put something into the dropStmt too, but it can just be a comment.
4132 : */
4133 366 : for (int i = 0; i < loinfo->numlos; i++)
4134 198 : appendPQExpBuffer(cquery, "%u\n", loinfo->looids[i]);
4135 :
4136 168 : if (loinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4137 154 : ArchiveEntry(fout, loinfo->dobj.catId, loinfo->dobj.dumpId,
4138 154 : ARCHIVE_OPTS(.tag = loinfo->dobj.name,
4139 : .owner = loinfo->rolname,
4140 : .description = "BLOB METADATA",
4141 : .section = SECTION_DATA,
4142 : .createStmt = cquery->data,
4143 : .dropStmt = "-- dummy"));
4144 :
4145 : /*
4146 : * Dump per-blob comments and seclabels if any. We assume these are rare
4147 : * enough that it's okay to generate retail TOC entries for them.
4148 : */
4149 168 : if (loinfo->dobj.dump & (DUMP_COMPONENT_COMMENT |
4150 : DUMP_COMPONENT_SECLABEL))
4151 : {
4152 206 : for (int i = 0; i < loinfo->numlos; i++)
4153 : {
4154 : CatalogId catId;
4155 : char namebuf[32];
4156 :
4157 : /* Build identifying info for this blob */
4158 118 : catId.tableoid = loinfo->dobj.catId.tableoid;
4159 118 : catId.oid = loinfo->looids[i];
4160 118 : snprintf(namebuf, sizeof(namebuf), "%u", loinfo->looids[i]);
4161 :
4162 118 : if (loinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4163 118 : dumpComment(fout, "LARGE OBJECT", namebuf,
4164 118 : NULL, loinfo->rolname,
4165 118 : catId, 0, loinfo->dobj.dumpId);
4166 :
4167 118 : if (loinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4168 20 : dumpSecLabel(fout, "LARGE OBJECT", namebuf,
4169 20 : NULL, loinfo->rolname,
4170 20 : catId, 0, loinfo->dobj.dumpId);
4171 : }
4172 : }
4173 :
4174 : /*
4175 : * Dump the ACLs if any (remember that all blobs in the group will have
4176 : * the same ACL). If there's just one blob, dump a simple ACL entry; if
4177 : * there's more, make a "LARGE OBJECTS" entry that really contains only
4178 : * the ACL for the first blob. _printTocEntry() will be cued by the tag
4179 : * string to emit a mutated version for each blob.
4180 : */
4181 168 : if (loinfo->dobj.dump & DUMP_COMPONENT_ACL)
4182 : {
4183 : char namebuf[32];
4184 :
4185 : /* Build identifying info for the first blob */
4186 66 : snprintf(namebuf, sizeof(namebuf), "%u", loinfo->looids[0]);
4187 :
4188 66 : if (loinfo->numlos > 1)
4189 : {
4190 : char tagbuf[64];
4191 :
4192 0 : snprintf(tagbuf, sizeof(tagbuf), "LARGE OBJECTS %u..%u",
4193 0 : loinfo->looids[0], loinfo->looids[loinfo->numlos - 1]);
4194 :
4195 0 : dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId,
4196 : "LARGE OBJECT", namebuf, NULL, NULL,
4197 0 : tagbuf, loinfo->rolname, &loinfo->dacl);
4198 : }
4199 : else
4200 : {
4201 66 : dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId,
4202 : "LARGE OBJECT", namebuf, NULL, NULL,
4203 66 : NULL, loinfo->rolname, &loinfo->dacl);
4204 : }
4205 : }
4206 :
4207 168 : destroyPQExpBuffer(cquery);
4208 168 : }
4209 :
4210 : /*
4211 : * dumpLOs:
4212 : * dump the data contents of the large objects in the given group
4213 : */
4214 : static int
4215 146 : dumpLOs(Archive *fout, const void *arg)
4216 : {
4217 146 : const LoInfo *loinfo = (const LoInfo *) arg;
4218 146 : PGconn *conn = GetConnection(fout);
4219 : char buf[LOBBUFSIZE];
4220 :
4221 146 : pg_log_info("saving large objects \"%s\"", loinfo->dobj.name);
4222 :
4223 308 : for (int i = 0; i < loinfo->numlos; i++)
4224 : {
4225 162 : Oid loOid = loinfo->looids[i];
4226 : int loFd;
4227 : int cnt;
4228 :
4229 : /* Open the LO */
4230 162 : loFd = lo_open(conn, loOid, INV_READ);
4231 162 : if (loFd == -1)
4232 0 : pg_fatal("could not open large object %u: %s",
4233 : loOid, PQerrorMessage(conn));
4234 :
4235 162 : StartLO(fout, loOid);
4236 :
4237 : /* Now read it in chunks, sending data to archive */
4238 : do
4239 : {
4240 254 : cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
4241 254 : if (cnt < 0)
4242 0 : pg_fatal("error reading large object %u: %s",
4243 : loOid, PQerrorMessage(conn));
4244 :
4245 254 : WriteData(fout, buf, cnt);
4246 254 : } while (cnt > 0);
4247 :
4248 162 : lo_close(conn, loFd);
4249 :
4250 162 : EndLO(fout, loOid);
4251 : }
4252 :
4253 146 : return 1;
4254 : }
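/*
 * Illustrative usage sketch (not part of pg_dump.c): reading one large object
 * in chunks through libpq, the same lo_open()/lo_read() pattern the loop
 * above streams into the archive.  Client-side large object descriptors are
 * only valid inside a transaction, hence the explicit BEGIN/COMMIT.  The
 * connection string and OID are placeholders.
 */
#include <stdio.h>

#include "libpq-fe.h"
#include "libpq/libpq-fs.h"		/* INV_READ */

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=app_db");	/* placeholder */
	Oid			lo_oid = 16401;							/* placeholder */
	char		buf[8192];
	int			fd;
	int			cnt;
	size_t		total = 0;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		return 1;
	}

	PQclear(PQexec(conn, "BEGIN"));
	fd = lo_open(conn, lo_oid, INV_READ);
	if (fd == -1)
	{
		fprintf(stderr, "lo_open failed: %s", PQerrorMessage(conn));
		return 1;
	}
	while ((cnt = lo_read(conn, fd, buf, sizeof(buf))) > 0)
		total += cnt;			/* pg_dump writes each chunk to the archive */
	lo_close(conn, fd);
	PQclear(PQexec(conn, "COMMIT"));
	PQfinish(conn);

	printf("read %zu bytes\n", total);
	return 0;
}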
4255 :
4256 : /*
4257 : * getPolicies
4258 : * get information about all RLS policies on dumpable tables.
4259 : */
4260 : void
4261 376 : getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
4262 : {
4263 376 : DumpOptions *dopt = fout->dopt;
4264 : PQExpBuffer query;
4265 : PQExpBuffer tbloids;
4266 : PGresult *res;
4267 : PolicyInfo *polinfo;
4268 : int i_oid;
4269 : int i_tableoid;
4270 : int i_polrelid;
4271 : int i_polname;
4272 : int i_polcmd;
4273 : int i_polpermissive;
4274 : int i_polroles;
4275 : int i_polqual;
4276 : int i_polwithcheck;
4277 : int i,
4278 : j,
4279 : ntups;
4280 :
4281 : /* No policies before 9.5 */
4282 376 : if (fout->remoteVersion < 90500)
4283 0 : return;
4284 :
4285 : /* Skip if --no-policies was specified */
4286 376 : if (dopt->no_policies)
4287 2 : return;
4288 :
4289 374 : query = createPQExpBuffer();
4290 374 : tbloids = createPQExpBuffer();
4291 :
4292 : /*
4293 : * Identify tables of interest, and check which ones have RLS enabled.
4294 : */
4295 374 : appendPQExpBufferChar(tbloids, '{');
4296 98804 : for (i = 0; i < numTables; i++)
4297 : {
4298 98430 : TableInfo *tbinfo = &tblinfo[i];
4299 :
4300 : /* Ignore row security on tables not to be dumped */
4301 98430 : if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
4302 84608 : continue;
4303 :
4304 : /* It can't have RLS or policies if it's not a table */
4305 13822 : if (tbinfo->relkind != RELKIND_RELATION &&
4306 3892 : tbinfo->relkind != RELKIND_PARTITIONED_TABLE)
4307 2700 : continue;
4308 :
4309 : /* Add it to the list of table OIDs to be probed below */
4310 11122 : if (tbloids->len > 1) /* do we have more than the '{'? */
4311 10878 : appendPQExpBufferChar(tbloids, ',');
4312 11122 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
4313 :
4314 : /* Is RLS enabled? (That's separate from whether it has policies) */
4315 11122 : if (tbinfo->rowsec)
4316 : {
4317 106 : tbinfo->dobj.components |= DUMP_COMPONENT_POLICY;
4318 :
4319 : /*
4320 : * We represent RLS being enabled on a table by creating a
4321 : * PolicyInfo object with null polname.
4322 : *
4323 : * Note: use tableoid 0 so that this object won't be mistaken for
4324 : * something that pg_depend entries apply to.
4325 : */
4326 106 : polinfo = pg_malloc(sizeof(PolicyInfo));
4327 106 : polinfo->dobj.objType = DO_POLICY;
4328 106 : polinfo->dobj.catId.tableoid = 0;
4329 106 : polinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
4330 106 : AssignDumpId(&polinfo->dobj);
4331 106 : polinfo->dobj.namespace = tbinfo->dobj.namespace;
4332 106 : polinfo->dobj.name = pg_strdup(tbinfo->dobj.name);
4333 106 : polinfo->poltable = tbinfo;
4334 106 : polinfo->polname = NULL;
4335 106 : polinfo->polcmd = '\0';
4336 106 : polinfo->polpermissive = 0;
4337 106 : polinfo->polroles = NULL;
4338 106 : polinfo->polqual = NULL;
4339 106 : polinfo->polwithcheck = NULL;
4340 : }
4341 : }
4342 374 : appendPQExpBufferChar(tbloids, '}');
4343 :
4344 : /*
4345 : * Now, read all RLS policies belonging to the tables of interest, and
4346 : * create PolicyInfo objects for them. (Note that we must filter the
4347 : * results server-side not locally, because we dare not apply pg_get_expr
4348 : * to tables we don't have lock on.)
4349 : */
4350 374 : pg_log_info("reading row-level security policies");
4351 :
4352 374 : printfPQExpBuffer(query,
4353 : "SELECT pol.oid, pol.tableoid, pol.polrelid, pol.polname, pol.polcmd, ");
4354 374 : if (fout->remoteVersion >= 100000)
4355 374 : appendPQExpBufferStr(query, "pol.polpermissive, ");
4356 : else
4357 0 : appendPQExpBufferStr(query, "'t' as polpermissive, ");
4358 374 : appendPQExpBuffer(query,
4359 : "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
4360 : " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
4361 : "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
4362 : "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
4363 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
4364 : "JOIN pg_catalog.pg_policy pol ON (src.tbloid = pol.polrelid)",
4365 : tbloids->data);
4366 :
4367 374 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4368 :
4369 374 : ntups = PQntuples(res);
4370 374 : if (ntups > 0)
4371 : {
4372 86 : i_oid = PQfnumber(res, "oid");
4373 86 : i_tableoid = PQfnumber(res, "tableoid");
4374 86 : i_polrelid = PQfnumber(res, "polrelid");
4375 86 : i_polname = PQfnumber(res, "polname");
4376 86 : i_polcmd = PQfnumber(res, "polcmd");
4377 86 : i_polpermissive = PQfnumber(res, "polpermissive");
4378 86 : i_polroles = PQfnumber(res, "polroles");
4379 86 : i_polqual = PQfnumber(res, "polqual");
4380 86 : i_polwithcheck = PQfnumber(res, "polwithcheck");
4381 :
4382 86 : polinfo = pg_malloc(ntups * sizeof(PolicyInfo));
4383 :
4384 632 : for (j = 0; j < ntups; j++)
4385 : {
4386 546 : Oid polrelid = atooid(PQgetvalue(res, j, i_polrelid));
4387 546 : TableInfo *tbinfo = findTableByOid(polrelid);
4388 :
4389 546 : tbinfo->dobj.components |= DUMP_COMPONENT_POLICY;
4390 :
4391 546 : polinfo[j].dobj.objType = DO_POLICY;
4392 546 : polinfo[j].dobj.catId.tableoid =
4393 546 : atooid(PQgetvalue(res, j, i_tableoid));
4394 546 : polinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
4395 546 : AssignDumpId(&polinfo[j].dobj);
4396 546 : polinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4397 546 : polinfo[j].poltable = tbinfo;
4398 546 : polinfo[j].polname = pg_strdup(PQgetvalue(res, j, i_polname));
4399 546 : polinfo[j].dobj.name = pg_strdup(polinfo[j].polname);
4400 :
4401 546 : polinfo[j].polcmd = *(PQgetvalue(res, j, i_polcmd));
4402 546 : polinfo[j].polpermissive = *(PQgetvalue(res, j, i_polpermissive)) == 't';
4403 :
4404 546 : if (PQgetisnull(res, j, i_polroles))
4405 242 : polinfo[j].polroles = NULL;
4406 : else
4407 304 : polinfo[j].polroles = pg_strdup(PQgetvalue(res, j, i_polroles));
4408 :
4409 546 : if (PQgetisnull(res, j, i_polqual))
4410 76 : polinfo[j].polqual = NULL;
4411 : else
4412 470 : polinfo[j].polqual = pg_strdup(PQgetvalue(res, j, i_polqual));
4413 :
4414 546 : if (PQgetisnull(res, j, i_polwithcheck))
4415 288 : polinfo[j].polwithcheck = NULL;
4416 : else
4417 258 : polinfo[j].polwithcheck
4418 258 : = pg_strdup(PQgetvalue(res, j, i_polwithcheck));
4419 : }
4420 : }
4421 :
4422 374 : PQclear(res);
4423 :
4424 374 : destroyPQExpBuffer(query);
4425 374 : destroyPQExpBuffer(tbloids);
4426 : }
4427 :
4428 : /*
4429 : * dumpPolicy
4430 : * dump the definition of the given policy
4431 : */
4432 : static void
4433 652 : dumpPolicy(Archive *fout, const PolicyInfo *polinfo)
4434 : {
4435 652 : DumpOptions *dopt = fout->dopt;
4436 652 : TableInfo *tbinfo = polinfo->poltable;
4437 : PQExpBuffer query;
4438 : PQExpBuffer delqry;
4439 : PQExpBuffer polprefix;
4440 : char *qtabname;
4441 : const char *cmd;
4442 : char *tag;
4443 :
4444 : /* Do nothing if not dumping schema */
4445 652 : if (!dopt->dumpSchema)
4446 98 : return;
4447 :
4448 : /*
4449 : * If polname is NULL, then this record is just indicating that ROW LEVEL
4450 : * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
4451 : * ROW LEVEL SECURITY.
4452 : */
4453 554 : if (polinfo->polname == NULL)
4454 : {
4455 92 : query = createPQExpBuffer();
4456 :
4457 92 : appendPQExpBuffer(query, "ALTER TABLE %s ENABLE ROW LEVEL SECURITY;",
4458 92 : fmtQualifiedDumpable(tbinfo));
4459 :
4460 : /*
4461 : * We must emit the ROW SECURITY object's dependency on its table
4462 : * explicitly, because it will not match anything in pg_depend (unlike
4463 : * the case for other PolicyInfo objects).
4464 : */
4465 92 : if (polinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4466 92 : ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
4467 92 : ARCHIVE_OPTS(.tag = polinfo->dobj.name,
4468 : .namespace = polinfo->dobj.namespace->dobj.name,
4469 : .owner = tbinfo->rolname,
4470 : .description = "ROW SECURITY",
4471 : .section = SECTION_POST_DATA,
4472 : .createStmt = query->data,
4473 : .deps = &(tbinfo->dobj.dumpId),
4474 : .nDeps = 1));
4475 :
4476 92 : destroyPQExpBuffer(query);
4477 92 : return;
4478 : }
4479 :
4480 462 : if (polinfo->polcmd == '*')
4481 154 : cmd = "";
4482 308 : else if (polinfo->polcmd == 'r')
4483 82 : cmd = " FOR SELECT";
4484 226 : else if (polinfo->polcmd == 'a')
4485 62 : cmd = " FOR INSERT";
4486 164 : else if (polinfo->polcmd == 'w')
4487 82 : cmd = " FOR UPDATE";
4488 82 : else if (polinfo->polcmd == 'd')
4489 82 : cmd = " FOR DELETE";
4490 : else
4491 0 : pg_fatal("unexpected policy command type: %c",
4492 : polinfo->polcmd);
4493 :
4494 462 : query = createPQExpBuffer();
4495 462 : delqry = createPQExpBuffer();
4496 462 : polprefix = createPQExpBuffer();
4497 :
4498 462 : qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
4499 :
4500 462 : appendPQExpBuffer(query, "CREATE POLICY %s", fmtId(polinfo->polname));
4501 :
4502 462 : appendPQExpBuffer(query, " ON %s%s%s", fmtQualifiedDumpable(tbinfo),
4503 462 : !polinfo->polpermissive ? " AS RESTRICTIVE" : "", cmd);
4504 :
4505 462 : if (polinfo->polroles != NULL)
4506 248 : appendPQExpBuffer(query, " TO %s", polinfo->polroles);
4507 :
4508 462 : if (polinfo->polqual != NULL)
4509 400 : appendPQExpBuffer(query, " USING (%s)", polinfo->polqual);
4510 :
4511 462 : if (polinfo->polwithcheck != NULL)
4512 216 : appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
4513 :
4514 462 : appendPQExpBufferStr(query, ";\n");
4515 :
4516 462 : appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
4517 462 : appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
4518 :
4519 462 : appendPQExpBuffer(polprefix, "POLICY %s ON",
4520 462 : fmtId(polinfo->polname));
4521 :
4522 462 : tag = psprintf("%s %s", tbinfo->dobj.name, polinfo->dobj.name);
4523 :
4524 462 : if (polinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4525 462 : ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
4526 462 : ARCHIVE_OPTS(.tag = tag,
4527 : .namespace = polinfo->dobj.namespace->dobj.name,
4528 : .owner = tbinfo->rolname,
4529 : .description = "POLICY",
4530 : .section = SECTION_POST_DATA,
4531 : .createStmt = query->data,
4532 : .dropStmt = delqry->data));
4533 :
4534 462 : if (polinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4535 62 : dumpComment(fout, polprefix->data, qtabname,
4536 62 : tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
4537 62 : polinfo->dobj.catId, 0, polinfo->dobj.dumpId);
4538 :
4539 462 : free(tag);
4540 462 : destroyPQExpBuffer(query);
4541 462 : destroyPQExpBuffer(delqry);
4542 462 : destroyPQExpBuffer(polprefix);
4543 462 : free(qtabname);
4544 : }
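/*
 * For illustration only (hypothetical names, not taken from this file):
 * for a policy "p1" on table "s1"."t1" the buffers assembled above end up
 * holding roughly
 *
 *     CREATE POLICY p1 ON s1.t1 AS RESTRICTIVE FOR SELECT
 *         TO app_role USING (owner = CURRENT_USER);
 *     DROP POLICY p1 ON s1.t1;
 *
 * while a row whose polname is NULL produces only
 *
 *     ALTER TABLE s1.t1 ENABLE ROW LEVEL SECURITY;
 */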
4545 :
4546 : /*
4547 : * getPublications
4548 : * get information about publications
4549 : */
4550 : void
4551 376 : getPublications(Archive *fout)
4552 : {
4553 376 : DumpOptions *dopt = fout->dopt;
4554 : PQExpBuffer query;
4555 : PGresult *res;
4556 : PublicationInfo *pubinfo;
4557 : int i_tableoid;
4558 : int i_oid;
4559 : int i_pubname;
4560 : int i_pubowner;
4561 : int i_puballtables;
4562 : int i_puballsequences;
4563 : int i_pubinsert;
4564 : int i_pubupdate;
4565 : int i_pubdelete;
4566 : int i_pubtruncate;
4567 : int i_pubviaroot;
4568 : int i_pubgencols;
4569 : int i,
4570 : ntups;
4571 :
4572 376 : if (dopt->no_publications || fout->remoteVersion < 100000)
4573 0 : return;
4574 :
4575 376 : query = createPQExpBuffer();
4576 :
4577 : /* Get the publications. */
4578 376 : appendPQExpBufferStr(query, "SELECT p.tableoid, p.oid, p.pubname, "
4579 : "p.pubowner, p.puballtables, p.pubinsert, "
4580 : "p.pubupdate, p.pubdelete, ");
4581 :
4582 376 : if (fout->remoteVersion >= 110000)
4583 376 : appendPQExpBufferStr(query, "p.pubtruncate, ");
4584 : else
4585 0 : appendPQExpBufferStr(query, "false AS pubtruncate, ");
4586 :
4587 376 : if (fout->remoteVersion >= 130000)
4588 376 : appendPQExpBufferStr(query, "p.pubviaroot, ");
4589 : else
4590 0 : appendPQExpBufferStr(query, "false AS pubviaroot, ");
4591 :
4592 376 : if (fout->remoteVersion >= 180000)
4593 376 : appendPQExpBufferStr(query, "p.pubgencols, ");
4594 : else
4595 0 : appendPQExpBuffer(query, "'%c' AS pubgencols, ", PUBLISH_GENCOLS_NONE);
4596 :
4597 376 : if (fout->remoteVersion >= 190000)
4598 376 : appendPQExpBufferStr(query, "p.puballsequences ");
4599 : else
4600 0 : appendPQExpBufferStr(query, "false AS puballsequences ");
4601 :
4602 376 : appendPQExpBufferStr(query, "FROM pg_publication p");
4603 :
4604 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4605 :
4606 376 : ntups = PQntuples(res);
4607 :
4608 376 : if (ntups == 0)
4609 270 : goto cleanup;
4610 :
4611 106 : i_tableoid = PQfnumber(res, "tableoid");
4612 106 : i_oid = PQfnumber(res, "oid");
4613 106 : i_pubname = PQfnumber(res, "pubname");
4614 106 : i_pubowner = PQfnumber(res, "pubowner");
4615 106 : i_puballtables = PQfnumber(res, "puballtables");
4616 106 : i_puballsequences = PQfnumber(res, "puballsequences");
4617 106 : i_pubinsert = PQfnumber(res, "pubinsert");
4618 106 : i_pubupdate = PQfnumber(res, "pubupdate");
4619 106 : i_pubdelete = PQfnumber(res, "pubdelete");
4620 106 : i_pubtruncate = PQfnumber(res, "pubtruncate");
4621 106 : i_pubviaroot = PQfnumber(res, "pubviaroot");
4622 106 : i_pubgencols = PQfnumber(res, "pubgencols");
4623 :
4624 106 : pubinfo = pg_malloc(ntups * sizeof(PublicationInfo));
4625 :
4626 808 : for (i = 0; i < ntups; i++)
4627 : {
4628 702 : pubinfo[i].dobj.objType = DO_PUBLICATION;
4629 702 : pubinfo[i].dobj.catId.tableoid =
4630 702 : atooid(PQgetvalue(res, i, i_tableoid));
4631 702 : pubinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4632 702 : AssignDumpId(&pubinfo[i].dobj);
4633 702 : pubinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_pubname));
4634 702 : pubinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_pubowner));
4635 702 : pubinfo[i].puballtables =
4636 702 : (strcmp(PQgetvalue(res, i, i_puballtables), "t") == 0);
4637 702 : pubinfo[i].puballsequences =
4638 702 : (strcmp(PQgetvalue(res, i, i_puballsequences), "t") == 0);
4639 702 : pubinfo[i].pubinsert =
4640 702 : (strcmp(PQgetvalue(res, i, i_pubinsert), "t") == 0);
4641 702 : pubinfo[i].pubupdate =
4642 702 : (strcmp(PQgetvalue(res, i, i_pubupdate), "t") == 0);
4643 702 : pubinfo[i].pubdelete =
4644 702 : (strcmp(PQgetvalue(res, i, i_pubdelete), "t") == 0);
4645 702 : pubinfo[i].pubtruncate =
4646 702 : (strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
4647 702 : pubinfo[i].pubviaroot =
4648 702 : (strcmp(PQgetvalue(res, i, i_pubviaroot), "t") == 0);
4649 702 : pubinfo[i].pubgencols_type =
4650 702 : *(PQgetvalue(res, i, i_pubgencols));
4651 :
4652 : /* Decide whether we want to dump it */
4653 702 : selectDumpableObject(&(pubinfo[i].dobj), fout);
4654 : }
4655 :
4656 106 : cleanup:
4657 376 : PQclear(res);
4658 :
4659 376 : destroyPQExpBuffer(query);
4660 : }
4661 :
4662 : /*
4663 : * dumpPublication
4664 : * dump the definition of the given publication
4665 : */
4666 : static void
4667 570 : dumpPublication(Archive *fout, const PublicationInfo *pubinfo)
4668 : {
4669 570 : DumpOptions *dopt = fout->dopt;
4670 : PQExpBuffer delq;
4671 : PQExpBuffer query;
4672 : char *qpubname;
4673 570 : bool first = true;
4674 :
4675 : /* Do nothing if not dumping schema */
4676 570 : if (!dopt->dumpSchema)
4677 84 : return;
4678 :
4679 486 : delq = createPQExpBuffer();
4680 486 : query = createPQExpBuffer();
4681 :
4682 486 : qpubname = pg_strdup(fmtId(pubinfo->dobj.name));
4683 :
4684 486 : appendPQExpBuffer(delq, "DROP PUBLICATION %s;\n",
4685 : qpubname);
4686 :
4687 486 : appendPQExpBuffer(query, "CREATE PUBLICATION %s",
4688 : qpubname);
4689 :
4690 486 : if (pubinfo->puballtables && pubinfo->puballsequences)
4691 62 : appendPQExpBufferStr(query, " FOR ALL TABLES, ALL SEQUENCES");
4692 424 : else if (pubinfo->puballtables)
4693 64 : appendPQExpBufferStr(query, " FOR ALL TABLES");
4694 360 : else if (pubinfo->puballsequences)
4695 62 : appendPQExpBufferStr(query, " FOR ALL SEQUENCES");
4696 :
4697 486 : appendPQExpBufferStr(query, " WITH (publish = '");
4698 486 : if (pubinfo->pubinsert)
4699 : {
4700 362 : appendPQExpBufferStr(query, "insert");
4701 362 : first = false;
4702 : }
4703 :
4704 486 : if (pubinfo->pubupdate)
4705 : {
4706 362 : if (!first)
4707 362 : appendPQExpBufferStr(query, ", ");
4708 :
4709 362 : appendPQExpBufferStr(query, "update");
4710 362 : first = false;
4711 : }
4712 :
4713 486 : if (pubinfo->pubdelete)
4714 : {
4715 362 : if (!first)
4716 362 : appendPQExpBufferStr(query, ", ");
4717 :
4718 362 : appendPQExpBufferStr(query, "delete");
4719 362 : first = false;
4720 : }
4721 :
4722 486 : if (pubinfo->pubtruncate)
4723 : {
4724 362 : if (!first)
4725 362 : appendPQExpBufferStr(query, ", ");
4726 :
4727 362 : appendPQExpBufferStr(query, "truncate");
4728 362 : first = false;
4729 : }
4730 :
4731 486 : appendPQExpBufferChar(query, '\'');
4732 :
4733 486 : if (pubinfo->pubviaroot)
4734 10 : appendPQExpBufferStr(query, ", publish_via_partition_root = true");
4735 :
4736 486 : if (pubinfo->pubgencols_type == PUBLISH_GENCOLS_STORED)
4737 62 : appendPQExpBufferStr(query, ", publish_generated_columns = stored");
4738 :
4739 486 : appendPQExpBufferStr(query, ");\n");
4740 :
4741 486 : if (pubinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
4742 486 : ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
4743 486 : ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
4744 : .owner = pubinfo->rolname,
4745 : .description = "PUBLICATION",
4746 : .section = SECTION_POST_DATA,
4747 : .createStmt = query->data,
4748 : .dropStmt = delq->data));
4749 :
4750 486 : if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
4751 62 : dumpComment(fout, "PUBLICATION", qpubname,
4752 62 : NULL, pubinfo->rolname,
4753 62 : pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4754 :
4755 486 : if (pubinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
4756 0 : dumpSecLabel(fout, "PUBLICATION", qpubname,
4757 0 : NULL, pubinfo->rolname,
4758 0 : pubinfo->dobj.catId, 0, pubinfo->dobj.dumpId);
4759 :
4760 486 : destroyPQExpBuffer(delq);
4761 486 : destroyPQExpBuffer(query);
4762 486 : free(qpubname);
4763 : }
4764 :
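/*
 * For illustration only (the publication name and option values are
 * assumed examples): a publication covering all tables with the default
 * operations is recreated roughly as
 *
 *     CREATE PUBLICATION pub1 FOR ALL TABLES
 *         WITH (publish = 'insert, update, delete, truncate');
 *
 * with ", publish_via_partition_root = true" and/or
 * ", publish_generated_columns = stored" appended inside the WITH list
 * when the corresponding flags are set above.
 */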
4765 : /*
4766 : * getPublicationNamespaces
4767 : * get information about publication membership for dumpable schemas.
4768 : */
4769 : void
4770 376 : getPublicationNamespaces(Archive *fout)
4771 : {
4772 : PQExpBuffer query;
4773 : PGresult *res;
4774 : PublicationSchemaInfo *pubsinfo;
4775 376 : DumpOptions *dopt = fout->dopt;
4776 : int i_tableoid;
4777 : int i_oid;
4778 : int i_pnpubid;
4779 : int i_pnnspid;
4780 : int i,
4781 : j,
4782 : ntups;
4783 :
4784 376 : if (dopt->no_publications || fout->remoteVersion < 150000)
4785 0 : return;
4786 :
4787 376 : query = createPQExpBuffer();
4788 :
4789 : /* Collect all publication membership info. */
4790 376 : appendPQExpBufferStr(query,
4791 : "SELECT tableoid, oid, pnpubid, pnnspid "
4792 : "FROM pg_catalog.pg_publication_namespace");
4793 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4794 :
4795 376 : ntups = PQntuples(res);
4796 :
4797 376 : i_tableoid = PQfnumber(res, "tableoid");
4798 376 : i_oid = PQfnumber(res, "oid");
4799 376 : i_pnpubid = PQfnumber(res, "pnpubid");
4800 376 : i_pnnspid = PQfnumber(res, "pnnspid");
4801 :
4802 : /* this allocation may be more than we need */
4803 376 : pubsinfo = pg_malloc(ntups * sizeof(PublicationSchemaInfo));
4804 376 : j = 0;
4805 :
4806 626 : for (i = 0; i < ntups; i++)
4807 : {
4808 250 : Oid pnpubid = atooid(PQgetvalue(res, i, i_pnpubid));
4809 250 : Oid pnnspid = atooid(PQgetvalue(res, i, i_pnnspid));
4810 : PublicationInfo *pubinfo;
4811 : NamespaceInfo *nspinfo;
4812 :
4813 : /*
4814 : * Ignore any entries for which we aren't interested in either the
 4815                 :          * publication or the schema.
4816 : */
4817 250 : pubinfo = findPublicationByOid(pnpubid);
4818 250 : if (pubinfo == NULL)
4819 0 : continue;
4820 250 : nspinfo = findNamespaceByOid(pnnspid);
4821 250 : if (nspinfo == NULL)
4822 0 : continue;
4823 :
4824 : /* OK, make a DumpableObject for this relationship */
4825 250 : pubsinfo[j].dobj.objType = DO_PUBLICATION_TABLE_IN_SCHEMA;
4826 250 : pubsinfo[j].dobj.catId.tableoid =
4827 250 : atooid(PQgetvalue(res, i, i_tableoid));
4828 250 : pubsinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4829 250 : AssignDumpId(&pubsinfo[j].dobj);
4830 250 : pubsinfo[j].dobj.namespace = nspinfo->dobj.namespace;
4831 250 : pubsinfo[j].dobj.name = nspinfo->dobj.name;
4832 250 : pubsinfo[j].publication = pubinfo;
4833 250 : pubsinfo[j].pubschema = nspinfo;
4834 :
4835 : /* Decide whether we want to dump it */
4836 250 : selectDumpablePublicationObject(&(pubsinfo[j].dobj), fout);
4837 :
4838 250 : j++;
4839 : }
4840 :
4841 376 : PQclear(res);
4842 376 : destroyPQExpBuffer(query);
4843 : }
4844 :
4845 : /*
4846 : * getPublicationTables
4847 : * get information about publication membership for dumpable tables.
4848 : */
4849 : void
4850 376 : getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
4851 : {
4852 : PQExpBuffer query;
4853 : PGresult *res;
4854 : PublicationRelInfo *pubrinfo;
4855 376 : DumpOptions *dopt = fout->dopt;
4856 : int i_tableoid;
4857 : int i_oid;
4858 : int i_prpubid;
4859 : int i_prrelid;
4860 : int i_prrelqual;
4861 : int i_prattrs;
4862 : int i,
4863 : j,
4864 : ntups;
4865 :
4866 376 : if (dopt->no_publications || fout->remoteVersion < 100000)
4867 0 : return;
4868 :
4869 376 : query = createPQExpBuffer();
4870 :
4871 : /* Collect all publication membership info. */
4872 376 : if (fout->remoteVersion >= 150000)
4873 376 : appendPQExpBufferStr(query,
4874 : "SELECT tableoid, oid, prpubid, prrelid, "
4875 : "pg_catalog.pg_get_expr(prqual, prrelid) AS prrelqual, "
4876 : "(CASE\n"
4877 : " WHEN pr.prattrs IS NOT NULL THEN\n"
4878 : " (SELECT array_agg(attname)\n"
4879 : " FROM\n"
4880 : " pg_catalog.generate_series(0, pg_catalog.array_upper(pr.prattrs::pg_catalog.int2[], 1)) s,\n"
4881 : " pg_catalog.pg_attribute\n"
4882 : " WHERE attrelid = pr.prrelid AND attnum = prattrs[s])\n"
4883 : " ELSE NULL END) prattrs "
4884 : "FROM pg_catalog.pg_publication_rel pr");
4885 : else
4886 0 : appendPQExpBufferStr(query,
4887 : "SELECT tableoid, oid, prpubid, prrelid, "
4888 : "NULL AS prrelqual, NULL AS prattrs "
4889 : "FROM pg_catalog.pg_publication_rel");
4890 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
4891 :
4892 376 : ntups = PQntuples(res);
4893 :
4894 376 : i_tableoid = PQfnumber(res, "tableoid");
4895 376 : i_oid = PQfnumber(res, "oid");
4896 376 : i_prpubid = PQfnumber(res, "prpubid");
4897 376 : i_prrelid = PQfnumber(res, "prrelid");
4898 376 : i_prrelqual = PQfnumber(res, "prrelqual");
4899 376 : i_prattrs = PQfnumber(res, "prattrs");
4900 :
4901 : /* this allocation may be more than we need */
4902 376 : pubrinfo = pg_malloc(ntups * sizeof(PublicationRelInfo));
4903 376 : j = 0;
4904 :
4905 1076 : for (i = 0; i < ntups; i++)
4906 : {
4907 700 : Oid prpubid = atooid(PQgetvalue(res, i, i_prpubid));
4908 700 : Oid prrelid = atooid(PQgetvalue(res, i, i_prrelid));
4909 : PublicationInfo *pubinfo;
4910 : TableInfo *tbinfo;
4911 :
4912 : /*
4913 : * Ignore any entries for which we aren't interested in either the
4914 : * publication or the rel.
4915 : */
4916 700 : pubinfo = findPublicationByOid(prpubid);
4917 700 : if (pubinfo == NULL)
4918 0 : continue;
4919 700 : tbinfo = findTableByOid(prrelid);
4920 700 : if (tbinfo == NULL)
4921 0 : continue;
4922 :
4923 : /* OK, make a DumpableObject for this relationship */
4924 700 : pubrinfo[j].dobj.objType = DO_PUBLICATION_REL;
4925 700 : pubrinfo[j].dobj.catId.tableoid =
4926 700 : atooid(PQgetvalue(res, i, i_tableoid));
4927 700 : pubrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
4928 700 : AssignDumpId(&pubrinfo[j].dobj);
4929 700 : pubrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
4930 700 : pubrinfo[j].dobj.name = tbinfo->dobj.name;
4931 700 : pubrinfo[j].publication = pubinfo;
4932 700 : pubrinfo[j].pubtable = tbinfo;
4933 700 : if (PQgetisnull(res, i, i_prrelqual))
4934 388 : pubrinfo[j].pubrelqual = NULL;
4935 : else
4936 312 : pubrinfo[j].pubrelqual = pg_strdup(PQgetvalue(res, i, i_prrelqual));
4937 :
4938 700 : if (!PQgetisnull(res, i, i_prattrs))
4939 : {
4940 : char **attnames;
4941 : int nattnames;
4942 : PQExpBuffer attribs;
4943 :
4944 222 : if (!parsePGArray(PQgetvalue(res, i, i_prattrs),
4945 : &attnames, &nattnames))
4946 0 : pg_fatal("could not parse %s array", "prattrs");
4947 222 : attribs = createPQExpBuffer();
4948 638 : for (int k = 0; k < nattnames; k++)
4949 : {
4950 416 : if (k > 0)
4951 194 : appendPQExpBufferStr(attribs, ", ");
4952 :
4953 416 : appendPQExpBufferStr(attribs, fmtId(attnames[k]));
4954 : }
4955 222 : pubrinfo[j].pubrattrs = attribs->data;
4956 222 : free(attribs); /* but not attribs->data */
4957 222 : free(attnames);
4958 : }
4959 : else
4960 478 : pubrinfo[j].pubrattrs = NULL;
4961 :
4962 : /* Decide whether we want to dump it */
4963 700 : selectDumpablePublicationObject(&(pubrinfo[j].dobj), fout);
4964 :
4965 700 : j++;
4966 : }
4967 :
4968 376 : PQclear(res);
4969 376 : destroyPQExpBuffer(query);
4970 : }
4971 :
4972 : /*
4973 : * dumpPublicationNamespace
4974 : * dump the definition of the given publication schema mapping.
4975 : */
4976 : static void
4977 198 : dumpPublicationNamespace(Archive *fout, const PublicationSchemaInfo *pubsinfo)
4978 : {
4979 198 : DumpOptions *dopt = fout->dopt;
4980 198 : NamespaceInfo *schemainfo = pubsinfo->pubschema;
4981 198 : PublicationInfo *pubinfo = pubsinfo->publication;
4982 : PQExpBuffer query;
4983 : char *tag;
4984 :
4985 : /* Do nothing if not dumping schema */
4986 198 : if (!dopt->dumpSchema)
4987 24 : return;
4988 :
4989 174 : tag = psprintf("%s %s", pubinfo->dobj.name, schemainfo->dobj.name);
4990 :
4991 174 : query = createPQExpBuffer();
4992 :
4993 174 : appendPQExpBuffer(query, "ALTER PUBLICATION %s ", fmtId(pubinfo->dobj.name));
4994 174 : appendPQExpBuffer(query, "ADD TABLES IN SCHEMA %s;\n", fmtId(schemainfo->dobj.name));
4995 :
4996 : /*
 4997                 :      * There is no point in creating a drop query, as the drop is handled by
 4998                 :      * the schema drop.
4999 : */
5000 174 : if (pubsinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5001 174 : ArchiveEntry(fout, pubsinfo->dobj.catId, pubsinfo->dobj.dumpId,
5002 174 : ARCHIVE_OPTS(.tag = tag,
5003 : .namespace = schemainfo->dobj.name,
5004 : .owner = pubinfo->rolname,
5005 : .description = "PUBLICATION TABLES IN SCHEMA",
5006 : .section = SECTION_POST_DATA,
5007 : .createStmt = query->data));
5008 :
5009 : /* These objects can't currently have comments or seclabels */
5010 :
5011 174 : free(tag);
5012 174 : destroyPQExpBuffer(query);
5013 : }
5014 :
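/*
 * For illustration only (assumed names): a schema-level membership for
 * publication "pub1" and schema "s1" is emitted as
 *
 *     ALTER PUBLICATION pub1 ADD TABLES IN SCHEMA s1;
 */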
5015 : /*
5016 : * dumpPublicationTable
5017 : * dump the definition of the given publication table mapping
5018 : */
5019 : static void
5020 568 : dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo)
5021 : {
5022 568 : DumpOptions *dopt = fout->dopt;
5023 568 : PublicationInfo *pubinfo = pubrinfo->publication;
5024 568 : TableInfo *tbinfo = pubrinfo->pubtable;
5025 : PQExpBuffer query;
5026 : char *tag;
5027 :
5028 : /* Do nothing if not dumping schema */
5029 568 : if (!dopt->dumpSchema)
5030 84 : return;
5031 :
5032 484 : tag = psprintf("%s %s", pubinfo->dobj.name, tbinfo->dobj.name);
5033 :
5034 484 : query = createPQExpBuffer();
5035 :
5036 484 : appendPQExpBuffer(query, "ALTER PUBLICATION %s ADD TABLE ONLY",
5037 484 : fmtId(pubinfo->dobj.name));
5038 484 : appendPQExpBuffer(query, " %s",
5039 484 : fmtQualifiedDumpable(tbinfo));
5040 :
5041 484 : if (pubrinfo->pubrattrs)
5042 154 : appendPQExpBuffer(query, " (%s)", pubrinfo->pubrattrs);
5043 :
5044 484 : if (pubrinfo->pubrelqual)
5045 : {
5046 : /*
5047 : * It's necessary to add parentheses around the expression because
5048 : * pg_get_expr won't supply the parentheses for things like WHERE
5049 : * TRUE.
5050 : */
5051 216 : appendPQExpBuffer(query, " WHERE (%s)", pubrinfo->pubrelqual);
5052 : }
5053 484 : appendPQExpBufferStr(query, ";\n");
5054 :
5055 : /*
 5056                 :      * There is no point in creating a drop query, as the drop is handled by
 5057                 :      * the table drop. (If you think of changing this, see also _printTocEntry().)
5058 : * Although this object doesn't really have ownership as such, set the
5059 : * owner field anyway to ensure that the command is run by the correct
5060 : * role at restore time.
5061 : */
5062 484 : if (pubrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5063 484 : ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
5064 484 : ARCHIVE_OPTS(.tag = tag,
5065 : .namespace = tbinfo->dobj.namespace->dobj.name,
5066 : .owner = pubinfo->rolname,
5067 : .description = "PUBLICATION TABLE",
5068 : .section = SECTION_POST_DATA,
5069 : .createStmt = query->data));
5070 :
5071 : /* These objects can't currently have comments or seclabels */
5072 :
5073 484 : free(tag);
5074 484 : destroyPQExpBuffer(query);
5075 : }
5076 :
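/*
 * For illustration only (the names, column list and row filter are assumed
 * examples): a table-level membership with both optional clauses is
 * emitted roughly as
 *
 *     ALTER PUBLICATION pub1 ADD TABLE ONLY s1.t1 (id, payload) WHERE (id > 0);
 */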
5077 : /*
5078 : * Is the currently connected user a superuser?
5079 : */
5080 : static bool
5081 374 : is_superuser(Archive *fout)
5082 : {
5083 374 : ArchiveHandle *AH = (ArchiveHandle *) fout;
5084 : const char *val;
5085 :
5086 374 : val = PQparameterStatus(AH->connection, "is_superuser");
5087 :
5088 374 : if (val && strcmp(val, "on") == 0)
5089 368 : return true;
5090 :
5091 6 : return false;
5092 : }
5093 :
5094 : /*
 5095                 :  * Set restrict_nonsystem_relation_kind to the given value.  Since
 5096                 :  * restrict_nonsystem_relation_kind was introduced in minor version releases,
 5097                 :  * the setting query takes effect only where the parameter is available.
5098 : */
5099 : static void
5100 444 : set_restrict_relation_kind(Archive *AH, const char *value)
5101 : {
5102 444 : PQExpBuffer query = createPQExpBuffer();
5103 : PGresult *res;
5104 :
5105 444 : appendPQExpBuffer(query,
5106 : "SELECT set_config(name, '%s', false) "
5107 : "FROM pg_settings "
5108 : "WHERE name = 'restrict_nonsystem_relation_kind'",
5109 : value);
5110 444 : res = ExecuteSqlQuery(AH, query->data, PGRES_TUPLES_OK);
5111 :
5112 444 : PQclear(res);
5113 444 : destroyPQExpBuffer(query);
5114 444 : }
5115 :
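/*
 * Illustrative usage only (the value shown is an assumed example, not a
 * quote from this file):
 *
 *     set_restrict_relation_kind(fout, "view, foreign-table");
 *
 * On servers that predate the parameter, the set_config() query above
 * matches no pg_settings row and therefore does nothing.
 */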
5116 : /*
5117 : * getSubscriptions
5118 : * get information about subscriptions
5119 : */
5120 : void
5121 376 : getSubscriptions(Archive *fout)
5122 : {
5123 376 : DumpOptions *dopt = fout->dopt;
5124 : PQExpBuffer query;
5125 : PGresult *res;
5126 : SubscriptionInfo *subinfo;
5127 : int i_tableoid;
5128 : int i_oid;
5129 : int i_subname;
5130 : int i_subowner;
5131 : int i_subbinary;
5132 : int i_substream;
5133 : int i_subtwophasestate;
5134 : int i_subdisableonerr;
5135 : int i_subpasswordrequired;
5136 : int i_subrunasowner;
5137 : int i_subconninfo;
5138 : int i_subslotname;
5139 : int i_subsynccommit;
5140 : int i_subpublications;
5141 : int i_suborigin;
5142 : int i_suboriginremotelsn;
5143 : int i_subenabled;
5144 : int i_subfailover;
5145 : int i_subretaindeadtuples;
5146 : int i_submaxretention;
5147 : int i,
5148 : ntups;
5149 :
5150 376 : if (dopt->no_subscriptions || fout->remoteVersion < 100000)
5151 2 : return;
5152 :
5153 374 : if (!is_superuser(fout))
5154 : {
5155 : int n;
5156 :
5157 6 : res = ExecuteSqlQuery(fout,
5158 : "SELECT count(*) FROM pg_subscription "
5159 : "WHERE subdbid = (SELECT oid FROM pg_database"
5160 : " WHERE datname = current_database())",
5161 : PGRES_TUPLES_OK);
5162 6 : n = atoi(PQgetvalue(res, 0, 0));
5163 6 : if (n > 0)
5164 4 : pg_log_warning("subscriptions not dumped because current user is not a superuser");
5165 6 : PQclear(res);
5166 6 : return;
5167 : }
5168 :
5169 368 : query = createPQExpBuffer();
5170 :
5171 : /* Get the subscriptions in current database. */
5172 368 : appendPQExpBufferStr(query,
5173 : "SELECT s.tableoid, s.oid, s.subname,\n"
5174 : " s.subowner,\n"
5175 : " s.subconninfo, s.subslotname, s.subsynccommit,\n"
5176 : " s.subpublications,\n");
5177 :
5178 368 : if (fout->remoteVersion >= 140000)
5179 368 : appendPQExpBufferStr(query, " s.subbinary,\n");
5180 : else
5181 0 : appendPQExpBufferStr(query, " false AS subbinary,\n");
5182 :
5183 368 : if (fout->remoteVersion >= 140000)
5184 368 : appendPQExpBufferStr(query, " s.substream,\n");
5185 : else
5186 0 : appendPQExpBufferStr(query, " 'f' AS substream,\n");
5187 :
5188 368 : if (fout->remoteVersion >= 150000)
5189 368 : appendPQExpBufferStr(query,
5190 : " s.subtwophasestate,\n"
5191 : " s.subdisableonerr,\n");
5192 : else
5193 0 : appendPQExpBuffer(query,
5194 : " '%c' AS subtwophasestate,\n"
5195 : " false AS subdisableonerr,\n",
5196 : LOGICALREP_TWOPHASE_STATE_DISABLED);
5197 :
5198 368 : if (fout->remoteVersion >= 160000)
5199 368 : appendPQExpBufferStr(query,
5200 : " s.subpasswordrequired,\n"
5201 : " s.subrunasowner,\n"
5202 : " s.suborigin,\n");
5203 : else
5204 0 : appendPQExpBuffer(query,
5205 : " 't' AS subpasswordrequired,\n"
5206 : " 't' AS subrunasowner,\n"
5207 : " '%s' AS suborigin,\n",
5208 : LOGICALREP_ORIGIN_ANY);
5209 :
5210 368 : if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5211 76 : appendPQExpBufferStr(query, " o.remote_lsn AS suboriginremotelsn,\n"
5212 : " s.subenabled,\n");
5213 : else
5214 292 : appendPQExpBufferStr(query, " NULL AS suboriginremotelsn,\n"
5215 : " false AS subenabled,\n");
5216 :
5217 368 : if (fout->remoteVersion >= 170000)
5218 368 : appendPQExpBufferStr(query,
5219 : " s.subfailover,\n");
5220 : else
5221 0 : appendPQExpBufferStr(query,
5222 : " false AS subfailover,\n");
5223 :
5224 368 : if (fout->remoteVersion >= 190000)
5225 368 : appendPQExpBufferStr(query,
5226 : " s.subretaindeadtuples,\n");
5227 : else
5228 0 : appendPQExpBufferStr(query,
5229 : " false AS subretaindeadtuples,\n");
5230 :
5231 368 : if (fout->remoteVersion >= 190000)
5232 368 : appendPQExpBufferStr(query,
5233 : " s.submaxretention\n");
5234 : else
5235 0 : appendPQExpBuffer(query,
5236 : " 0 AS submaxretention\n");
5237 :
5238 368 : appendPQExpBufferStr(query,
5239 : "FROM pg_subscription s\n");
5240 :
5241 368 : if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5242 76 : appendPQExpBufferStr(query,
5243 : "LEFT JOIN pg_catalog.pg_replication_origin_status o \n"
5244 : " ON o.external_id = 'pg_' || s.oid::text \n");
5245 :
5246 368 : appendPQExpBufferStr(query,
5247 : "WHERE s.subdbid = (SELECT oid FROM pg_database\n"
5248 : " WHERE datname = current_database())");
5249 :
5250 368 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5251 :
5252 368 : ntups = PQntuples(res);
5253 :
5254 : /*
 5255                 :      * Get subscription fields.  We don't include subskiplsn in the dump, as
 5256                 :      * that value may no longer be relevant after the dump is restored.
5257 : */
5258 368 : i_tableoid = PQfnumber(res, "tableoid");
5259 368 : i_oid = PQfnumber(res, "oid");
5260 368 : i_subname = PQfnumber(res, "subname");
5261 368 : i_subowner = PQfnumber(res, "subowner");
5262 368 : i_subenabled = PQfnumber(res, "subenabled");
5263 368 : i_subbinary = PQfnumber(res, "subbinary");
5264 368 : i_substream = PQfnumber(res, "substream");
5265 368 : i_subtwophasestate = PQfnumber(res, "subtwophasestate");
5266 368 : i_subdisableonerr = PQfnumber(res, "subdisableonerr");
5267 368 : i_subpasswordrequired = PQfnumber(res, "subpasswordrequired");
5268 368 : i_subrunasowner = PQfnumber(res, "subrunasowner");
5269 368 : i_subfailover = PQfnumber(res, "subfailover");
5270 368 : i_subretaindeadtuples = PQfnumber(res, "subretaindeadtuples");
5271 368 : i_submaxretention = PQfnumber(res, "submaxretention");
5272 368 : i_subconninfo = PQfnumber(res, "subconninfo");
5273 368 : i_subslotname = PQfnumber(res, "subslotname");
5274 368 : i_subsynccommit = PQfnumber(res, "subsynccommit");
5275 368 : i_subpublications = PQfnumber(res, "subpublications");
5276 368 : i_suborigin = PQfnumber(res, "suborigin");
5277 368 : i_suboriginremotelsn = PQfnumber(res, "suboriginremotelsn");
5278 :
5279 368 : subinfo = pg_malloc(ntups * sizeof(SubscriptionInfo));
5280 :
5281 624 : for (i = 0; i < ntups; i++)
5282 : {
5283 256 : subinfo[i].dobj.objType = DO_SUBSCRIPTION;
5284 256 : subinfo[i].dobj.catId.tableoid =
5285 256 : atooid(PQgetvalue(res, i, i_tableoid));
5286 256 : subinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
5287 256 : AssignDumpId(&subinfo[i].dobj);
5288 256 : subinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_subname));
5289 256 : subinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_subowner));
5290 :
5291 256 : subinfo[i].subenabled =
5292 256 : (strcmp(PQgetvalue(res, i, i_subenabled), "t") == 0);
5293 256 : subinfo[i].subbinary =
5294 256 : (strcmp(PQgetvalue(res, i, i_subbinary), "t") == 0);
5295 256 : subinfo[i].substream = *(PQgetvalue(res, i, i_substream));
5296 256 : subinfo[i].subtwophasestate = *(PQgetvalue(res, i, i_subtwophasestate));
5297 256 : subinfo[i].subdisableonerr =
5298 256 : (strcmp(PQgetvalue(res, i, i_subdisableonerr), "t") == 0);
5299 256 : subinfo[i].subpasswordrequired =
5300 256 : (strcmp(PQgetvalue(res, i, i_subpasswordrequired), "t") == 0);
5301 256 : subinfo[i].subrunasowner =
5302 256 : (strcmp(PQgetvalue(res, i, i_subrunasowner), "t") == 0);
5303 256 : subinfo[i].subfailover =
5304 256 : (strcmp(PQgetvalue(res, i, i_subfailover), "t") == 0);
5305 256 : subinfo[i].subretaindeadtuples =
5306 256 : (strcmp(PQgetvalue(res, i, i_subretaindeadtuples), "t") == 0);
5307 256 : subinfo[i].submaxretention =
5308 256 : atoi(PQgetvalue(res, i, i_submaxretention));
5309 512 : subinfo[i].subconninfo =
5310 256 : pg_strdup(PQgetvalue(res, i, i_subconninfo));
5311 256 : if (PQgetisnull(res, i, i_subslotname))
5312 0 : subinfo[i].subslotname = NULL;
5313 : else
5314 256 : subinfo[i].subslotname =
5315 256 : pg_strdup(PQgetvalue(res, i, i_subslotname));
5316 512 : subinfo[i].subsynccommit =
5317 256 : pg_strdup(PQgetvalue(res, i, i_subsynccommit));
5318 512 : subinfo[i].subpublications =
5319 256 : pg_strdup(PQgetvalue(res, i, i_subpublications));
5320 256 : subinfo[i].suborigin = pg_strdup(PQgetvalue(res, i, i_suborigin));
5321 256 : if (PQgetisnull(res, i, i_suboriginremotelsn))
5322 254 : subinfo[i].suboriginremotelsn = NULL;
5323 : else
5324 2 : subinfo[i].suboriginremotelsn =
5325 2 : pg_strdup(PQgetvalue(res, i, i_suboriginremotelsn));
5326 :
5327 : /* Decide whether we want to dump it */
5328 256 : selectDumpableObject(&(subinfo[i].dobj), fout);
5329 : }
5330 368 : PQclear(res);
5331 :
5332 368 : destroyPQExpBuffer(query);
5333 : }
5334 :
5335 : /*
5336 : * getSubscriptionRelations
5337 : * Get information about subscription membership for dumpable relations. This
5338 : * will be used only in binary-upgrade mode for PG17 or later versions.
5339 : */
5340 : void
5341 376 : getSubscriptionRelations(Archive *fout)
5342 : {
5343 376 : DumpOptions *dopt = fout->dopt;
5344 376 : SubscriptionInfo *subinfo = NULL;
5345 : SubRelInfo *subrinfo;
5346 : PGresult *res;
5347 : int i_srsubid;
5348 : int i_srrelid;
5349 : int i_srsubstate;
5350 : int i_srsublsn;
5351 : int ntups;
5352 376 : Oid last_srsubid = InvalidOid;
5353 :
5354 376 : if (dopt->no_subscriptions || !dopt->binary_upgrade ||
5355 76 : fout->remoteVersion < 170000)
5356 300 : return;
5357 :
5358 76 : res = ExecuteSqlQuery(fout,
5359 : "SELECT srsubid, srrelid, srsubstate, srsublsn "
5360 : "FROM pg_catalog.pg_subscription_rel "
5361 : "ORDER BY srsubid",
5362 : PGRES_TUPLES_OK);
5363 76 : ntups = PQntuples(res);
5364 76 : if (ntups == 0)
5365 74 : goto cleanup;
5366 :
5367 : /* Get pg_subscription_rel attributes */
5368 2 : i_srsubid = PQfnumber(res, "srsubid");
5369 2 : i_srrelid = PQfnumber(res, "srrelid");
5370 2 : i_srsubstate = PQfnumber(res, "srsubstate");
5371 2 : i_srsublsn = PQfnumber(res, "srsublsn");
5372 :
5373 2 : subrinfo = pg_malloc(ntups * sizeof(SubRelInfo));
5374 8 : for (int i = 0; i < ntups; i++)
5375 : {
5376 6 : Oid cur_srsubid = atooid(PQgetvalue(res, i, i_srsubid));
5377 6 : Oid relid = atooid(PQgetvalue(res, i, i_srrelid));
5378 : TableInfo *tblinfo;
5379 :
5380 : /*
5381 : * If we switched to a new subscription, check if the subscription
5382 : * exists.
5383 : */
5384 6 : if (cur_srsubid != last_srsubid)
5385 : {
5386 4 : subinfo = findSubscriptionByOid(cur_srsubid);
5387 4 : if (subinfo == NULL)
5388 0 : pg_fatal("subscription with OID %u does not exist", cur_srsubid);
5389 :
5390 4 : last_srsubid = cur_srsubid;
5391 : }
5392 :
5393 6 : tblinfo = findTableByOid(relid);
5394 6 : if (tblinfo == NULL)
5395 0 : pg_fatal("failed sanity check, relation with OID %u not found",
5396 : relid);
5397 :
5398 : /* OK, make a DumpableObject for this relationship */
5399 6 : subrinfo[i].dobj.objType = DO_SUBSCRIPTION_REL;
5400 6 : subrinfo[i].dobj.catId.tableoid = relid;
5401 6 : subrinfo[i].dobj.catId.oid = cur_srsubid;
5402 6 : AssignDumpId(&subrinfo[i].dobj);
5403 6 : subrinfo[i].dobj.namespace = tblinfo->dobj.namespace;
5404 6 : subrinfo[i].dobj.name = tblinfo->dobj.name;
5405 6 : subrinfo[i].subinfo = subinfo;
5406 6 : subrinfo[i].tblinfo = tblinfo;
5407 6 : subrinfo[i].srsubstate = PQgetvalue(res, i, i_srsubstate)[0];
5408 6 : if (PQgetisnull(res, i, i_srsublsn))
5409 2 : subrinfo[i].srsublsn = NULL;
5410 : else
5411 4 : subrinfo[i].srsublsn = pg_strdup(PQgetvalue(res, i, i_srsublsn));
5412 :
5413 : /* Decide whether we want to dump it */
5414 6 : selectDumpableObject(&(subrinfo[i].dobj), fout);
5415 : }
5416 :
5417 2 : cleanup:
5418 76 : PQclear(res);
5419 : }
5420 :
5421 : /*
5422 : * dumpSubscriptionTable
5423 : * Dump the definition of the given subscription table mapping. This will be
5424 : * used only in binary-upgrade mode for PG17 or later versions.
5425 : */
5426 : static void
5427 6 : dumpSubscriptionTable(Archive *fout, const SubRelInfo *subrinfo)
5428 : {
5429 6 : DumpOptions *dopt = fout->dopt;
5430 6 : SubscriptionInfo *subinfo = subrinfo->subinfo;
5431 : PQExpBuffer query;
5432 : char *tag;
5433 :
5434 : /* Do nothing if not dumping schema */
5435 6 : if (!dopt->dumpSchema)
5436 0 : return;
5437 :
5438 : Assert(fout->dopt->binary_upgrade && fout->remoteVersion >= 170000);
5439 :
5440 6 : tag = psprintf("%s %s", subinfo->dobj.name, subrinfo->tblinfo->dobj.name);
5441 :
5442 6 : query = createPQExpBuffer();
5443 :
5444 6 : if (subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5445 : {
5446 : /*
5447 : * binary_upgrade_add_sub_rel_state will add the subscription relation
5448 : * to pg_subscription_rel table. This will be used only in
 5449                 :      * to the pg_subscription_rel catalog.  This is used only in
5450 : */
5451 6 : appendPQExpBufferStr(query,
5452 : "\n-- For binary upgrade, must preserve the subscriber table.\n");
5453 6 : appendPQExpBufferStr(query,
5454 : "SELECT pg_catalog.binary_upgrade_add_sub_rel_state(");
5455 6 : appendStringLiteralAH(query, subinfo->dobj.name, fout);
5456 6 : appendPQExpBuffer(query,
5457 : ", %u, '%c'",
5458 6 : subrinfo->tblinfo->dobj.catId.oid,
5459 6 : subrinfo->srsubstate);
5460 :
5461 6 : if (subrinfo->srsublsn && subrinfo->srsublsn[0] != '\0')
5462 4 : appendPQExpBuffer(query, ", '%s'", subrinfo->srsublsn);
5463 : else
5464 2 : appendPQExpBufferStr(query, ", NULL");
5465 :
5466 6 : appendPQExpBufferStr(query, ");\n");
5467 : }
5468 :
5469 : /*
 5470                 :      * There is no point in creating a drop query, as the drop is handled by
 5471                 :      * the table drop. (If you think of changing this, see also _printTocEntry().)
5472 : * Although this object doesn't really have ownership as such, set the
5473 : * owner field anyway to ensure that the command is run by the correct
5474 : * role at restore time.
5475 : */
5476 6 : if (subrinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5477 6 : ArchiveEntry(fout, subrinfo->dobj.catId, subrinfo->dobj.dumpId,
5478 6 : ARCHIVE_OPTS(.tag = tag,
5479 : .namespace = subrinfo->tblinfo->dobj.namespace->dobj.name,
5480 : .owner = subinfo->rolname,
5481 : .description = "SUBSCRIPTION TABLE",
5482 : .section = SECTION_POST_DATA,
5483 : .createStmt = query->data));
5484 :
5485 : /* These objects can't currently have comments or seclabels */
5486 :
5487 6 : free(tag);
5488 6 : destroyPQExpBuffer(query);
5489 : }
5490 :
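/*
 * For illustration only (all values are assumed examples): in
 * binary-upgrade mode this emits a call such as
 *
 *     SELECT pg_catalog.binary_upgrade_add_sub_rel_state('sub1', 16384, 'r', '0/15D7910');
 *
 * with NULL in place of the LSN when srsublsn is unset.
 */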
5491 : /*
5492 : * dumpSubscription
5493 : * dump the definition of the given subscription
5494 : */
5495 : static void
5496 220 : dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo)
5497 : {
5498 220 : DumpOptions *dopt = fout->dopt;
5499 : PQExpBuffer delq;
5500 : PQExpBuffer query;
5501 : PQExpBuffer publications;
5502 : char *qsubname;
5503 220 : char **pubnames = NULL;
5504 220 : int npubnames = 0;
5505 : int i;
5506 :
5507 : /* Do nothing if not dumping schema */
5508 220 : if (!dopt->dumpSchema)
5509 36 : return;
5510 :
5511 184 : delq = createPQExpBuffer();
5512 184 : query = createPQExpBuffer();
5513 :
5514 184 : qsubname = pg_strdup(fmtId(subinfo->dobj.name));
5515 :
5516 184 : appendPQExpBuffer(delq, "DROP SUBSCRIPTION %s;\n",
5517 : qsubname);
5518 :
5519 184 : appendPQExpBuffer(query, "CREATE SUBSCRIPTION %s CONNECTION ",
5520 : qsubname);
5521 184 : appendStringLiteralAH(query, subinfo->subconninfo, fout);
5522 :
5523 : /* Build list of quoted publications and append them to query. */
5524 184 : if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
5525 0 : pg_fatal("could not parse %s array", "subpublications");
5526 :
5527 184 : publications = createPQExpBuffer();
5528 368 : for (i = 0; i < npubnames; i++)
5529 : {
5530 184 : if (i > 0)
5531 0 : appendPQExpBufferStr(publications, ", ");
5532 :
5533 184 : appendPQExpBufferStr(publications, fmtId(pubnames[i]));
5534 : }
5535 :
5536 184 : appendPQExpBuffer(query, " PUBLICATION %s WITH (connect = false, slot_name = ", publications->data);
5537 184 : if (subinfo->subslotname)
5538 184 : appendStringLiteralAH(query, subinfo->subslotname, fout);
5539 : else
5540 0 : appendPQExpBufferStr(query, "NONE");
5541 :
5542 184 : if (subinfo->subbinary)
5543 0 : appendPQExpBufferStr(query, ", binary = true");
5544 :
5545 184 : if (subinfo->substream == LOGICALREP_STREAM_ON)
5546 60 : appendPQExpBufferStr(query, ", streaming = on");
5547 124 : else if (subinfo->substream == LOGICALREP_STREAM_PARALLEL)
5548 64 : appendPQExpBufferStr(query, ", streaming = parallel");
5549 : else
5550 60 : appendPQExpBufferStr(query, ", streaming = off");
5551 :
5552 184 : if (subinfo->subtwophasestate != LOGICALREP_TWOPHASE_STATE_DISABLED)
5553 0 : appendPQExpBufferStr(query, ", two_phase = on");
5554 :
5555 184 : if (subinfo->subdisableonerr)
5556 0 : appendPQExpBufferStr(query, ", disable_on_error = true");
5557 :
5558 184 : if (!subinfo->subpasswordrequired)
5559 0 : appendPQExpBufferStr(query, ", password_required = false");
5560 :
5561 184 : if (subinfo->subrunasowner)
5562 0 : appendPQExpBufferStr(query, ", run_as_owner = true");
5563 :
5564 184 : if (subinfo->subfailover)
5565 2 : appendPQExpBufferStr(query, ", failover = true");
5566 :
5567 184 : if (subinfo->subretaindeadtuples)
5568 2 : appendPQExpBufferStr(query, ", retain_dead_tuples = true");
5569 :
5570 184 : if (subinfo->submaxretention)
5571 0 : appendPQExpBuffer(query, ", max_retention_duration = %d", subinfo->submaxretention);
5572 :
5573 184 : if (strcmp(subinfo->subsynccommit, "off") != 0)
5574 0 : appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
5575 :
5576 184 : if (pg_strcasecmp(subinfo->suborigin, LOGICALREP_ORIGIN_ANY) != 0)
5577 60 : appendPQExpBuffer(query, ", origin = %s", subinfo->suborigin);
5578 :
5579 184 : appendPQExpBufferStr(query, ");\n");
5580 :
5581 : /*
5582 : * In binary-upgrade mode, we allow the replication to continue after the
5583 : * upgrade.
5584 : */
5585 184 : if (dopt->binary_upgrade && fout->remoteVersion >= 170000)
5586 : {
5587 10 : if (subinfo->suboriginremotelsn)
5588 : {
5589 : /*
5590 : * Preserve the remote_lsn for the subscriber's replication
5591 : * origin. This value is required to start the replication from
5592 : * the position before the upgrade. This value will be stale if
5593 : * the publisher gets upgraded before the subscriber node.
5594 : * However, this shouldn't be a problem as the upgrade of the
5595 : * publisher ensures that all the transactions were replicated
5596 : * before upgrading it.
5597 : */
5598 2 : appendPQExpBufferStr(query,
5599 : "\n-- For binary upgrade, must preserve the remote_lsn for the subscriber's replication origin.\n");
5600 2 : appendPQExpBufferStr(query,
5601 : "SELECT pg_catalog.binary_upgrade_replorigin_advance(");
5602 2 : appendStringLiteralAH(query, subinfo->dobj.name, fout);
5603 2 : appendPQExpBuffer(query, ", '%s');\n", subinfo->suboriginremotelsn);
5604 : }
5605 :
5606 10 : if (subinfo->subenabled)
5607 : {
5608 : /*
5609 : * Enable the subscription to allow the replication to continue
5610 : * after the upgrade.
5611 : */
5612 2 : appendPQExpBufferStr(query,
5613 : "\n-- For binary upgrade, must preserve the subscriber's running state.\n");
5614 2 : appendPQExpBuffer(query, "ALTER SUBSCRIPTION %s ENABLE;\n", qsubname);
5615 : }
5616 : }
5617 :
5618 184 : if (subinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
5619 184 : ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
5620 184 : ARCHIVE_OPTS(.tag = subinfo->dobj.name,
5621 : .owner = subinfo->rolname,
5622 : .description = "SUBSCRIPTION",
5623 : .section = SECTION_POST_DATA,
5624 : .createStmt = query->data,
5625 : .dropStmt = delq->data));
5626 :
5627 184 : if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
5628 60 : dumpComment(fout, "SUBSCRIPTION", qsubname,
5629 60 : NULL, subinfo->rolname,
5630 60 : subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
5631 :
5632 184 : if (subinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
5633 0 : dumpSecLabel(fout, "SUBSCRIPTION", qsubname,
5634 0 : NULL, subinfo->rolname,
5635 0 : subinfo->dobj.catId, 0, subinfo->dobj.dumpId);
5636 :
5637 184 : destroyPQExpBuffer(publications);
5638 184 : free(pubnames);
5639 :
5640 184 : destroyPQExpBuffer(delq);
5641 184 : destroyPQExpBuffer(query);
5642 184 : free(qsubname);
5643 : }
5644 :
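/*
 * For illustration only (names, connection string and option values are
 * assumed examples): a subscription is recreated roughly as
 *
 *     CREATE SUBSCRIPTION sub1 CONNECTION 'host=publisher dbname=src'
 *         PUBLICATION pub1 WITH (connect = false, slot_name = 'sub1',
 *         streaming = parallel, origin = none);
 *
 * connect = false ensures that restoring the dump does not start
 * replication by itself; the remaining options mirror the catalog state
 * examined above.
 */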
5645 : /*
5646 : * Given a "create query", append as many ALTER ... DEPENDS ON EXTENSION as
 5647                 :  * Given a "create query", append as many ALTER ... DEPENDS ON EXTENSION
 5648                 :  * commands as the object needs.
5649 : static void
5650 10152 : append_depends_on_extension(Archive *fout,
5651 : PQExpBuffer create,
5652 : const DumpableObject *dobj,
5653 : const char *catalog,
5654 : const char *keyword,
5655 : const char *objname)
5656 : {
5657 10152 : if (dobj->depends_on_ext)
5658 : {
5659 : char *nm;
5660 : PGresult *res;
5661 : PQExpBuffer query;
5662 : int ntups;
5663 : int i_extname;
5664 : int i;
5665 :
5666 : /* dodge fmtId() non-reentrancy */
5667 84 : nm = pg_strdup(objname);
5668 :
5669 84 : query = createPQExpBuffer();
5670 84 : appendPQExpBuffer(query,
5671 : "SELECT e.extname "
5672 : "FROM pg_catalog.pg_depend d, pg_catalog.pg_extension e "
5673 : "WHERE d.refobjid = e.oid AND classid = '%s'::pg_catalog.regclass "
5674 : "AND objid = '%u'::pg_catalog.oid AND deptype = 'x' "
5675 : "AND refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass",
5676 : catalog,
5677 84 : dobj->catId.oid);
5678 84 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
5679 84 : ntups = PQntuples(res);
5680 84 : i_extname = PQfnumber(res, "extname");
5681 168 : for (i = 0; i < ntups; i++)
5682 : {
5683 84 : appendPQExpBuffer(create, "\nALTER %s %s DEPENDS ON EXTENSION %s;",
5684 : keyword, nm,
5685 84 : fmtId(PQgetvalue(res, i, i_extname)));
5686 : }
5687 :
5688 84 : PQclear(res);
5689 84 : destroyPQExpBuffer(query);
5690 84 : pg_free(nm);
5691 : }
5692 10152 : }
5693 :
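/*
 * For illustration only (assumed names): an object with an 'x'
 * (extension-dependency) entry in pg_depend gains one trailing line per
 * extension, e.g.
 *
 *     ALTER INDEX public.t1_idx DEPENDS ON EXTENSION my_ext;
 */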
5694 : static Oid
5695 0 : get_next_possible_free_pg_type_oid(Archive *fout, PQExpBuffer upgrade_query)
5696 : {
5697 : /*
5698 : * If the old version didn't assign an array type, but the new version
5699 : * does, we must select an unused type OID to assign. This currently only
5700 : * happens for domains, when upgrading pre-v11 to v11 and up.
5701 : *
5702 : * Note: local state here is kind of ugly, but we must have some, since we
5703 : * mustn't choose the same unused OID more than once.
5704 : */
5705 : static Oid next_possible_free_oid = FirstNormalObjectId;
5706 : PGresult *res;
5707 : bool is_dup;
5708 :
5709 : do
5710 : {
5711 0 : ++next_possible_free_oid;
5712 0 : printfPQExpBuffer(upgrade_query,
5713 : "SELECT EXISTS(SELECT 1 "
5714 : "FROM pg_catalog.pg_type "
5715 : "WHERE oid = '%u'::pg_catalog.oid);",
5716 : next_possible_free_oid);
5717 0 : res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
5718 0 : is_dup = (PQgetvalue(res, 0, 0)[0] == 't');
5719 0 : PQclear(res);
5720 0 : } while (is_dup);
5721 :
5722 0 : return next_possible_free_oid;
5723 : }
5724 :
5725 : static void
5726 1894 : binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
5727 : PQExpBuffer upgrade_buffer,
5728 : Oid pg_type_oid,
5729 : bool force_array_type,
5730 : bool include_multirange_type)
5731 : {
5732 1894 : PQExpBuffer upgrade_query = createPQExpBuffer();
5733 : PGresult *res;
5734 : Oid pg_type_array_oid;
5735 : Oid pg_type_multirange_oid;
5736 : Oid pg_type_multirange_array_oid;
5737 : TypeInfo *tinfo;
5738 :
5739 1894 : appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
5740 1894 : appendPQExpBuffer(upgrade_buffer,
5741 : "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5742 : pg_type_oid);
5743 :
5744 1894 : tinfo = findTypeByOid(pg_type_oid);
5745 1894 : if (tinfo)
5746 1894 : pg_type_array_oid = tinfo->typarray;
5747 : else
5748 0 : pg_type_array_oid = InvalidOid;
5749 :
5750 1894 : if (!OidIsValid(pg_type_array_oid) && force_array_type)
5751 0 : pg_type_array_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
5752 :
5753 1894 : if (OidIsValid(pg_type_array_oid))
5754 : {
5755 1890 : appendPQExpBufferStr(upgrade_buffer,
5756 : "\n-- For binary upgrade, must preserve pg_type array oid\n");
5757 1890 : appendPQExpBuffer(upgrade_buffer,
5758 : "SELECT pg_catalog.binary_upgrade_set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5759 : pg_type_array_oid);
5760 : }
5761 :
5762 : /*
5763 : * Pre-set the multirange type oid and its own array type oid.
5764 : */
5765 1894 : if (include_multirange_type)
5766 : {
5767 16 : if (fout->remoteVersion >= 140000)
5768 : {
5769 16 : printfPQExpBuffer(upgrade_query,
5770 : "SELECT t.oid, t.typarray "
5771 : "FROM pg_catalog.pg_type t "
5772 : "JOIN pg_catalog.pg_range r "
5773 : "ON t.oid = r.rngmultitypid "
5774 : "WHERE r.rngtypid = '%u'::pg_catalog.oid;",
5775 : pg_type_oid);
5776 :
5777 16 : res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
5778 :
5779 16 : pg_type_multirange_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "oid")));
5780 16 : pg_type_multirange_array_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typarray")));
5781 :
5782 16 : PQclear(res);
5783 : }
5784 : else
5785 : {
5786 0 : pg_type_multirange_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
5787 0 : pg_type_multirange_array_oid = get_next_possible_free_pg_type_oid(fout, upgrade_query);
5788 : }
5789 :
5790 16 : appendPQExpBufferStr(upgrade_buffer,
5791 : "\n-- For binary upgrade, must preserve multirange pg_type oid\n");
5792 16 : appendPQExpBuffer(upgrade_buffer,
5793 : "SELECT pg_catalog.binary_upgrade_set_next_multirange_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5794 : pg_type_multirange_oid);
5795 16 : appendPQExpBufferStr(upgrade_buffer,
5796 : "\n-- For binary upgrade, must preserve multirange pg_type array oid\n");
5797 16 : appendPQExpBuffer(upgrade_buffer,
5798 : "SELECT pg_catalog.binary_upgrade_set_next_multirange_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
5799 : pg_type_multirange_array_oid);
5800 : }
5801 :
5802 1894 : destroyPQExpBuffer(upgrade_query);
5803 1894 : }
5804 :
5805 : static void
5806 1744 : binary_upgrade_set_type_oids_by_rel(Archive *fout,
5807 : PQExpBuffer upgrade_buffer,
5808 : const TableInfo *tbinfo)
5809 : {
5810 1744 : Oid pg_type_oid = tbinfo->reltype;
5811 :
5812 1744 : if (OidIsValid(pg_type_oid))
5813 1744 : binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
5814 : pg_type_oid, false, false);
5815 1744 : }
5816 :
5817 : /*
5818 : * bsearch() comparator for BinaryUpgradeClassOidItem
5819 : */
5820 : static int
5821 24916 : BinaryUpgradeClassOidItemCmp(const void *p1, const void *p2)
5822 : {
5823 24916 : BinaryUpgradeClassOidItem v1 = *((const BinaryUpgradeClassOidItem *) p1);
5824 24916 : BinaryUpgradeClassOidItem v2 = *((const BinaryUpgradeClassOidItem *) p2);
5825 :
5826 24916 : return pg_cmp_u32(v1.oid, v2.oid);
5827 : }
5828 :
5829 : /*
5830 : * collectBinaryUpgradeClassOids
5831 : *
5832 : * Construct a table of pg_class information required for
 5833                 :  * binary_upgrade_set_pg_class_oids().  The table is sorted by OID so that
 5834                 :  * entries can be looked up quickly with bsearch().
5835 : */
5836 : static void
5837 76 : collectBinaryUpgradeClassOids(Archive *fout)
5838 : {
5839 : PGresult *res;
5840 : const char *query;
5841 :
5842 76 : query = "SELECT c.oid, c.relkind, c.relfilenode, c.reltoastrelid, "
5843 : "ct.relfilenode, i.indexrelid, cti.relfilenode "
5844 : "FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_index i "
5845 : "ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
5846 : "LEFT JOIN pg_catalog.pg_class ct ON (c.reltoastrelid = ct.oid) "
5847 : "LEFT JOIN pg_catalog.pg_class AS cti ON (i.indexrelid = cti.oid) "
5848 : "ORDER BY c.oid;";
5849 :
5850 76 : res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
5851 :
5852 76 : nbinaryUpgradeClassOids = PQntuples(res);
5853 76 : binaryUpgradeClassOids = (BinaryUpgradeClassOidItem *)
5854 76 : pg_malloc(nbinaryUpgradeClassOids * sizeof(BinaryUpgradeClassOidItem));
5855 :
5856 35404 : for (int i = 0; i < nbinaryUpgradeClassOids; i++)
5857 : {
5858 35328 : binaryUpgradeClassOids[i].oid = atooid(PQgetvalue(res, i, 0));
5859 35328 : binaryUpgradeClassOids[i].relkind = *PQgetvalue(res, i, 1);
5860 35328 : binaryUpgradeClassOids[i].relfilenumber = atooid(PQgetvalue(res, i, 2));
5861 35328 : binaryUpgradeClassOids[i].toast_oid = atooid(PQgetvalue(res, i, 3));
5862 35328 : binaryUpgradeClassOids[i].toast_relfilenumber = atooid(PQgetvalue(res, i, 4));
5863 35328 : binaryUpgradeClassOids[i].toast_index_oid = atooid(PQgetvalue(res, i, 5));
5864 35328 : binaryUpgradeClassOids[i].toast_index_relfilenumber = atooid(PQgetvalue(res, i, 6));
5865 : }
5866 :
5867 76 : PQclear(res);
5868 76 : }
5869 :
5870 : static void
5871 2528 : binary_upgrade_set_pg_class_oids(Archive *fout,
5872 : PQExpBuffer upgrade_buffer, Oid pg_class_oid)
5873 : {
5874 2528 : BinaryUpgradeClassOidItem key = {0};
5875 : BinaryUpgradeClassOidItem *entry;
5876 :
5877 : Assert(binaryUpgradeClassOids);
5878 :
5879 : /*
 5880                 :      * Preserve the OID and relfilenumber of the table, the table's index, the
 5881                 :      * table's TOAST table, and the TOAST table's index, if any.
5882 : *
5883 : * One complexity is that the current table definition might not require
5884 : * the creation of a TOAST table, but the old database might have a TOAST
5885 : * table that was created earlier, before some wide columns were dropped.
5886 : * By setting the TOAST oid we force creation of the TOAST heap and index
5887 : * by the new backend, so we can copy the files during binary upgrade
5888 : * without worrying about this case.
5889 : */
5890 2528 : key.oid = pg_class_oid;
5891 2528 : entry = bsearch(&key, binaryUpgradeClassOids, nbinaryUpgradeClassOids,
5892 : sizeof(BinaryUpgradeClassOidItem),
5893 : BinaryUpgradeClassOidItemCmp);
5894 :
5895 2528 : appendPQExpBufferStr(upgrade_buffer,
5896 : "\n-- For binary upgrade, must preserve pg_class oids and relfilenodes\n");
5897 :
5898 2528 : if (entry->relkind != RELKIND_INDEX &&
5899 1968 : entry->relkind != RELKIND_PARTITIONED_INDEX)
5900 : {
5901 1912 : appendPQExpBuffer(upgrade_buffer,
5902 : "SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
5903 : pg_class_oid);
5904 :
5905 : /*
5906 : * Not every relation has storage. Also, in a pre-v12 database,
5907 : * partitioned tables have a relfilenumber, which should not be
5908 : * preserved when upgrading.
5909 : */
5910 1912 : if (RelFileNumberIsValid(entry->relfilenumber) &&
5911 1584 : entry->relkind != RELKIND_PARTITIONED_TABLE)
5912 1584 : appendPQExpBuffer(upgrade_buffer,
5913 : "SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
5914 : entry->relfilenumber);
5915 :
5916 : /*
5917 : * In a pre-v12 database, partitioned tables might be marked as having
5918 : * toast tables, but we should ignore them if so.
5919 : */
5920 1912 : if (OidIsValid(entry->toast_oid) &&
5921 554 : entry->relkind != RELKIND_PARTITIONED_TABLE)
5922 : {
5923 554 : appendPQExpBuffer(upgrade_buffer,
5924 : "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
5925 : entry->toast_oid);
5926 554 : appendPQExpBuffer(upgrade_buffer,
5927 : "SELECT pg_catalog.binary_upgrade_set_next_toast_relfilenode('%u'::pg_catalog.oid);\n",
5928 : entry->toast_relfilenumber);
5929 :
5930 : /* every toast table has an index */
5931 554 : appendPQExpBuffer(upgrade_buffer,
5932 : "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
5933 : entry->toast_index_oid);
5934 554 : appendPQExpBuffer(upgrade_buffer,
5935 : "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
5936 : entry->toast_index_relfilenumber);
5937 : }
5938 : }
5939 : else
5940 : {
5941 : /* Preserve the OID and relfilenumber of the index */
5942 616 : appendPQExpBuffer(upgrade_buffer,
5943 : "SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
5944 : pg_class_oid);
5945 616 : appendPQExpBuffer(upgrade_buffer,
5946 : "SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
5947 : entry->relfilenumber);
5948 : }
5949 :
5950 2528 : appendPQExpBufferChar(upgrade_buffer, '\n');
5951 2528 : }
5952 :
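/*
 * For illustration only (all OIDs are assumed example values): for an
 * ordinary heap with a TOAST table the emitted block looks like
 *
 *     SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('16384'::pg_catalog.oid);
 *     SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('16384'::pg_catalog.oid);
 *     SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('16387'::pg_catalog.oid);
 *     SELECT pg_catalog.binary_upgrade_set_next_toast_relfilenode('16387'::pg_catalog.oid);
 *     SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('16389'::pg_catalog.oid);
 *     SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('16389'::pg_catalog.oid);
 */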
5953 : /*
5954 : * If the DumpableObject is a member of an extension, add a suitable
5955 : * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
5956 : *
5957 : * For somewhat historical reasons, objname should already be quoted,
5958 : * but not objnamespace (if any).
5959 : */
5960 : static void
5961 3028 : binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
5962 : const DumpableObject *dobj,
5963 : const char *objtype,
5964 : const char *objname,
5965 : const char *objnamespace)
5966 : {
5967 3028 : DumpableObject *extobj = NULL;
5968 : int i;
5969 :
5970 3028 : if (!dobj->ext_member)
5971 2986 : return;
5972 :
5973 : /*
5974 : * Find the parent extension. We could avoid this search if we wanted to
5975 : * add a link field to DumpableObject, but the space costs of that would
5976 : * be considerable. We assume that member objects could only have a
5977 : * direct dependency on their own extension, not any others.
5978 : */
5979 42 : for (i = 0; i < dobj->nDeps; i++)
5980 : {
5981 42 : extobj = findObjectByDumpId(dobj->dependencies[i]);
5982 42 : if (extobj && extobj->objType == DO_EXTENSION)
5983 42 : break;
5984 0 : extobj = NULL;
5985 : }
5986 42 : if (extobj == NULL)
5987 0 : pg_fatal("could not find parent extension for %s %s",
5988 : objtype, objname);
5989 :
5990 42 : appendPQExpBufferStr(upgrade_buffer,
5991 : "\n-- For binary upgrade, handle extension membership the hard way\n");
5992 42 : appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s ",
5993 42 : fmtId(extobj->name),
5994 : objtype);
5995 42 : if (objnamespace && *objnamespace)
5996 36 : appendPQExpBuffer(upgrade_buffer, "%s.", fmtId(objnamespace));
5997 42 : appendPQExpBuffer(upgrade_buffer, "%s;\n", objname);
5998 : }
5999 :
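/*
 * For illustration only (assumed names; objname would already arrive
 * quoted and formatted from the caller): for an extension member function
 * this appends something like
 *
 *     ALTER EXTENSION my_ext ADD FUNCTION my_schema.my_func(integer);
 */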
6000 : /*
6001 : * getNamespaces:
6002 : * get information about all namespaces in the system catalogs
6003 : */
6004 : void
6005 378 : getNamespaces(Archive *fout)
6006 : {
6007 : PGresult *res;
6008 : int ntups;
6009 : int i;
6010 : PQExpBuffer query;
6011 : NamespaceInfo *nsinfo;
6012 : int i_tableoid;
6013 : int i_oid;
6014 : int i_nspname;
6015 : int i_nspowner;
6016 : int i_nspacl;
6017 : int i_acldefault;
6018 :
6019 378 : query = createPQExpBuffer();
6020 :
6021 : /*
 6022                 :      * We fetch all namespaces, including system ones, so that every object we
 6023                 :      * read in can be linked to a containing namespace.
6024 : */
6025 378 : appendPQExpBufferStr(query, "SELECT n.tableoid, n.oid, n.nspname, "
6026 : "n.nspowner, "
6027 : "n.nspacl, "
6028 : "acldefault('n', n.nspowner) AS acldefault "
6029 : "FROM pg_namespace n");
6030 :
6031 378 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6032 :
6033 378 : ntups = PQntuples(res);
6034 :
6035 378 : nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
6036 :
6037 378 : i_tableoid = PQfnumber(res, "tableoid");
6038 378 : i_oid = PQfnumber(res, "oid");
6039 378 : i_nspname = PQfnumber(res, "nspname");
6040 378 : i_nspowner = PQfnumber(res, "nspowner");
6041 378 : i_nspacl = PQfnumber(res, "nspacl");
6042 378 : i_acldefault = PQfnumber(res, "acldefault");
6043 :
6044 3278 : for (i = 0; i < ntups; i++)
6045 : {
6046 : const char *nspowner;
6047 :
6048 2900 : nsinfo[i].dobj.objType = DO_NAMESPACE;
6049 2900 : nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6050 2900 : nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6051 2900 : AssignDumpId(&nsinfo[i].dobj);
6052 2900 : nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
6053 2900 : nsinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_nspacl));
6054 2900 : nsinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6055 2900 : nsinfo[i].dacl.privtype = 0;
6056 2900 : nsinfo[i].dacl.initprivs = NULL;
6057 2900 : nspowner = PQgetvalue(res, i, i_nspowner);
6058 2900 : nsinfo[i].nspowner = atooid(nspowner);
6059 2900 : nsinfo[i].rolname = getRoleName(nspowner);
6060 :
6061 : /* Decide whether to dump this namespace */
6062 2900 : selectDumpableNamespace(&nsinfo[i], fout);
6063 :
6064 : /* Mark whether namespace has an ACL */
6065 2900 : if (!PQgetisnull(res, i, i_nspacl))
6066 1258 : nsinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6067 :
6068 : /*
6069 : * We ignore any pg_init_privs.initprivs entry for the public schema
6070 : * and assume a predetermined default, for several reasons. First,
6071 : * dropping and recreating the schema removes its pg_init_privs entry,
6072 : * but an empty destination database starts with this ACL nonetheless.
6073 : * Second, we support dump/reload of public schema ownership changes.
6074 : * ALTER SCHEMA OWNER filters nspacl through aclnewowner(), but
6075 : * initprivs continues to reflect the initial owner. Hence,
6076 : * synthesize the value that nspacl will have after the restore's
6077 : * ALTER SCHEMA OWNER. Third, this makes the destination database
6078 : * match the source's ACL, even if the latter was an initdb-default
6079 : * ACL, which changed in v15. An upgrade pulls in changes to most
6080 : * system object ACLs that the DBA had not customized. We've made the
6081 : * public schema depart from that, because changing its ACL so easily
6082 : * breaks applications.
6083 : */
6084 2900 : if (strcmp(nsinfo[i].dobj.name, "public") == 0)
6085 : {
6086 370 : PQExpBuffer aclarray = createPQExpBuffer();
6087 370 : PQExpBuffer aclitem = createPQExpBuffer();
6088 :
6089 : /* Standard ACL as of v15 is {owner=UC/owner,=U/owner} */
6090 370 : appendPQExpBufferChar(aclarray, '{');
6091 370 : quoteAclUserName(aclitem, nsinfo[i].rolname);
6092 370 : appendPQExpBufferStr(aclitem, "=UC/");
6093 370 : quoteAclUserName(aclitem, nsinfo[i].rolname);
6094 370 : appendPGArray(aclarray, aclitem->data);
6095 370 : resetPQExpBuffer(aclitem);
6096 370 : appendPQExpBufferStr(aclitem, "=U/");
6097 370 : quoteAclUserName(aclitem, nsinfo[i].rolname);
6098 370 : appendPGArray(aclarray, aclitem->data);
6099 370 : appendPQExpBufferChar(aclarray, '}');
6100 :
6101 370 : nsinfo[i].dacl.privtype = 'i';
6102 370 : nsinfo[i].dacl.initprivs = pstrdup(aclarray->data);
6103 370 : nsinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6104 :
6105 370 : destroyPQExpBuffer(aclarray);
6106 370 : destroyPQExpBuffer(aclitem);
6107 : }
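/*
 * Editor's illustration (hypothetical owner): if the public schema is owned
 * by role "postgres", the initprivs array synthesized above is
 *
 *   {postgres=UC/postgres,=U/postgres}
 *
 * i.e. the owner gets USAGE and CREATE, and PUBLIC gets USAGE.
 */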
6108 : }
6109 :
6110 378 : PQclear(res);
6111 378 : destroyPQExpBuffer(query);
6112 378 : }
6113 :
6114 : /*
6115 : * findNamespace:
6116 : * given a namespace OID, look up the info read by getNamespaces
6117 : */
6118 : static NamespaceInfo *
6119 1192196 : findNamespace(Oid nsoid)
6120 : {
6121 : NamespaceInfo *nsinfo;
6122 :
6123 1192196 : nsinfo = findNamespaceByOid(nsoid);
6124 1192196 : if (nsinfo == NULL)
6125 0 : pg_fatal("schema with OID %u does not exist", nsoid);
6126 1192196 : return nsinfo;
6127 : }
6128 :
6129 : /*
6130 : * getExtensions:
6131 : * read all extensions in the system catalogs and return them in the
6132 : * ExtensionInfo* structure
6133 : *
6134 : * numExtensions is set to the number of extensions read in
6135 : */
6136 : ExtensionInfo *
6137 378 : getExtensions(Archive *fout, int *numExtensions)
6138 : {
6139 378 : DumpOptions *dopt = fout->dopt;
6140 : PGresult *res;
6141 : int ntups;
6142 : int i;
6143 : PQExpBuffer query;
6144 378 : ExtensionInfo *extinfo = NULL;
6145 : int i_tableoid;
6146 : int i_oid;
6147 : int i_extname;
6148 : int i_nspname;
6149 : int i_extrelocatable;
6150 : int i_extversion;
6151 : int i_extconfig;
6152 : int i_extcondition;
6153 :
6154 378 : query = createPQExpBuffer();
6155 :
6156 378 : appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
6157 : "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
6158 : "FROM pg_extension x "
6159 : "JOIN pg_namespace n ON n.oid = x.extnamespace");
6160 :
6161 378 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6162 :
6163 378 : ntups = PQntuples(res);
6164 378 : if (ntups == 0)
6165 0 : goto cleanup;
6166 :
6167 378 : extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
6168 :
6169 378 : i_tableoid = PQfnumber(res, "tableoid");
6170 378 : i_oid = PQfnumber(res, "oid");
6171 378 : i_extname = PQfnumber(res, "extname");
6172 378 : i_nspname = PQfnumber(res, "nspname");
6173 378 : i_extrelocatable = PQfnumber(res, "extrelocatable");
6174 378 : i_extversion = PQfnumber(res, "extversion");
6175 378 : i_extconfig = PQfnumber(res, "extconfig");
6176 378 : i_extcondition = PQfnumber(res, "extcondition");
6177 :
6178 816 : for (i = 0; i < ntups; i++)
6179 : {
6180 438 : extinfo[i].dobj.objType = DO_EXTENSION;
6181 438 : extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6182 438 : extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6183 438 : AssignDumpId(&extinfo[i].dobj);
6184 438 : extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
6185 438 : extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
6186 438 : extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
6187 438 : extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
6188 438 : extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
6189 438 : extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
6190 :
6191 : /* Decide whether we want to dump it */
6192 438 : selectDumpableExtension(&(extinfo[i]), dopt);
6193 : }
6194 :
6195 378 : cleanup:
6196 378 : PQclear(res);
6197 378 : destroyPQExpBuffer(query);
6198 :
6199 378 : *numExtensions = ntups;
6200 :
6201 378 : return extinfo;
6202 : }
6203 :
6204 : /*
6205 : * getTypes:
6206 : * get information about all types in the system catalogs
6207 : *
6208 : * NB: this must run after getFuncs() because we assume we can do
6209 : * findFuncByOid().
6210 : */
6211 : void
6212 376 : getTypes(Archive *fout)
6213 : {
6214 : PGresult *res;
6215 : int ntups;
6216 : int i;
6217 376 : PQExpBuffer query = createPQExpBuffer();
6218 : TypeInfo *tyinfo;
6219 : ShellTypeInfo *stinfo;
6220 : int i_tableoid;
6221 : int i_oid;
6222 : int i_typname;
6223 : int i_typnamespace;
6224 : int i_typacl;
6225 : int i_acldefault;
6226 : int i_typowner;
6227 : int i_typelem;
6228 : int i_typrelid;
6229 : int i_typrelkind;
6230 : int i_typtype;
6231 : int i_typisdefined;
6232 : int i_isarray;
6233 : int i_typarray;
6234 :
6235 : /*
6236 : * We include even the built-in types, because those may be used as array
6237 : * elements by user-defined types.
6238 : *
6239 : * We filter out the built-in types when we dump out the types.
6240 : *
6241 : * The same approach applies to undefined (shell) types and array types.
6242 : *
6243 : * Note: as of 8.3 we can reliably detect whether a type is an
6244 : * auto-generated array type by checking the element type's typarray.
6245 : * (Before that the test is capable of generating false positives.) We
6246 : * still check for name beginning with '_', though, so as to avoid the
6247 : * cost of the subselect probe for all standard types. This would have to
6248 : * be revisited if the backend ever allows renaming of array types.
6249 : */
6250 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, typname, "
6251 : "typnamespace, typacl, "
6252 : "acldefault('T', typowner) AS acldefault, "
6253 : "typowner, "
6254 : "typelem, typrelid, typarray, "
6255 : "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
6256 : "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
6257 : "typtype, typisdefined, "
6258 : "typname[0] = '_' AND typelem != 0 AND "
6259 : "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
6260 : "FROM pg_type");
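/*
 * Editor's illustration (concrete catalog example, not from the original
 * source): the "isarray" test above matches auto-generated array types such
 * as "_int4", whose typelem points at "int4" while "int4".typarray points
 * back at "_int4".  A user-defined type that merely has a name starting with
 * an underscore fails the typarray round-trip and is not treated as an
 * auto-generated array type.
 */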
6261 :
6262 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6263 :
6264 376 : ntups = PQntuples(res);
6265 :
6266 376 : tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
6267 :
6268 376 : i_tableoid = PQfnumber(res, "tableoid");
6269 376 : i_oid = PQfnumber(res, "oid");
6270 376 : i_typname = PQfnumber(res, "typname");
6271 376 : i_typnamespace = PQfnumber(res, "typnamespace");
6272 376 : i_typacl = PQfnumber(res, "typacl");
6273 376 : i_acldefault = PQfnumber(res, "acldefault");
6274 376 : i_typowner = PQfnumber(res, "typowner");
6275 376 : i_typelem = PQfnumber(res, "typelem");
6276 376 : i_typrelid = PQfnumber(res, "typrelid");
6277 376 : i_typrelkind = PQfnumber(res, "typrelkind");
6278 376 : i_typtype = PQfnumber(res, "typtype");
6279 376 : i_typisdefined = PQfnumber(res, "typisdefined");
6280 376 : i_isarray = PQfnumber(res, "isarray");
6281 376 : i_typarray = PQfnumber(res, "typarray");
6282 :
6283 273460 : for (i = 0; i < ntups; i++)
6284 : {
6285 273084 : tyinfo[i].dobj.objType = DO_TYPE;
6286 273084 : tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6287 273084 : tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6288 273084 : AssignDumpId(&tyinfo[i].dobj);
6289 273084 : tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
6290 546168 : tyinfo[i].dobj.namespace =
6291 273084 : findNamespace(atooid(PQgetvalue(res, i, i_typnamespace)));
6292 273084 : tyinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_typacl));
6293 273084 : tyinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6294 273084 : tyinfo[i].dacl.privtype = 0;
6295 273084 : tyinfo[i].dacl.initprivs = NULL;
6296 273084 : tyinfo[i].ftypname = NULL; /* may get filled later */
6297 273084 : tyinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_typowner));
6298 273084 : tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
6299 273084 : tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
6300 273084 : tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
6301 273084 : tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
6302 273084 : tyinfo[i].shellType = NULL;
6303 :
6304 273084 : if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
6305 272980 : tyinfo[i].isDefined = true;
6306 : else
6307 104 : tyinfo[i].isDefined = false;
6308 :
6309 273084 : if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
6310 131038 : tyinfo[i].isArray = true;
6311 : else
6312 142046 : tyinfo[i].isArray = false;
6313 :
6314 273084 : tyinfo[i].typarray = atooid(PQgetvalue(res, i, i_typarray));
6315 :
6316 273084 : if (tyinfo[i].typtype == TYPTYPE_MULTIRANGE)
6317 2520 : tyinfo[i].isMultirange = true;
6318 : else
6319 270564 : tyinfo[i].isMultirange = false;
6320 :
6321 : /* Decide whether we want to dump it */
6322 273084 : selectDumpableType(&tyinfo[i], fout);
6323 :
6324 : /* Mark whether type has an ACL */
6325 273084 : if (!PQgetisnull(res, i, i_typacl))
6326 410 : tyinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
6327 :
6328 : /*
6329 : * If it's a domain, fetch info about its constraints, if any
6330 : */
6331 273084 : tyinfo[i].nDomChecks = 0;
6332 273084 : tyinfo[i].domChecks = NULL;
6333 273084 : tyinfo[i].notnull = NULL;
6334 273084 : if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
6335 30126 : tyinfo[i].typtype == TYPTYPE_DOMAIN)
6336 316 : getDomainConstraints(fout, &(tyinfo[i]));
6337 :
6338 : /*
6339 : * If it's a base type, make a DumpableObject representing a shell
6340 : * definition of the type. We will need to dump that ahead of the I/O
6341 : * functions for the type. Similarly, range types need a shell
6342 : * definition in case they have a canonicalize function.
6343 : *
6344 : * Note: the shell type doesn't have a catId. You might think it
6345 : * should copy the base type's catId, but then it might capture the
6346 : * pg_depend entries for the type, which we don't want.
6347 : */
6348 273084 : if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
6349 30126 : (tyinfo[i].typtype == TYPTYPE_BASE ||
6350 14630 : tyinfo[i].typtype == TYPTYPE_RANGE))
6351 : {
6352 15744 : stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
6353 15744 : stinfo->dobj.objType = DO_SHELL_TYPE;
6354 15744 : stinfo->dobj.catId = nilCatalogId;
6355 15744 : AssignDumpId(&stinfo->dobj);
6356 15744 : stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
6357 15744 : stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
6358 15744 : stinfo->baseType = &(tyinfo[i]);
6359 15744 : tyinfo[i].shellType = stinfo;
6360 :
6361 : /*
6362 : * Initially mark the shell type as not to be dumped. We'll only
6363 : * dump it if the I/O or canonicalize functions need to be dumped;
6364 : * this is taken care of while sorting dependencies.
6365 : */
6366 15744 : stinfo->dobj.dump = DUMP_COMPONENT_NONE;
6367 : }
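/*
 * Editor's illustration (hypothetical type name): when a shell type entry
 * does end up being dumped, it produces only a forward declaration such as
 *
 *   CREATE TYPE public.complex;
 *
 * which allows the type's I/O (or canonicalize) functions to be created
 * before the full CREATE TYPE definition.
 */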
6368 : }
6369 :
6370 376 : PQclear(res);
6371 :
6372 376 : destroyPQExpBuffer(query);
6373 376 : }
6374 :
6375 : /*
6376 : * getOperators:
6377 : * get information about all operators in the system catalogs
6378 : */
6379 : void
6380 376 : getOperators(Archive *fout)
6381 : {
6382 : PGresult *res;
6383 : int ntups;
6384 : int i;
6385 376 : PQExpBuffer query = createPQExpBuffer();
6386 : OprInfo *oprinfo;
6387 : int i_tableoid;
6388 : int i_oid;
6389 : int i_oprname;
6390 : int i_oprnamespace;
6391 : int i_oprowner;
6392 : int i_oprkind;
6393 : int i_oprleft;
6394 : int i_oprright;
6395 : int i_oprcode;
6396 :
6397 : /*
6398 : * Find all operators, including built-in operators; we filter out
6399 : * system-defined operators at dump-out time.
6400 : */
6401 :
6402 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, oprname, "
6403 : "oprnamespace, "
6404 : "oprowner, "
6405 : "oprkind, "
6406 : "oprleft, "
6407 : "oprright, "
6408 : "oprcode::oid AS oprcode "
6409 : "FROM pg_operator");
6410 :
6411 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6412 :
6413 376 : ntups = PQntuples(res);
6414 :
6415 376 : oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
6416 :
6417 376 : i_tableoid = PQfnumber(res, "tableoid");
6418 376 : i_oid = PQfnumber(res, "oid");
6419 376 : i_oprname = PQfnumber(res, "oprname");
6420 376 : i_oprnamespace = PQfnumber(res, "oprnamespace");
6421 376 : i_oprowner = PQfnumber(res, "oprowner");
6422 376 : i_oprkind = PQfnumber(res, "oprkind");
6423 376 : i_oprleft = PQfnumber(res, "oprleft");
6424 376 : i_oprright = PQfnumber(res, "oprright");
6425 376 : i_oprcode = PQfnumber(res, "oprcode");
6426 :
6427 303340 : for (i = 0; i < ntups; i++)
6428 : {
6429 302964 : oprinfo[i].dobj.objType = DO_OPERATOR;
6430 302964 : oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6431 302964 : oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6432 302964 : AssignDumpId(&oprinfo[i].dobj);
6433 302964 : oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
6434 605928 : oprinfo[i].dobj.namespace =
6435 302964 : findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace)));
6436 302964 : oprinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_oprowner));
6437 302964 : oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
6438 302964 : oprinfo[i].oprleft = atooid(PQgetvalue(res, i, i_oprleft));
6439 302964 : oprinfo[i].oprright = atooid(PQgetvalue(res, i, i_oprright));
6440 302964 : oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
6441 :
6442 : /* Decide whether we want to dump it */
6443 302964 : selectDumpableObject(&(oprinfo[i].dobj), fout);
6444 : }
6445 :
6446 376 : PQclear(res);
6447 :
6448 376 : destroyPQExpBuffer(query);
6449 376 : }
6450 :
6451 : /*
6452 : * getCollations:
6453 : * get information about all collations in the system catalogs
6454 : */
6455 : void
6456 376 : getCollations(Archive *fout)
6457 : {
6458 : PGresult *res;
6459 : int ntups;
6460 : int i;
6461 : PQExpBuffer query;
6462 : CollInfo *collinfo;
6463 : int i_tableoid;
6464 : int i_oid;
6465 : int i_collname;
6466 : int i_collnamespace;
6467 : int i_collowner;
6468 : int i_collencoding;
6469 :
6470 376 : query = createPQExpBuffer();
6471 :
6472 : /*
6473 : * Find all collations, including built-in collations; we filter out
6474 : * system-defined collations at dump-out time.
6475 : */
6476 :
6477 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, collname, "
6478 : "collnamespace, "
6479 : "collowner, "
6480 : "collencoding "
6481 : "FROM pg_collation");
6482 :
6483 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6484 :
6485 376 : ntups = PQntuples(res);
6486 :
6487 376 : collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
6488 :
6489 376 : i_tableoid = PQfnumber(res, "tableoid");
6490 376 : i_oid = PQfnumber(res, "oid");
6491 376 : i_collname = PQfnumber(res, "collname");
6492 376 : i_collnamespace = PQfnumber(res, "collnamespace");
6493 376 : i_collowner = PQfnumber(res, "collowner");
6494 376 : i_collencoding = PQfnumber(res, "collencoding");
6495 :
6496 307414 : for (i = 0; i < ntups; i++)
6497 : {
6498 307038 : collinfo[i].dobj.objType = DO_COLLATION;
6499 307038 : collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6500 307038 : collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6501 307038 : AssignDumpId(&collinfo[i].dobj);
6502 307038 : collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
6503 614076 : collinfo[i].dobj.namespace =
6504 307038 : findNamespace(atooid(PQgetvalue(res, i, i_collnamespace)));
6505 307038 : collinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_collowner));
6506 307038 : collinfo[i].collencoding = atoi(PQgetvalue(res, i, i_collencoding));
6507 :
6508 : /* Decide whether we want to dump it */
6509 307038 : selectDumpableObject(&(collinfo[i].dobj), fout);
6510 : }
6511 :
6512 376 : PQclear(res);
6513 :
6514 376 : destroyPQExpBuffer(query);
6515 376 : }
6516 :
6517 : /*
6518 : * getConversions:
6519 : * get information about all conversions in the system catalogs
6520 : */
6521 : void
6522 376 : getConversions(Archive *fout)
6523 : {
6524 : PGresult *res;
6525 : int ntups;
6526 : int i;
6527 : PQExpBuffer query;
6528 : ConvInfo *convinfo;
6529 : int i_tableoid;
6530 : int i_oid;
6531 : int i_conname;
6532 : int i_connamespace;
6533 : int i_conowner;
6534 :
6535 376 : query = createPQExpBuffer();
6536 :
6537 : /*
6538 : * Find all conversions, including built-in conversions; we filter out
6539 : * system-defined conversions at dump-out time.
6540 : */
6541 :
6542 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, conname, "
6543 : "connamespace, "
6544 : "conowner "
6545 : "FROM pg_conversion");
6546 :
6547 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6548 :
6549 376 : ntups = PQntuples(res);
6550 :
6551 376 : convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
6552 :
6553 376 : i_tableoid = PQfnumber(res, "tableoid");
6554 376 : i_oid = PQfnumber(res, "oid");
6555 376 : i_conname = PQfnumber(res, "conname");
6556 376 : i_connamespace = PQfnumber(res, "connamespace");
6557 376 : i_conowner = PQfnumber(res, "conowner");
6558 :
6559 48594 : for (i = 0; i < ntups; i++)
6560 : {
6561 48218 : convinfo[i].dobj.objType = DO_CONVERSION;
6562 48218 : convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6563 48218 : convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6564 48218 : AssignDumpId(&convinfo[i].dobj);
6565 48218 : convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
6566 96436 : convinfo[i].dobj.namespace =
6567 48218 : findNamespace(atooid(PQgetvalue(res, i, i_connamespace)));
6568 48218 : convinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_conowner));
6569 :
6570 : /* Decide whether we want to dump it */
6571 48218 : selectDumpableObject(&(convinfo[i].dobj), fout);
6572 : }
6573 :
6574 376 : PQclear(res);
6575 :
6576 376 : destroyPQExpBuffer(query);
6577 376 : }
6578 :
6579 : /*
6580 : * getAccessMethods:
6581 : * get information about all user-defined access methods
6582 : */
6583 : void
6584 376 : getAccessMethods(Archive *fout)
6585 : {
6586 : PGresult *res;
6587 : int ntups;
6588 : int i;
6589 : PQExpBuffer query;
6590 : AccessMethodInfo *aminfo;
6591 : int i_tableoid;
6592 : int i_oid;
6593 : int i_amname;
6594 : int i_amhandler;
6595 : int i_amtype;
6596 :
6597 376 : query = createPQExpBuffer();
6598 :
6599 : /*
6600 : * Select all access methods from the pg_am table. v9.6 introduced CREATE
6601 : * ACCESS METHOD, so earlier versions usually have only built-in access
6602 : * methods. v9.6 also changed the access method API, replacing dozens of
6603 : * pg_am columns with amhandler. Even if a user created an access method
6604 : * by "INSERT INTO pg_am", we have no way to translate pre-v9.6 pg_am
6605 : * columns to a v9.6+ CREATE ACCESS METHOD. Hence, before v9.6, read
6606 : * pg_am just to facilitate findAccessMethodByOid() providing the
6607 : * OID-to-name mapping.
6608 : */
6609 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, ");
6610 376 : if (fout->remoteVersion >= 90600)
6611 376 : appendPQExpBufferStr(query,
6612 : "amtype, "
6613 : "amhandler::pg_catalog.regproc AS amhandler ");
6614 : else
6615 0 : appendPQExpBufferStr(query,
6616 : "'i'::pg_catalog.\"char\" AS amtype, "
6617 : "'-'::pg_catalog.regproc AS amhandler ");
6618 376 : appendPQExpBufferStr(query, "FROM pg_am");
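/*
 * Editor's illustration (hypothetical access method name): the columns
 * collected here correspond to a v9.6+ definition such as
 *
 *   CREATE ACCESS METHOD myindexam TYPE INDEX HANDLER myindexam_handler;
 *
 * where amname = 'myindexam', amtype = 'i', and amhandler names the handler
 * function.
 */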
6619 :
6620 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6621 :
6622 376 : ntups = PQntuples(res);
6623 :
6624 376 : aminfo = (AccessMethodInfo *) pg_malloc(ntups * sizeof(AccessMethodInfo));
6625 :
6626 376 : i_tableoid = PQfnumber(res, "tableoid");
6627 376 : i_oid = PQfnumber(res, "oid");
6628 376 : i_amname = PQfnumber(res, "amname");
6629 376 : i_amhandler = PQfnumber(res, "amhandler");
6630 376 : i_amtype = PQfnumber(res, "amtype");
6631 :
6632 3252 : for (i = 0; i < ntups; i++)
6633 : {
6634 2876 : aminfo[i].dobj.objType = DO_ACCESS_METHOD;
6635 2876 : aminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6636 2876 : aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6637 2876 : AssignDumpId(&aminfo[i].dobj);
6638 2876 : aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
6639 2876 : aminfo[i].dobj.namespace = NULL;
6640 2876 : aminfo[i].amhandler = pg_strdup(PQgetvalue(res, i, i_amhandler));
6641 2876 : aminfo[i].amtype = *(PQgetvalue(res, i, i_amtype));
6642 :
6643 : /* Decide whether we want to dump it */
6644 2876 : selectDumpableAccessMethod(&(aminfo[i]), fout);
6645 : }
6646 :
6647 376 : PQclear(res);
6648 :
6649 376 : destroyPQExpBuffer(query);
6650 376 : }
6651 :
6652 :
6653 : /*
6654 : * getOpclasses:
6655 : * get information about all opclasses in the system catalogs
6656 : */
6657 : void
6658 376 : getOpclasses(Archive *fout)
6659 : {
6660 : PGresult *res;
6661 : int ntups;
6662 : int i;
6663 376 : PQExpBuffer query = createPQExpBuffer();
6664 : OpclassInfo *opcinfo;
6665 : int i_tableoid;
6666 : int i_oid;
6667 : int i_opcmethod;
6668 : int i_opcname;
6669 : int i_opcnamespace;
6670 : int i_opcowner;
6671 :
6672 : /*
6673 : * Find all opclasses, including built-in opclasses; we filter out
6674 : * system-defined opclasses at dump-out time.
6675 : */
6676 :
6677 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, opcmethod, opcname, "
6678 : "opcnamespace, "
6679 : "opcowner "
6680 : "FROM pg_opclass");
6681 :
6682 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6683 :
6684 376 : ntups = PQntuples(res);
6685 :
6686 376 : opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
6687 :
6688 376 : i_tableoid = PQfnumber(res, "tableoid");
6689 376 : i_oid = PQfnumber(res, "oid");
6690 376 : i_opcmethod = PQfnumber(res, "opcmethod");
6691 376 : i_opcname = PQfnumber(res, "opcname");
6692 376 : i_opcnamespace = PQfnumber(res, "opcnamespace");
6693 376 : i_opcowner = PQfnumber(res, "opcowner");
6694 :
6695 67992 : for (i = 0; i < ntups; i++)
6696 : {
6697 67616 : opcinfo[i].dobj.objType = DO_OPCLASS;
6698 67616 : opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6699 67616 : opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6700 67616 : AssignDumpId(&opcinfo[i].dobj);
6701 67616 : opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
6702 135232 : opcinfo[i].dobj.namespace =
6703 67616 : findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace)));
6704 67616 : opcinfo[i].opcmethod = atooid(PQgetvalue(res, i, i_opcmethod));
6705 67616 : opcinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opcowner));
6706 :
6707 : /* Decide whether we want to dump it */
6708 67616 : selectDumpableObject(&(opcinfo[i].dobj), fout);
6709 : }
6710 :
6711 376 : PQclear(res);
6712 :
6713 376 : destroyPQExpBuffer(query);
6714 376 : }
6715 :
6716 : /*
6717 : * getOpfamilies:
6718 : * get information about all opfamilies in the system catalogs
6719 : */
6720 : void
6721 376 : getOpfamilies(Archive *fout)
6722 : {
6723 : PGresult *res;
6724 : int ntups;
6725 : int i;
6726 : PQExpBuffer query;
6727 : OpfamilyInfo *opfinfo;
6728 : int i_tableoid;
6729 : int i_oid;
6730 : int i_opfmethod;
6731 : int i_opfname;
6732 : int i_opfnamespace;
6733 : int i_opfowner;
6734 :
6735 376 : query = createPQExpBuffer();
6736 :
6737 : /*
6738 : * Find all opfamilies, including built-in opfamilies; we filter out
6739 : * system-defined opfamilies at dump-out time.
6740 : */
6741 :
6742 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, opfmethod, opfname, "
6743 : "opfnamespace, "
6744 : "opfowner "
6745 : "FROM pg_opfamily");
6746 :
6747 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6748 :
6749 376 : ntups = PQntuples(res);
6750 :
6751 376 : opfinfo = (OpfamilyInfo *) pg_malloc(ntups * sizeof(OpfamilyInfo));
6752 :
6753 376 : i_tableoid = PQfnumber(res, "tableoid");
6754 376 : i_oid = PQfnumber(res, "oid");
6755 376 : i_opfname = PQfnumber(res, "opfname");
6756 376 : i_opfmethod = PQfnumber(res, "opfmethod");
6757 376 : i_opfnamespace = PQfnumber(res, "opfnamespace");
6758 376 : i_opfowner = PQfnumber(res, "opfowner");
6759 :
6760 56302 : for (i = 0; i < ntups; i++)
6761 : {
6762 55926 : opfinfo[i].dobj.objType = DO_OPFAMILY;
6763 55926 : opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6764 55926 : opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6765 55926 : AssignDumpId(&opfinfo[i].dobj);
6766 55926 : opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
6767 111852 : opfinfo[i].dobj.namespace =
6768 55926 : findNamespace(atooid(PQgetvalue(res, i, i_opfnamespace)));
6769 55926 : opfinfo[i].opfmethod = atooid(PQgetvalue(res, i, i_opfmethod));
6770 55926 : opfinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opfowner));
6771 :
6772 : /* Decide whether we want to dump it */
6773 55926 : selectDumpableObject(&(opfinfo[i].dobj), fout);
6774 : }
6775 :
6776 376 : PQclear(res);
6777 :
6778 376 : destroyPQExpBuffer(query);
6779 376 : }
6780 :
6781 : /*
6782 : * getAggregates:
6783 : * get information about all user-defined aggregates in the system catalogs
6784 : */
6785 : void
6786 376 : getAggregates(Archive *fout)
6787 : {
6788 376 : DumpOptions *dopt = fout->dopt;
6789 : PGresult *res;
6790 : int ntups;
6791 : int i;
6792 376 : PQExpBuffer query = createPQExpBuffer();
6793 : AggInfo *agginfo;
6794 : int i_tableoid;
6795 : int i_oid;
6796 : int i_aggname;
6797 : int i_aggnamespace;
6798 : int i_pronargs;
6799 : int i_proargtypes;
6800 : int i_proowner;
6801 : int i_aggacl;
6802 : int i_acldefault;
6803 :
6804 : /*
6805 : * Find all interesting aggregates. See comment in getFuncs() for the
6806 : * rationale behind the filtering logic.
6807 : */
6808 376 : if (fout->remoteVersion >= 90600)
6809 : {
6810 : const char *agg_check;
6811 :
6812 752 : agg_check = (fout->remoteVersion >= 110000 ? "p.prokind = 'a'"
6813 376 : : "p.proisagg");
6814 :
6815 376 : appendPQExpBuffer(query, "SELECT p.tableoid, p.oid, "
6816 : "p.proname AS aggname, "
6817 : "p.pronamespace AS aggnamespace, "
6818 : "p.pronargs, p.proargtypes, "
6819 : "p.proowner, "
6820 : "p.proacl AS aggacl, "
6821 : "acldefault('f', p.proowner) AS acldefault "
6822 : "FROM pg_proc p "
6823 : "LEFT JOIN pg_init_privs pip ON "
6824 : "(p.oid = pip.objoid "
6825 : "AND pip.classoid = 'pg_proc'::regclass "
6826 : "AND pip.objsubid = 0) "
6827 : "WHERE %s AND ("
6828 : "p.pronamespace != "
6829 : "(SELECT oid FROM pg_namespace "
6830 : "WHERE nspname = 'pg_catalog') OR "
6831 : "p.proacl IS DISTINCT FROM pip.initprivs",
6832 : agg_check);
6833 376 : if (dopt->binary_upgrade)
6834 76 : appendPQExpBufferStr(query,
6835 : " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6836 : "classid = 'pg_proc'::regclass AND "
6837 : "objid = p.oid AND "
6838 : "refclassid = 'pg_extension'::regclass AND "
6839 : "deptype = 'e')");
6840 376 : appendPQExpBufferChar(query, ')');
6841 : }
6842 : else
6843 : {
6844 0 : appendPQExpBufferStr(query, "SELECT tableoid, oid, proname AS aggname, "
6845 : "pronamespace AS aggnamespace, "
6846 : "pronargs, proargtypes, "
6847 : "proowner, "
6848 : "proacl AS aggacl, "
6849 : "acldefault('f', proowner) AS acldefault "
6850 : "FROM pg_proc p "
6851 : "WHERE proisagg AND ("
6852 : "pronamespace != "
6853 : "(SELECT oid FROM pg_namespace "
6854 : "WHERE nspname = 'pg_catalog')");
6855 0 : if (dopt->binary_upgrade)
6856 0 : appendPQExpBufferStr(query,
6857 : " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
6858 : "classid = 'pg_proc'::regclass AND "
6859 : "objid = p.oid AND "
6860 : "refclassid = 'pg_extension'::regclass AND "
6861 : "deptype = 'e')");
6862 0 : appendPQExpBufferChar(query, ')');
6863 : }
6864 :
6865 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
6866 :
6867 376 : ntups = PQntuples(res);
6868 :
6869 376 : agginfo = (AggInfo *) pg_malloc(ntups * sizeof(AggInfo));
6870 :
6871 376 : i_tableoid = PQfnumber(res, "tableoid");
6872 376 : i_oid = PQfnumber(res, "oid");
6873 376 : i_aggname = PQfnumber(res, "aggname");
6874 376 : i_aggnamespace = PQfnumber(res, "aggnamespace");
6875 376 : i_pronargs = PQfnumber(res, "pronargs");
6876 376 : i_proargtypes = PQfnumber(res, "proargtypes");
6877 376 : i_proowner = PQfnumber(res, "proowner");
6878 376 : i_aggacl = PQfnumber(res, "aggacl");
6879 376 : i_acldefault = PQfnumber(res, "acldefault");
6880 :
6881 1174 : for (i = 0; i < ntups; i++)
6882 : {
6883 798 : agginfo[i].aggfn.dobj.objType = DO_AGG;
6884 798 : agginfo[i].aggfn.dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
6885 798 : agginfo[i].aggfn.dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
6886 798 : AssignDumpId(&agginfo[i].aggfn.dobj);
6887 798 : agginfo[i].aggfn.dobj.name = pg_strdup(PQgetvalue(res, i, i_aggname));
6888 1596 : agginfo[i].aggfn.dobj.namespace =
6889 798 : findNamespace(atooid(PQgetvalue(res, i, i_aggnamespace)));
6890 798 : agginfo[i].aggfn.dacl.acl = pg_strdup(PQgetvalue(res, i, i_aggacl));
6891 798 : agginfo[i].aggfn.dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
6892 798 : agginfo[i].aggfn.dacl.privtype = 0;
6893 798 : agginfo[i].aggfn.dacl.initprivs = NULL;
6894 798 : agginfo[i].aggfn.rolname = getRoleName(PQgetvalue(res, i, i_proowner));
6895 798 : agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
6896 798 : agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */
6897 798 : agginfo[i].aggfn.nargs = atoi(PQgetvalue(res, i, i_pronargs));
6898 798 : if (agginfo[i].aggfn.nargs == 0)
6899 112 : agginfo[i].aggfn.argtypes = NULL;
6900 : else
6901 : {
6902 686 : agginfo[i].aggfn.argtypes = (Oid *) pg_malloc(agginfo[i].aggfn.nargs * sizeof(Oid));
6903 686 : parseOidArray(PQgetvalue(res, i, i_proargtypes),
6904 686 : agginfo[i].aggfn.argtypes,
6905 686 : agginfo[i].aggfn.nargs);
6906 : }
6907 798 : agginfo[i].aggfn.postponed_def = false; /* might get set during sort */
6908 :
6909 : /* Decide whether we want to dump it */
6910 798 : selectDumpableObject(&(agginfo[i].aggfn.dobj), fout);
6911 :
6912 : /* Mark whether aggregate has an ACL */
6913 798 : if (!PQgetisnull(res, i, i_aggacl))
6914 50 : agginfo[i].aggfn.dobj.components |= DUMP_COMPONENT_ACL;
6915 : }
6916 :
6917 376 : PQclear(res);
6918 :
6919 376 : destroyPQExpBuffer(query);
6920 376 : }
6921 :
6922 : /*
6923 : * getFuncs:
6924 : * get information about all user-defined functions in the system catalogs
6925 : */
6926 : void
6927 376 : getFuncs(Archive *fout)
6928 : {
6929 376 : DumpOptions *dopt = fout->dopt;
6930 : PGresult *res;
6931 : int ntups;
6932 : int i;
6933 376 : PQExpBuffer query = createPQExpBuffer();
6934 : FuncInfo *finfo;
6935 : int i_tableoid;
6936 : int i_oid;
6937 : int i_proname;
6938 : int i_pronamespace;
6939 : int i_proowner;
6940 : int i_prolang;
6941 : int i_pronargs;
6942 : int i_proargtypes;
6943 : int i_prorettype;
6944 : int i_proacl;
6945 : int i_acldefault;
6946 :
6947 : /*
6948 : * Find all interesting functions. This is a bit complicated:
6949 : *
6950 : * 1. Always exclude aggregates; those are handled elsewhere.
6951 : *
6952 : * 2. Always exclude functions that are internally dependent on something
6953 : * else, since presumably those will be created as a result of creating
6954 : * the something else. This currently acts only to suppress constructor
6955 : * functions for range types. Note this is OK only because the
6956 : * constructors don't have any dependencies the range type doesn't have;
6957 : * otherwise we might not get creation ordering correct.
6958 : *
6959 : * 3. Otherwise, we normally exclude functions in pg_catalog. However, if
6960 : * they're members of extensions and we are in binary-upgrade mode then
6961 : * include them, since we want to dump extension members individually in
6962 : * that mode. Also, if they are used by casts or transforms then we need
6963 : * to gather the information about them, though they won't be dumped if
6964 : * they are built-in. Also, in 9.6 and up, include functions in
6965 : * pg_catalog if they have an ACL different from what's shown in
6966 : * pg_init_privs (so we have to join to pg_init_privs; annoying).
6967 : */
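/*
 * Editor's illustration (hypothetical DDL): a built-in function is still
 * collected here once its ACL no longer matches pg_init_privs, for example
 * after
 *
 *   REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stat_reset() FROM PUBLIC;
 *
 * so that the dump can reproduce the changed privileges.
 */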
6968 376 : if (fout->remoteVersion >= 90600)
6969 : {
6970 : const char *not_agg_check;
6971 :
6972 752 : not_agg_check = (fout->remoteVersion >= 110000 ? "p.prokind <> 'a'"
6973 376 : : "NOT p.proisagg");
6974 :
6975 376 : appendPQExpBuffer(query,
6976 : "SELECT p.tableoid, p.oid, p.proname, p.prolang, "
6977 : "p.pronargs, p.proargtypes, p.prorettype, "
6978 : "p.proacl, "
6979 : "acldefault('f', p.proowner) AS acldefault, "
6980 : "p.pronamespace, "
6981 : "p.proowner "
6982 : "FROM pg_proc p "
6983 : "LEFT JOIN pg_init_privs pip ON "
6984 : "(p.oid = pip.objoid "
6985 : "AND pip.classoid = 'pg_proc'::regclass "
6986 : "AND pip.objsubid = 0) "
6987 : "WHERE %s"
6988 : "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
6989 : "WHERE classid = 'pg_proc'::regclass AND "
6990 : "objid = p.oid AND deptype = 'i')"
6991 : "\n AND ("
6992 : "\n pronamespace != "
6993 : "(SELECT oid FROM pg_namespace "
6994 : "WHERE nspname = 'pg_catalog')"
6995 : "\n OR EXISTS (SELECT 1 FROM pg_cast"
6996 : "\n WHERE pg_cast.oid > %u "
6997 : "\n AND p.oid = pg_cast.castfunc)"
6998 : "\n OR EXISTS (SELECT 1 FROM pg_transform"
6999 : "\n WHERE pg_transform.oid > %u AND "
7000 : "\n (p.oid = pg_transform.trffromsql"
7001 : "\n OR p.oid = pg_transform.trftosql))",
7002 : not_agg_check,
7003 : g_last_builtin_oid,
7004 : g_last_builtin_oid);
7005 376 : if (dopt->binary_upgrade)
7006 76 : appendPQExpBufferStr(query,
7007 : "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
7008 : "classid = 'pg_proc'::regclass AND "
7009 : "objid = p.oid AND "
7010 : "refclassid = 'pg_extension'::regclass AND "
7011 : "deptype = 'e')");
7012 376 : appendPQExpBufferStr(query,
7013 : "\n OR p.proacl IS DISTINCT FROM pip.initprivs");
7014 376 : appendPQExpBufferChar(query, ')');
7015 : }
7016 : else
7017 : {
7018 0 : appendPQExpBuffer(query,
7019 : "SELECT tableoid, oid, proname, prolang, "
7020 : "pronargs, proargtypes, prorettype, proacl, "
7021 : "acldefault('f', proowner) AS acldefault, "
7022 : "pronamespace, "
7023 : "proowner "
7024 : "FROM pg_proc p "
7025 : "WHERE NOT proisagg"
7026 : "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
7027 : "WHERE classid = 'pg_proc'::regclass AND "
7028 : "objid = p.oid AND deptype = 'i')"
7029 : "\n AND ("
7030 : "\n pronamespace != "
7031 : "(SELECT oid FROM pg_namespace "
7032 : "WHERE nspname = 'pg_catalog')"
7033 : "\n OR EXISTS (SELECT 1 FROM pg_cast"
7034 : "\n WHERE pg_cast.oid > '%u'::oid"
7035 : "\n AND p.oid = pg_cast.castfunc)",
7036 : g_last_builtin_oid);
7037 :
7038 0 : if (fout->remoteVersion >= 90500)
7039 0 : appendPQExpBuffer(query,
7040 : "\n OR EXISTS (SELECT 1 FROM pg_transform"
7041 : "\n WHERE pg_transform.oid > '%u'::oid"
7042 : "\n AND (p.oid = pg_transform.trffromsql"
7043 : "\n OR p.oid = pg_transform.trftosql))",
7044 : g_last_builtin_oid);
7045 :
7046 0 : if (dopt->binary_upgrade)
7047 0 : appendPQExpBufferStr(query,
7048 : "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
7049 : "classid = 'pg_proc'::regclass AND "
7050 : "objid = p.oid AND "
7051 : "refclassid = 'pg_extension'::regclass AND "
7052 : "deptype = 'e')");
7053 0 : appendPQExpBufferChar(query, ')');
7054 : }
7055 :
7056 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7057 :
7058 376 : ntups = PQntuples(res);
7059 :
7060 376 : finfo = (FuncInfo *) pg_malloc0(ntups * sizeof(FuncInfo));
7061 :
7062 376 : i_tableoid = PQfnumber(res, "tableoid");
7063 376 : i_oid = PQfnumber(res, "oid");
7064 376 : i_proname = PQfnumber(res, "proname");
7065 376 : i_pronamespace = PQfnumber(res, "pronamespace");
7066 376 : i_proowner = PQfnumber(res, "proowner");
7067 376 : i_prolang = PQfnumber(res, "prolang");
7068 376 : i_pronargs = PQfnumber(res, "pronargs");
7069 376 : i_proargtypes = PQfnumber(res, "proargtypes");
7070 376 : i_prorettype = PQfnumber(res, "prorettype");
7071 376 : i_proacl = PQfnumber(res, "proacl");
7072 376 : i_acldefault = PQfnumber(res, "acldefault");
7073 :
7074 9956 : for (i = 0; i < ntups; i++)
7075 : {
7076 9580 : finfo[i].dobj.objType = DO_FUNC;
7077 9580 : finfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
7078 9580 : finfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
7079 9580 : AssignDumpId(&finfo[i].dobj);
7080 9580 : finfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_proname));
7081 19160 : finfo[i].dobj.namespace =
7082 9580 : findNamespace(atooid(PQgetvalue(res, i, i_pronamespace)));
7083 9580 : finfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_proacl));
7084 9580 : finfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
7085 9580 : finfo[i].dacl.privtype = 0;
7086 9580 : finfo[i].dacl.initprivs = NULL;
7087 9580 : finfo[i].rolname = getRoleName(PQgetvalue(res, i, i_proowner));
7088 9580 : finfo[i].lang = atooid(PQgetvalue(res, i, i_prolang));
7089 9580 : finfo[i].prorettype = atooid(PQgetvalue(res, i, i_prorettype));
7090 9580 : finfo[i].nargs = atoi(PQgetvalue(res, i, i_pronargs));
7091 9580 : if (finfo[i].nargs == 0)
7092 2144 : finfo[i].argtypes = NULL;
7093 : else
7094 : {
7095 7436 : finfo[i].argtypes = (Oid *) pg_malloc(finfo[i].nargs * sizeof(Oid));
7096 7436 : parseOidArray(PQgetvalue(res, i, i_proargtypes),
7097 7436 : finfo[i].argtypes, finfo[i].nargs);
7098 : }
7099 9580 : finfo[i].postponed_def = false; /* might get set during sort */
7100 :
7101 : /* Decide whether we want to dump it */
7102 9580 : selectDumpableObject(&(finfo[i].dobj), fout);
7103 :
7104 : /* Mark whether function has an ACL */
7105 9580 : if (!PQgetisnull(res, i, i_proacl))
7106 280 : finfo[i].dobj.components |= DUMP_COMPONENT_ACL;
7107 : }
7108 :
7109 376 : PQclear(res);
7110 :
7111 376 : destroyPQExpBuffer(query);
7112 376 : }
7113 :
7114 : /*
7115 : * getRelationStatistics
7116 : * register the statistics object as a dependent of the relation.
7117 : *
7118 : * reltuples is passed as a string to avoid complexities in converting from/to
7119 : * floating point.
7120 : */
7121 : static RelStatsInfo *
7122 19254 : getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
7123 : char *reltuples, int32 relallvisible,
7124 : int32 relallfrozen, char relkind,
7125 : char **indAttNames, int nindAttNames)
7126 : {
7127 19254 : if (!fout->dopt->dumpStatistics)
7128 12122 : return NULL;
7129 :
7130 7132 : if ((relkind == RELKIND_RELATION) ||
7131 2968 : (relkind == RELKIND_PARTITIONED_TABLE) ||
7132 1772 : (relkind == RELKIND_INDEX) ||
7133 1144 : (relkind == RELKIND_PARTITIONED_INDEX) ||
7134 538 : (relkind == RELKIND_MATVIEW ||
7135 : relkind == RELKIND_FOREIGN_TABLE))
7136 : {
7137 6658 : RelStatsInfo *info = pg_malloc0(sizeof(RelStatsInfo));
7138 6658 : DumpableObject *dobj = &info->dobj;
7139 :
7140 6658 : dobj->objType = DO_REL_STATS;
7141 6658 : dobj->catId.tableoid = 0;
7142 6658 : dobj->catId.oid = 0;
7143 6658 : AssignDumpId(dobj);
7144 6658 : dobj->dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
7145 6658 : dobj->dependencies[0] = rel->dumpId;
7146 6658 : dobj->nDeps = 1;
7147 6658 : dobj->allocDeps = 1;
7148 6658 : dobj->components |= DUMP_COMPONENT_STATISTICS;
7149 6658 : dobj->name = pg_strdup(rel->name);
7150 6658 : dobj->namespace = rel->namespace;
7151 6658 : info->relpages = relpages;
7152 6658 : info->reltuples = pstrdup(reltuples);
7153 6658 : info->relallvisible = relallvisible;
7154 6658 : info->relallfrozen = relallfrozen;
7155 6658 : info->relkind = relkind;
7156 6658 : info->indAttNames = indAttNames;
7157 6658 : info->nindAttNames = nindAttNames;
7158 :
7159 : /*
7160 : * Ordinarily, stats go in SECTION_DATA for tables and
7161 : * SECTION_POST_DATA for indexes.
7162 : *
7163 : * However, the section may be updated later for materialized view
7164 : * stats. REFRESH MATERIALIZED VIEW replaces the storage and resets
7165 : * the stats, so the stats must be restored after the data. Also, the
7166 : * materialized view definition may be postponed to SECTION_POST_DATA
7167 : * (see repairMatViewBoundaryMultiLoop()).
7168 : */
7169 6658 : switch (info->relkind)
7170 : {
7171 4834 : case RELKIND_RELATION:
7172 : case RELKIND_PARTITIONED_TABLE:
7173 : case RELKIND_MATVIEW:
7174 : case RELKIND_FOREIGN_TABLE:
7175 4834 : info->section = SECTION_DATA;
7176 4834 : break;
7177 1824 : case RELKIND_INDEX:
7178 : case RELKIND_PARTITIONED_INDEX:
7179 1824 : info->section = SECTION_POST_DATA;
7180 1824 : break;
7181 0 : default:
7182 0 : pg_fatal("cannot dump statistics for relation kind \"%c\"",
7183 : info->relkind);
7184 : }
7185 :
7186 6658 : return info;
7187 : }
7188 474 : return NULL;
7189 : }
7190 :
7191 : /*
7192 : * getTables
7193 : * read all the tables (no indexes) in the system catalogs,
7194 : * and return them as an array of TableInfo structures
7195 : *
7196 : * *numTables is set to the number of tables read in
7197 : */
7198 : TableInfo *
7199 378 : getTables(Archive *fout, int *numTables)
7200 : {
7201 378 : DumpOptions *dopt = fout->dopt;
7202 : PGresult *res;
7203 : int ntups;
7204 : int i;
7205 378 : PQExpBuffer query = createPQExpBuffer();
7206 : TableInfo *tblinfo;
7207 : int i_reltableoid;
7208 : int i_reloid;
7209 : int i_relname;
7210 : int i_relnamespace;
7211 : int i_relkind;
7212 : int i_reltype;
7213 : int i_relowner;
7214 : int i_relchecks;
7215 : int i_relhasindex;
7216 : int i_relhasrules;
7217 : int i_relpages;
7218 : int i_reltuples;
7219 : int i_relallvisible;
7220 : int i_relallfrozen;
7221 : int i_toastpages;
7222 : int i_owning_tab;
7223 : int i_owning_col;
7224 : int i_reltablespace;
7225 : int i_relhasoids;
7226 : int i_relhastriggers;
7227 : int i_relpersistence;
7228 : int i_relispopulated;
7229 : int i_relreplident;
7230 : int i_relrowsec;
7231 : int i_relforcerowsec;
7232 : int i_relfrozenxid;
7233 : int i_toastfrozenxid;
7234 : int i_toastoid;
7235 : int i_relminmxid;
7236 : int i_toastminmxid;
7237 : int i_reloptions;
7238 : int i_checkoption;
7239 : int i_toastreloptions;
7240 : int i_reloftype;
7241 : int i_foreignserver;
7242 : int i_amname;
7243 : int i_is_identity_sequence;
7244 : int i_relacl;
7245 : int i_acldefault;
7246 : int i_ispartition;
7247 :
7248 : /*
7249 : * Find all the tables and table-like objects.
7250 : *
7251 : * We must fetch all tables in this phase because otherwise we cannot
7252 : * correctly identify inherited columns, owned sequences, etc.
7253 : *
7254 : * We include system catalogs, so that we can work if a user table is
7255 : * defined to inherit from a system catalog (pretty weird, but...)
7256 : *
7257 : * Note: in this phase we should collect only a minimal amount of
7258 : * information about each table, basically just enough to decide if it is
7259 : * interesting. In particular, since we do not yet have lock on any user
7260 : * table, we MUST NOT invoke any server-side data collection functions
7261 : * (for instance, pg_get_partkeydef()). Those are likely to fail or give
7262 : * wrong answers if any concurrent DDL is happening.
7263 : */
7264 :
7265 378 : appendPQExpBufferStr(query,
7266 : "SELECT c.tableoid, c.oid, c.relname, "
7267 : "c.relnamespace, c.relkind, c.reltype, "
7268 : "c.relowner, "
7269 : "c.relchecks, "
7270 : "c.relhasindex, c.relhasrules, c.relpages, "
7271 : "c.reltuples, c.relallvisible, ");
7272 :
7273 378 : if (fout->remoteVersion >= 180000)
7274 378 : appendPQExpBufferStr(query, "c.relallfrozen, ");
7275 : else
7276 0 : appendPQExpBufferStr(query, "0 AS relallfrozen, ");
7277 :
7278 378 : appendPQExpBufferStr(query,
7279 : "c.relhastriggers, c.relpersistence, "
7280 : "c.reloftype, "
7281 : "c.relacl, "
7282 : "acldefault(CASE WHEN c.relkind = " CppAsString2(RELKIND_SEQUENCE)
7283 : " THEN 's'::\"char\" ELSE 'r'::\"char\" END, c.relowner) AS acldefault, "
7284 : "CASE WHEN c.relkind = " CppAsString2(RELKIND_FOREIGN_TABLE) " THEN "
7285 : "(SELECT ftserver FROM pg_catalog.pg_foreign_table WHERE ftrelid = c.oid) "
7286 : "ELSE 0 END AS foreignserver, "
7287 : "c.relfrozenxid, tc.relfrozenxid AS tfrozenxid, "
7288 : "tc.oid AS toid, "
7289 : "tc.relpages AS toastpages, "
7290 : "tc.reloptions AS toast_reloptions, "
7291 : "d.refobjid AS owning_tab, "
7292 : "d.refobjsubid AS owning_col, "
7293 : "tsp.spcname AS reltablespace, ");
7294 :
7295 378 : if (fout->remoteVersion >= 120000)
7296 378 : appendPQExpBufferStr(query,
7297 : "false AS relhasoids, ");
7298 : else
7299 0 : appendPQExpBufferStr(query,
7300 : "c.relhasoids, ");
7301 :
7302 378 : if (fout->remoteVersion >= 90300)
7303 378 : appendPQExpBufferStr(query,
7304 : "c.relispopulated, ");
7305 : else
7306 0 : appendPQExpBufferStr(query,
7307 : "'t' as relispopulated, ");
7308 :
7309 378 : if (fout->remoteVersion >= 90400)
7310 378 : appendPQExpBufferStr(query,
7311 : "c.relreplident, ");
7312 : else
7313 0 : appendPQExpBufferStr(query,
7314 : "'d' AS relreplident, ");
7315 :
7316 378 : if (fout->remoteVersion >= 90500)
7317 378 : appendPQExpBufferStr(query,
7318 : "c.relrowsecurity, c.relforcerowsecurity, ");
7319 : else
7320 0 : appendPQExpBufferStr(query,
7321 : "false AS relrowsecurity, "
7322 : "false AS relforcerowsecurity, ");
7323 :
7324 378 : if (fout->remoteVersion >= 90300)
7325 378 : appendPQExpBufferStr(query,
7326 : "c.relminmxid, tc.relminmxid AS tminmxid, ");
7327 : else
7328 0 : appendPQExpBufferStr(query,
7329 : "0 AS relminmxid, 0 AS tminmxid, ");
7330 :
7331 378 : if (fout->remoteVersion >= 90300)
7332 378 : appendPQExpBufferStr(query,
7333 : "array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded') AS reloptions, "
7334 : "CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
7335 : "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, ");
7336 : else
7337 0 : appendPQExpBufferStr(query,
7338 : "c.reloptions, NULL AS checkoption, ");
7339 :
7340 378 : if (fout->remoteVersion >= 90600)
7341 378 : appendPQExpBufferStr(query,
7342 : "am.amname, ");
7343 : else
7344 0 : appendPQExpBufferStr(query,
7345 : "NULL AS amname, ");
7346 :
7347 378 : if (fout->remoteVersion >= 90600)
7348 378 : appendPQExpBufferStr(query,
7349 : "(d.deptype = 'i') IS TRUE AS is_identity_sequence, ");
7350 : else
7351 0 : appendPQExpBufferStr(query,
7352 : "false AS is_identity_sequence, ");
7353 :
7354 378 : if (fout->remoteVersion >= 100000)
7355 378 : appendPQExpBufferStr(query,
7356 : "c.relispartition AS ispartition ");
7357 : else
7358 0 : appendPQExpBufferStr(query,
7359 : "false AS ispartition ");
7360 :
7361 : /*
7362 : * Left join to pg_depend to pick up dependency info linking sequences to
7363 : * their owning column, if any (note this dependency is AUTO except for
7364 : * identity sequences, where it's INTERNAL). Also join to pg_tablespace to
7365 : * collect the spcname.
7366 : */
7367 378 : appendPQExpBufferStr(query,
7368 : "\nFROM pg_class c\n"
7369 : "LEFT JOIN pg_depend d ON "
7370 : "(c.relkind = " CppAsString2(RELKIND_SEQUENCE) " AND "
7371 : "d.classid = 'pg_class'::regclass AND d.objid = c.oid AND "
7372 : "d.objsubid = 0 AND "
7373 : "d.refclassid = 'pg_class'::regclass AND d.deptype IN ('a', 'i'))\n"
7374 : "LEFT JOIN pg_tablespace tsp ON (tsp.oid = c.reltablespace)\n");
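/*
 * Editor's illustration (hypothetical tables): a sequence created by
 * "CREATE TABLE t (id serial)" is linked to t.id with deptype 'a', while one
 * backing "id int GENERATED ALWAYS AS IDENTITY" is linked with deptype 'i';
 * the join above picks up both, so owning_tab/owning_col and
 * is_identity_sequence can be filled in below.
 */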
7375 :
7376 : /*
7377 : * In 9.6 and up, left join to pg_am to pick up the amname.
7378 : */
7379 378 : if (fout->remoteVersion >= 90600)
7380 378 : appendPQExpBufferStr(query,
7381 : "LEFT JOIN pg_am am ON (c.relam = am.oid)\n");
7382 :
7383 : /*
7384 : * We purposefully ignore toast OIDs for partitioned tables; the reason is
7385 : * that versions 10 and 11 have them, but later versions do not, so
7386 : * emitting them causes the upgrade to fail.
7387 : */
7388 378 : appendPQExpBufferStr(query,
7389 : "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid"
7390 : " AND tc.relkind = " CppAsString2(RELKIND_TOASTVALUE)
7391 : " AND c.relkind <> " CppAsString2(RELKIND_PARTITIONED_TABLE) ")\n");
7392 :
7393 : /*
7394 : * Restrict to interesting relkinds (in particular, not indexes). Not all
7395 : * relkinds are possible in older servers, but it's not worth the trouble
7396 : * to emit a version-dependent list.
7397 : *
7398 : * Composite-type table entries won't be dumped as such, but we have to
7399 : * make a DumpableObject for them so that we can track dependencies of the
7400 : * composite type (pg_depend entries for columns of the composite type
7401 : * link to the pg_class entry not the pg_type entry).
7402 : */
7403 378 : appendPQExpBufferStr(query,
7404 : "WHERE c.relkind IN ("
7405 : CppAsString2(RELKIND_RELATION) ", "
7406 : CppAsString2(RELKIND_SEQUENCE) ", "
7407 : CppAsString2(RELKIND_VIEW) ", "
7408 : CppAsString2(RELKIND_COMPOSITE_TYPE) ", "
7409 : CppAsString2(RELKIND_MATVIEW) ", "
7410 : CppAsString2(RELKIND_FOREIGN_TABLE) ", "
7411 : CppAsString2(RELKIND_PARTITIONED_TABLE) ")\n"
7412 : "ORDER BY c.oid");
7413 :
7414 378 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7415 :
7416 378 : ntups = PQntuples(res);
7417 :
7418 378 : *numTables = ntups;
7419 :
7420 : /*
7421 : * Extract data from result and lock dumpable tables. We do the locking
7422 : * before anything else, to minimize the window wherein a table could
7423 : * disappear under us.
7424 : *
7425 : * Note that we have to save info about all tables here, even when dumping
7426 : * only one, because we don't yet know which tables might be inheritance
7427 : * ancestors of the target table.
7428 : */
7429 378 : tblinfo = (TableInfo *) pg_malloc0(ntups * sizeof(TableInfo));
7430 :
7431 378 : i_reltableoid = PQfnumber(res, "tableoid");
7432 378 : i_reloid = PQfnumber(res, "oid");
7433 378 : i_relname = PQfnumber(res, "relname");
7434 378 : i_relnamespace = PQfnumber(res, "relnamespace");
7435 378 : i_relkind = PQfnumber(res, "relkind");
7436 378 : i_reltype = PQfnumber(res, "reltype");
7437 378 : i_relowner = PQfnumber(res, "relowner");
7438 378 : i_relchecks = PQfnumber(res, "relchecks");
7439 378 : i_relhasindex = PQfnumber(res, "relhasindex");
7440 378 : i_relhasrules = PQfnumber(res, "relhasrules");
7441 378 : i_relpages = PQfnumber(res, "relpages");
7442 378 : i_reltuples = PQfnumber(res, "reltuples");
7443 378 : i_relallvisible = PQfnumber(res, "relallvisible");
7444 378 : i_relallfrozen = PQfnumber(res, "relallfrozen");
7445 378 : i_toastpages = PQfnumber(res, "toastpages");
7446 378 : i_owning_tab = PQfnumber(res, "owning_tab");
7447 378 : i_owning_col = PQfnumber(res, "owning_col");
7448 378 : i_reltablespace = PQfnumber(res, "reltablespace");
7449 378 : i_relhasoids = PQfnumber(res, "relhasoids");
7450 378 : i_relhastriggers = PQfnumber(res, "relhastriggers");
7451 378 : i_relpersistence = PQfnumber(res, "relpersistence");
7452 378 : i_relispopulated = PQfnumber(res, "relispopulated");
7453 378 : i_relreplident = PQfnumber(res, "relreplident");
7454 378 : i_relrowsec = PQfnumber(res, "relrowsecurity");
7455 378 : i_relforcerowsec = PQfnumber(res, "relforcerowsecurity");
7456 378 : i_relfrozenxid = PQfnumber(res, "relfrozenxid");
7457 378 : i_toastfrozenxid = PQfnumber(res, "tfrozenxid");
7458 378 : i_toastoid = PQfnumber(res, "toid");
7459 378 : i_relminmxid = PQfnumber(res, "relminmxid");
7460 378 : i_toastminmxid = PQfnumber(res, "tminmxid");
7461 378 : i_reloptions = PQfnumber(res, "reloptions");
7462 378 : i_checkoption = PQfnumber(res, "checkoption");
7463 378 : i_toastreloptions = PQfnumber(res, "toast_reloptions");
7464 378 : i_reloftype = PQfnumber(res, "reloftype");
7465 378 : i_foreignserver = PQfnumber(res, "foreignserver");
7466 378 : i_amname = PQfnumber(res, "amname");
7467 378 : i_is_identity_sequence = PQfnumber(res, "is_identity_sequence");
7468 378 : i_relacl = PQfnumber(res, "relacl");
7469 378 : i_acldefault = PQfnumber(res, "acldefault");
7470 378 : i_ispartition = PQfnumber(res, "ispartition");
7471 :
7472 378 : if (dopt->lockWaitTimeout)
7473 : {
7474 : /*
7475 : * Arrange to fail instead of waiting forever for a table lock.
7476 : *
7477 : * NB: this coding assumes that the only queries issued within the
7478 : * following loop are LOCK TABLEs; else the timeout may be undesirably
7479 : * applied to other things too.
7480 : */
7481 4 : resetPQExpBuffer(query);
7482 4 : appendPQExpBufferStr(query, "SET statement_timeout = ");
7483 4 : appendStringLiteralConn(query, dopt->lockWaitTimeout, GetConnection(fout));
7484 4 : ExecuteSqlStatement(fout, query->data);
7485 : }
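/*
 * Editor's illustration (hypothetical option value): with
 * --lock-wait-timeout=2s this issues
 *
 *   SET statement_timeout = '2s';
 *
 * so that the LOCK TABLE commands issued in the loop below fail promptly
 * instead of blocking indefinitely behind conflicting DDL.
 */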
7486 :
7487 378 : resetPQExpBuffer(query);
7488 :
7489 99892 : for (i = 0; i < ntups; i++)
7490 : {
7491 99514 : int32 relallvisible = atoi(PQgetvalue(res, i, i_relallvisible));
7492 99514 : int32 relallfrozen = atoi(PQgetvalue(res, i, i_relallfrozen));
7493 :
7494 99514 : tblinfo[i].dobj.objType = DO_TABLE;
7495 99514 : tblinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_reltableoid));
7496 99514 : tblinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_reloid));
7497 99514 : AssignDumpId(&tblinfo[i].dobj);
7498 99514 : tblinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_relname));
7499 199028 : tblinfo[i].dobj.namespace =
7500 99514 : findNamespace(atooid(PQgetvalue(res, i, i_relnamespace)));
7501 99514 : tblinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_relacl));
7502 99514 : tblinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
7503 99514 : tblinfo[i].dacl.privtype = 0;
7504 99514 : tblinfo[i].dacl.initprivs = NULL;
7505 99514 : tblinfo[i].relkind = *(PQgetvalue(res, i, i_relkind));
7506 99514 : tblinfo[i].reltype = atooid(PQgetvalue(res, i, i_reltype));
7507 99514 : tblinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_relowner));
7508 99514 : tblinfo[i].ncheck = atoi(PQgetvalue(res, i, i_relchecks));
7509 99514 : tblinfo[i].hasindex = (strcmp(PQgetvalue(res, i, i_relhasindex), "t") == 0);
7510 99514 : tblinfo[i].hasrules = (strcmp(PQgetvalue(res, i, i_relhasrules), "t") == 0);
7511 99514 : tblinfo[i].relpages = atoi(PQgetvalue(res, i, i_relpages));
7512 99514 : if (PQgetisnull(res, i, i_toastpages))
7513 80032 : tblinfo[i].toastpages = 0;
7514 : else
7515 19482 : tblinfo[i].toastpages = atoi(PQgetvalue(res, i, i_toastpages));
7516 99514 : if (PQgetisnull(res, i, i_owning_tab))
7517 : {
7518 98684 : tblinfo[i].owning_tab = InvalidOid;
7519 98684 : tblinfo[i].owning_col = 0;
7520 : }
7521 : else
7522 : {
7523 830 : tblinfo[i].owning_tab = atooid(PQgetvalue(res, i, i_owning_tab));
7524 830 : tblinfo[i].owning_col = atoi(PQgetvalue(res, i, i_owning_col));
7525 : }
7526 99514 : tblinfo[i].reltablespace = pg_strdup(PQgetvalue(res, i, i_reltablespace));
7527 99514 : tblinfo[i].hasoids = (strcmp(PQgetvalue(res, i, i_relhasoids), "t") == 0);
7528 99514 : tblinfo[i].hastriggers = (strcmp(PQgetvalue(res, i, i_relhastriggers), "t") == 0);
7529 99514 : tblinfo[i].relpersistence = *(PQgetvalue(res, i, i_relpersistence));
7530 99514 : tblinfo[i].relispopulated = (strcmp(PQgetvalue(res, i, i_relispopulated), "t") == 0);
7531 99514 : tblinfo[i].relreplident = *(PQgetvalue(res, i, i_relreplident));
7532 99514 : tblinfo[i].rowsec = (strcmp(PQgetvalue(res, i, i_relrowsec), "t") == 0);
7533 99514 : tblinfo[i].forcerowsec = (strcmp(PQgetvalue(res, i, i_relforcerowsec), "t") == 0);
7534 99514 : tblinfo[i].frozenxid = atooid(PQgetvalue(res, i, i_relfrozenxid));
7535 99514 : tblinfo[i].toast_frozenxid = atooid(PQgetvalue(res, i, i_toastfrozenxid));
7536 99514 : tblinfo[i].toast_oid = atooid(PQgetvalue(res, i, i_toastoid));
7537 99514 : tblinfo[i].minmxid = atooid(PQgetvalue(res, i, i_relminmxid));
7538 99514 : tblinfo[i].toast_minmxid = atooid(PQgetvalue(res, i, i_toastminmxid));
7539 99514 : tblinfo[i].reloptions = pg_strdup(PQgetvalue(res, i, i_reloptions));
7540 99514 : if (PQgetisnull(res, i, i_checkoption))
7541 99422 : tblinfo[i].checkoption = NULL;
7542 : else
7543 92 : tblinfo[i].checkoption = pg_strdup(PQgetvalue(res, i, i_checkoption));
7544 99514 : tblinfo[i].toast_reloptions = pg_strdup(PQgetvalue(res, i, i_toastreloptions));
7545 99514 : tblinfo[i].reloftype = atooid(PQgetvalue(res, i, i_reloftype));
7546 99514 : tblinfo[i].foreign_server = atooid(PQgetvalue(res, i, i_foreignserver));
7547 99514 : if (PQgetisnull(res, i, i_amname))
7548 59650 : tblinfo[i].amname = NULL;
7549 : else
7550 39864 : tblinfo[i].amname = pg_strdup(PQgetvalue(res, i, i_amname));
7551 99514 : tblinfo[i].is_identity_sequence = (strcmp(PQgetvalue(res, i, i_is_identity_sequence), "t") == 0);
7552 99514 : tblinfo[i].ispartition = (strcmp(PQgetvalue(res, i, i_ispartition), "t") == 0);
7553 :
7554 : /* other fields were zeroed above */
7555 :
7556 : /*
7557 : * Decide whether we want to dump this table.
7558 : */
7559 99514 : if (tblinfo[i].relkind == RELKIND_COMPOSITE_TYPE)
7560 366 : tblinfo[i].dobj.dump = DUMP_COMPONENT_NONE;
7561 : else
7562 99148 : selectDumpableTable(&tblinfo[i], fout);
7563 :
7564 : /*
7565 : * Now, consider the table "interesting" if we need to dump its
7566 : * definition, data or its statistics. Later on, we'll skip a lot of
7567                 :      * definition, data, or statistics.  Later on, we'll skip a lot of
7568 : *
7569 : * Note: the "interesting" flag will also be set by flagInhTables for
7570 : * parents of interesting tables, so that we collect necessary
7571 : * inheritance info even when the parents are not themselves being
7572 : * dumped. This is the main reason why we need an "interesting" flag
7573 : * that's separate from the components-to-dump bitmask.
7574 : */
7575 99514 : tblinfo[i].interesting = (tblinfo[i].dobj.dump &
7576 : (DUMP_COMPONENT_DEFINITION |
7577 : DUMP_COMPONENT_DATA |
7578 99514 : DUMP_COMPONENT_STATISTICS)) != 0;
7579 :
7580 99514 : tblinfo[i].dummy_view = false; /* might get set during sort */
7581 99514 : tblinfo[i].postponed_def = false; /* might get set during sort */
7582 :
7583 : /* Tables have data */
7584 99514 : tblinfo[i].dobj.components |= DUMP_COMPONENT_DATA;
7585 :
7586 : /* Mark whether table has an ACL */
7587 99514 : if (!PQgetisnull(res, i, i_relacl))
7588 79760 : tblinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
7589 99514 : tblinfo[i].hascolumnACLs = false; /* may get set later */
7590 :
7591 : /* Add statistics */
7592 99514 : if (tblinfo[i].interesting)
7593 : {
7594 : RelStatsInfo *stats;
7595 :
7596 28072 : stats = getRelationStatistics(fout, &tblinfo[i].dobj,
7597 14036 : tblinfo[i].relpages,
7598 : PQgetvalue(res, i, i_reltuples),
7599 : relallvisible, relallfrozen,
7600 14036 : tblinfo[i].relkind, NULL, 0);
7601 14036 : if (tblinfo[i].relkind == RELKIND_MATVIEW)
7602 796 : tblinfo[i].stats = stats;
7603 : }
7604 :
7605 : /*
7606 : * Read-lock target tables to make sure they aren't DROPPED or altered
7607 : * in schema before we get around to dumping them.
7608 : *
7609 : * Note that we don't explicitly lock parents of the target tables; we
7610 : * assume our lock on the child is enough to prevent schema
7611 : * alterations to parent tables.
7612 : *
7613 : * NOTE: it'd be kinda nice to lock other relations too, not only
7614 : * plain or partitioned tables, but the backend doesn't presently
7615 : * allow that.
7616 : *
7617 : * We only need to lock the table for certain components; see
7618 : * pg_dump.h
7619 : */
7620 99514 : if ((tblinfo[i].dobj.dump & DUMP_COMPONENTS_REQUIRING_LOCK) &&
7621 14036 : (tblinfo[i].relkind == RELKIND_RELATION ||
7622 3958 : tblinfo[i].relkind == RELKIND_PARTITIONED_TABLE))
7623 : {
7624 : /*
7625 : * Tables are locked in batches. When dumping from a remote
7626 : * server this can save a significant amount of time by reducing
7627 : * the number of round trips.
7628 : */
7629 11286 : if (query->len == 0)
7630 248 : appendPQExpBuffer(query, "LOCK TABLE %s",
7631 248 : fmtQualifiedDumpable(&tblinfo[i]));
7632 : else
7633 : {
7634 11038 : appendPQExpBuffer(query, ", %s",
7635 11038 : fmtQualifiedDumpable(&tblinfo[i]));
7636 :
7637 : /* Arbitrarily end a batch when query length reaches 100K. */
7638 11038 : if (query->len >= 100000)
7639 : {
7640 : /* Lock another batch of tables. */
7641 0 : appendPQExpBufferStr(query, " IN ACCESS SHARE MODE");
7642 0 : ExecuteSqlStatement(fout, query->data);
7643 0 : resetPQExpBuffer(query);
7644 : }
7645 : }
7646 : }
7647 : }
7648 :
7649 378 : if (query->len != 0)
7650 : {
7651 : /* Lock the tables in the last batch. */
7652 248 : appendPQExpBufferStr(query, " IN ACCESS SHARE MODE");
7653 248 : ExecuteSqlStatement(fout, query->data);
7654 : }
7655 :
7656 376 : if (dopt->lockWaitTimeout)
7657 : {
7658 4 : ExecuteSqlStatement(fout, "SET statement_timeout = 0");
7659 : }
7660 :
7661 376 : PQclear(res);
7662 :
7663 376 : destroyPQExpBuffer(query);
7664 :
7665 376 : return tblinfo;
7666 : }
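
/*
 * Editor's sketch (not part of pg_dump.c): the LOCK TABLE batching used in
 * getTables() above, reduced to plain libpq.  The helper name, the table
 * list, and the omitted error handling are illustrative assumptions; the
 * 100000-byte flush threshold mirrors the arbitrary batch limit above, and
 * the statement_timeout wrapping done by getTables() would simply bracket
 * this with two more PQexec() calls.  PQExpBuffer is the string builder
 * pg_dump itself uses; it ships with libpq's internal headers, so the
 * include path may vary per installation.
 */
#include <libpq-fe.h>
#include <pqexpbuffer.h>

static void
lock_tables_in_batches(PGconn *conn, const char *const *tables, int ntables)
{
    PQExpBuffer buf = createPQExpBuffer();

    for (int i = 0; i < ntables; i++)
    {
        /* Start a new statement, or extend the current batch. */
        if (buf->len == 0)
            appendPQExpBuffer(buf, "LOCK TABLE %s", tables[i]);
        else
            appendPQExpBuffer(buf, ", %s", tables[i]);

        /* Flush once the statement grows past ~100kB. */
        if (buf->len >= 100000)
        {
            appendPQExpBufferStr(buf, " IN ACCESS SHARE MODE");
            PQclear(PQexec(conn, buf->data));   /* error checks omitted */
            resetPQExpBuffer(buf);
        }
    }

    /* Lock whatever remains in the final, shorter batch. */
    if (buf->len != 0)
    {
        appendPQExpBufferStr(buf, " IN ACCESS SHARE MODE");
        PQclear(PQexec(conn, buf->data));
    }

    destroyPQExpBuffer(buf);
}
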
7667 :
7668 : /*
7669 : * getOwnedSeqs
7670                 :  *    identify owned sequences and mark them as dumpable if their owning table is dumpable
7671 : *
7672 : * We used to do this in getTables(), but it's better to do it after the
7673 : * index used by findTableByOid() has been set up.
7674 : */
7675 : void
7676 376 : getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables)
7677 : {
7678 : int i;
7679 :
7680 : /*
7681 : * Force sequences that are "owned" by table columns to be dumped whenever
7682 : * their owning table is being dumped.
7683 : */
7684 99348 : for (i = 0; i < numTables; i++)
7685 : {
7686 98972 : TableInfo *seqinfo = &tblinfo[i];
7687 : TableInfo *owning_tab;
7688 :
7689 98972 : if (!OidIsValid(seqinfo->owning_tab))
7690 98148 : continue; /* not an owned sequence */
7691 :
7692 824 : owning_tab = findTableByOid(seqinfo->owning_tab);
7693 824 : if (owning_tab == NULL)
7694 0 : pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
7695 : seqinfo->owning_tab, seqinfo->dobj.catId.oid);
7696 :
7697 : /*
7698 : * For an identity sequence, dump exactly the same components for the
7699 : * sequence as for the owning table. This is important because we
7700 : * treat the identity sequence as an integral part of the table. For
7701 : * example, there is not any DDL command that allows creation of such
7702 : * a sequence independently of the table.
7703 : *
7704 : * For other owned sequences such as serial sequences, we need to dump
7705 : * the components that are being dumped for the table and any
7706 : * components that the sequence is explicitly marked with.
7707 : *
7708 : * We can't simply use the set of components which are being dumped
7709 : * for the table as the table might be in an extension (and only the
7710 : * non-extension components, eg: ACLs if changed, security labels, and
7711 : * policies, are being dumped) while the sequence is not (and
7712 : * therefore the definition and other components should also be
7713 : * dumped).
7714 : *
7715 : * If the sequence is part of the extension then it should be properly
7716 : * marked by checkExtensionMembership() and this will be a no-op as
7717 : * the table will be equivalently marked.
7718 : */
7719 824 : if (seqinfo->is_identity_sequence)
7720 398 : seqinfo->dobj.dump = owning_tab->dobj.dump;
7721 : else
7722 426 : seqinfo->dobj.dump |= owning_tab->dobj.dump;
7723 :
7724 : /* Make sure that necessary data is available if we're dumping it */
7725 824 : if (seqinfo->dobj.dump != DUMP_COMPONENT_NONE)
7726 : {
7727 632 : seqinfo->interesting = true;
7728 632 : owning_tab->interesting = true;
7729 : }
7730 : }
7731 376 : }
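
/*
 * Editor's sketch (not part of pg_dump.c): the mask arithmetic getOwnedSeqs()
 * applies, with made-up component flag values.  An identity sequence copies
 * the owning table's component mask verbatim, while a serial-style sequence
 * keeps its own bits and ORs in the table's.
 */
#include <stdio.h>

#define COMP_DEFINITION 0x01
#define COMP_DATA       0x02
#define COMP_ACL        0x04

int
main(void)
{
    unsigned int table_mask = COMP_ACL; /* e.g. extension member whose ACL changed */
    unsigned int identity_mask = COMP_DEFINITION | COMP_DATA;
    unsigned int serial_mask = COMP_DEFINITION | COMP_DATA;

    identity_mask = table_mask;     /* identity: mirror the table exactly -> 0x04 */
    serial_mask |= table_mask;      /* serial: union of both masks        -> 0x07 */

    printf("identity=0x%02x serial=0x%02x\n", identity_mask, serial_mask);
    return 0;
}
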
7732 :
7733 : /*
7734 : * getInherits
7735 : * read all the inheritance information
7736                 :  *    from the system catalogs and return them in the InhInfo* structure
7737 : *
7738 : * numInherits is set to the number of pairs read in
7739 : */
7740 : InhInfo *
7741 376 : getInherits(Archive *fout, int *numInherits)
7742 : {
7743 : PGresult *res;
7744 : int ntups;
7745 : int i;
7746 376 : PQExpBuffer query = createPQExpBuffer();
7747 : InhInfo *inhinfo;
7748 :
7749 : int i_inhrelid;
7750 : int i_inhparent;
7751 :
7752 : /* find all the inheritance information */
7753 376 : appendPQExpBufferStr(query, "SELECT inhrelid, inhparent FROM pg_inherits");
7754 :
7755 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7756 :
7757 376 : ntups = PQntuples(res);
7758 :
7759 376 : *numInherits = ntups;
7760 :
7761 376 : inhinfo = (InhInfo *) pg_malloc(ntups * sizeof(InhInfo));
7762 :
7763 376 : i_inhrelid = PQfnumber(res, "inhrelid");
7764 376 : i_inhparent = PQfnumber(res, "inhparent");
7765 :
7766 7168 : for (i = 0; i < ntups; i++)
7767 : {
7768 6792 : inhinfo[i].inhrelid = atooid(PQgetvalue(res, i, i_inhrelid));
7769 6792 : inhinfo[i].inhparent = atooid(PQgetvalue(res, i, i_inhparent));
7770 : }
7771 :
7772 376 : PQclear(res);
7773 :
7774 376 : destroyPQExpBuffer(query);
7775 :
7776 376 : return inhinfo;
7777 : }
7778 :
7779 : /*
7780 : * getPartitioningInfo
7781 : * get information about partitioning
7782 : *
7783 : * For the most part, we only collect partitioning info about tables we
7784 : * intend to dump. However, this function has to consider all partitioned
7785 : * tables in the database, because we need to know about parents of partitions
7786 : * we are going to dump even if the parents themselves won't be dumped.
7787 : *
7788 : * Specifically, what we need to know is whether each partitioned table
7789 : * has an "unsafe" partitioning scheme that requires us to force
7790 : * load-via-partition-root mode for its children. Currently the only case
7791 : * for which we force that is hash partitioning on enum columns, since the
7792 : * hash codes depend on enum value OIDs which won't be replicated across
7793 : * dump-and-reload. There are other cases in which load-via-partition-root
7794 : * might be necessary, but we expect users to cope with them.
7795 : */
7796 : void
7797 376 : getPartitioningInfo(Archive *fout)
7798 : {
7799 : PQExpBuffer query;
7800 : PGresult *res;
7801 : int ntups;
7802 :
7803 : /* hash partitioning didn't exist before v11 */
7804 376 : if (fout->remoteVersion < 110000)
7805 0 : return;
7806 : /* needn't bother if not dumping data */
7807 376 : if (!fout->dopt->dumpData)
7808 84 : return;
7809 :
7810 292 : query = createPQExpBuffer();
7811 :
7812 : /*
7813 : * Unsafe partitioning schemes are exactly those for which hash enum_ops
7814 : * appears among the partition opclasses. We needn't check partstrat.
7815 : *
7816 : * Note that this query may well retrieve info about tables we aren't
7817 : * going to dump and hence have no lock on. That's okay since we need not
7818 : * invoke any unsafe server-side functions.
7819 : */
7820 292 : appendPQExpBufferStr(query,
7821 : "SELECT partrelid FROM pg_partitioned_table WHERE\n"
7822 : "(SELECT c.oid FROM pg_opclass c JOIN pg_am a "
7823 : "ON c.opcmethod = a.oid\n"
7824 : "WHERE opcname = 'enum_ops' "
7825 : "AND opcnamespace = 'pg_catalog'::regnamespace "
7826 : "AND amname = 'hash') = ANY(partclass)");
7827 :
7828 292 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
7829 :
7830 292 : ntups = PQntuples(res);
7831 :
7832 378 : for (int i = 0; i < ntups; i++)
7833 : {
7834 86 : Oid tabrelid = atooid(PQgetvalue(res, i, 0));
7835 : TableInfo *tbinfo;
7836 :
7837 86 : tbinfo = findTableByOid(tabrelid);
7838 86 : if (tbinfo == NULL)
7839 0 : pg_fatal("failed sanity check, table OID %u appearing in pg_partitioned_table not found",
7840 : tabrelid);
7841 86 : tbinfo->unsafe_partitions = true;
7842 : }
7843 :
7844 292 : PQclear(res);
7845 :
7846 292 : destroyPQExpBuffer(query);
7847 : }
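
/*
 * Editor's sketch (not part of pg_dump.c): running the "hash partitioning on
 * an enum column" detection query from getPartitioningInfo() as a standalone
 * libpq program.  The connection string is a placeholder; the SQL text is the
 * one assembled above, with a regclass cast added only for readable output.
 */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=postgres");  /* placeholder conninfo */
    PGresult   *res;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        return 1;
    }

    res = PQexec(conn,
                 "SELECT partrelid::regclass FROM pg_partitioned_table WHERE\n"
                 "(SELECT c.oid FROM pg_opclass c JOIN pg_am a "
                 "ON c.opcmethod = a.oid\n"
                 "WHERE opcname = 'enum_ops' "
                 "AND opcnamespace = 'pg_catalog'::regnamespace "
                 "AND amname = 'hash') = ANY(partclass)");

    if (PQresultStatus(res) == PGRES_TUPLES_OK)
        for (int i = 0; i < PQntuples(res); i++)
            printf("needs load-via-partition-root: %s\n", PQgetvalue(res, i, 0));

    PQclear(res);
    PQfinish(conn);
    return 0;
}
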
7848 :
7849 : /*
7850 : * getIndexes
7851 : * get information about every index on a dumpable table
7852 : *
7853 : * Note: index data is not returned directly to the caller, but it
7854 : * does get entered into the DumpableObject tables.
7855 : */
7856 : void
7857 376 : getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
7858 : {
7859 376 : PQExpBuffer query = createPQExpBuffer();
7860 376 : PQExpBuffer tbloids = createPQExpBuffer();
7861 : PGresult *res;
7862 : int ntups;
7863 : int curtblindx;
7864 : IndxInfo *indxinfo;
7865 : int i_tableoid,
7866 : i_oid,
7867 : i_indrelid,
7868 : i_indexname,
7869 : i_relpages,
7870 : i_reltuples,
7871 : i_relallvisible,
7872 : i_relallfrozen,
7873 : i_parentidx,
7874 : i_indexdef,
7875 : i_indnkeyatts,
7876 : i_indnatts,
7877 : i_indkey,
7878 : i_indisclustered,
7879 : i_indisreplident,
7880 : i_indnullsnotdistinct,
7881 : i_contype,
7882 : i_conname,
7883 : i_condeferrable,
7884 : i_condeferred,
7885 : i_conperiod,
7886 : i_contableoid,
7887 : i_conoid,
7888 : i_condef,
7889 : i_indattnames,
7890 : i_tablespace,
7891 : i_indreloptions,
7892 : i_indstatcols,
7893 : i_indstatvals;
7894 :
7895 : /*
7896 : * We want to perform just one query against pg_index. However, we
7897 : * mustn't try to select every row of the catalog and then sort it out on
7898 : * the client side, because some of the server-side functions we need
7899 : * would be unsafe to apply to tables we don't have lock on. Hence, we
7900 : * build an array of the OIDs of tables we care about (and now have lock
7901 : * on!), and use a WHERE clause to constrain which rows are selected.
7902 : */
7903 376 : appendPQExpBufferChar(tbloids, '{');
7904 99348 : for (int i = 0; i < numTables; i++)
7905 : {
7906 98972 : TableInfo *tbinfo = &tblinfo[i];
7907 :
7908 98972 : if (!tbinfo->hasindex)
7909 70108 : continue;
7910 :
7911 : /*
7912 : * We can ignore indexes of uninteresting tables.
7913 : */
7914 28864 : if (!tbinfo->interesting)
7915 24850 : continue;
7916 :
7917 : /* OK, we need info for this table */
7918 4014 : if (tbloids->len > 1) /* do we have more than the '{'? */
7919 3856 : appendPQExpBufferChar(tbloids, ',');
7920 4014 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
7921 : }
7922 376 : appendPQExpBufferChar(tbloids, '}');
7923 :
7924 376 : appendPQExpBufferStr(query,
7925 : "SELECT t.tableoid, t.oid, i.indrelid, "
7926 : "t.relname AS indexname, "
7927 : "t.relpages, t.reltuples, t.relallvisible, ");
7928 :
7929 376 : if (fout->remoteVersion >= 180000)
7930 376 : appendPQExpBufferStr(query, "t.relallfrozen, ");
7931 : else
7932 0 : appendPQExpBufferStr(query, "0 AS relallfrozen, ");
7933 :
7934 376 : appendPQExpBufferStr(query,
7935 : "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
7936 : "i.indkey, i.indisclustered, "
7937 : "c.contype, c.conname, "
7938 : "c.condeferrable, c.condeferred, "
7939 : "c.tableoid AS contableoid, "
7940 : "c.oid AS conoid, "
7941 : "pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
7942 : "CASE WHEN i.indexprs IS NOT NULL THEN "
7943 : "(SELECT pg_catalog.array_agg(attname ORDER BY attnum)"
7944 : " FROM pg_catalog.pg_attribute "
7945 : " WHERE attrelid = i.indexrelid) "
7946 : "ELSE NULL END AS indattnames, "
7947 : "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
7948 : "t.reloptions AS indreloptions, ");
7949 :
7950 :
7951 376 : if (fout->remoteVersion >= 90400)
7952 376 : appendPQExpBufferStr(query,
7953 : "i.indisreplident, ");
7954 : else
7955 0 : appendPQExpBufferStr(query,
7956 : "false AS indisreplident, ");
7957 :
7958 376 : if (fout->remoteVersion >= 110000)
7959 376 : appendPQExpBufferStr(query,
7960 : "inh.inhparent AS parentidx, "
7961 : "i.indnkeyatts AS indnkeyatts, "
7962 : "i.indnatts AS indnatts, "
7963 : "(SELECT pg_catalog.array_agg(attnum ORDER BY attnum) "
7964 : " FROM pg_catalog.pg_attribute "
7965 : " WHERE attrelid = i.indexrelid AND "
7966 : " attstattarget >= 0) AS indstatcols, "
7967 : "(SELECT pg_catalog.array_agg(attstattarget ORDER BY attnum) "
7968 : " FROM pg_catalog.pg_attribute "
7969 : " WHERE attrelid = i.indexrelid AND "
7970 : " attstattarget >= 0) AS indstatvals, ");
7971 : else
7972 0 : appendPQExpBufferStr(query,
7973 : "0 AS parentidx, "
7974 : "i.indnatts AS indnkeyatts, "
7975 : "i.indnatts AS indnatts, "
7976 : "'' AS indstatcols, "
7977 : "'' AS indstatvals, ");
7978 :
7979 376 : if (fout->remoteVersion >= 150000)
7980 376 : appendPQExpBufferStr(query,
7981 : "i.indnullsnotdistinct, ");
7982 : else
7983 0 : appendPQExpBufferStr(query,
7984 : "false AS indnullsnotdistinct, ");
7985 :
7986 376 : if (fout->remoteVersion >= 180000)
7987 376 : appendPQExpBufferStr(query,
7988 : "c.conperiod ");
7989 : else
7990 0 : appendPQExpBufferStr(query,
7991 : "NULL AS conperiod ");
7992 :
7993 : /*
7994 : * The point of the messy-looking outer join is to find a constraint that
7995 : * is related by an internal dependency link to the index. If we find one,
7996 : * create a CONSTRAINT entry linked to the INDEX entry. We assume an
7997 : * index won't have more than one internal dependency.
7998 : *
7999 : * Note: the check on conrelid is redundant, but useful because that
8000 : * column is indexed while conindid is not.
8001 : */
8002 376 : if (fout->remoteVersion >= 110000)
8003 : {
8004 376 : appendPQExpBuffer(query,
8005 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8006 : "JOIN pg_catalog.pg_index i ON (src.tbloid = i.indrelid) "
8007 : "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
8008 : "JOIN pg_catalog.pg_class t2 ON (t2.oid = i.indrelid) "
8009 : "LEFT JOIN pg_catalog.pg_constraint c "
8010 : "ON (i.indrelid = c.conrelid AND "
8011 : "i.indexrelid = c.conindid AND "
8012 : "c.contype IN ('p','u','x')) "
8013 : "LEFT JOIN pg_catalog.pg_inherits inh "
8014 : "ON (inh.inhrelid = indexrelid) "
8015 : "WHERE (i.indisvalid OR t2.relkind = 'p') "
8016 : "AND i.indisready "
8017 : "ORDER BY i.indrelid, indexname",
8018 : tbloids->data);
8019 : }
8020 : else
8021 : {
8022 : /*
8023 : * the test on indisready is necessary in 9.2, and harmless in
8024 : * earlier/later versions
8025 : */
8026 0 : appendPQExpBuffer(query,
8027 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8028 : "JOIN pg_catalog.pg_index i ON (src.tbloid = i.indrelid) "
8029 : "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
8030 : "LEFT JOIN pg_catalog.pg_constraint c "
8031 : "ON (i.indrelid = c.conrelid AND "
8032 : "i.indexrelid = c.conindid AND "
8033 : "c.contype IN ('p','u','x')) "
8034 : "WHERE i.indisvalid AND i.indisready "
8035 : "ORDER BY i.indrelid, indexname",
8036 : tbloids->data);
8037 : }
8038 :
8039 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8040 :
8041 376 : ntups = PQntuples(res);
8042 :
8043 376 : i_tableoid = PQfnumber(res, "tableoid");
8044 376 : i_oid = PQfnumber(res, "oid");
8045 376 : i_indrelid = PQfnumber(res, "indrelid");
8046 376 : i_indexname = PQfnumber(res, "indexname");
8047 376 : i_relpages = PQfnumber(res, "relpages");
8048 376 : i_reltuples = PQfnumber(res, "reltuples");
8049 376 : i_relallvisible = PQfnumber(res, "relallvisible");
8050 376 : i_relallfrozen = PQfnumber(res, "relallfrozen");
8051 376 : i_parentidx = PQfnumber(res, "parentidx");
8052 376 : i_indexdef = PQfnumber(res, "indexdef");
8053 376 : i_indnkeyatts = PQfnumber(res, "indnkeyatts");
8054 376 : i_indnatts = PQfnumber(res, "indnatts");
8055 376 : i_indkey = PQfnumber(res, "indkey");
8056 376 : i_indisclustered = PQfnumber(res, "indisclustered");
8057 376 : i_indisreplident = PQfnumber(res, "indisreplident");
8058 376 : i_indnullsnotdistinct = PQfnumber(res, "indnullsnotdistinct");
8059 376 : i_contype = PQfnumber(res, "contype");
8060 376 : i_conname = PQfnumber(res, "conname");
8061 376 : i_condeferrable = PQfnumber(res, "condeferrable");
8062 376 : i_condeferred = PQfnumber(res, "condeferred");
8063 376 : i_conperiod = PQfnumber(res, "conperiod");
8064 376 : i_contableoid = PQfnumber(res, "contableoid");
8065 376 : i_conoid = PQfnumber(res, "conoid");
8066 376 : i_condef = PQfnumber(res, "condef");
8067 376 : i_indattnames = PQfnumber(res, "indattnames");
8068 376 : i_tablespace = PQfnumber(res, "tablespace");
8069 376 : i_indreloptions = PQfnumber(res, "indreloptions");
8070 376 : i_indstatcols = PQfnumber(res, "indstatcols");
8071 376 : i_indstatvals = PQfnumber(res, "indstatvals");
8072 :
8073 376 : indxinfo = (IndxInfo *) pg_malloc(ntups * sizeof(IndxInfo));
8074 :
8075 : /*
8076 : * Outer loop iterates once per table, not once per row. Incrementing of
8077 : * j is handled by the inner loop.
8078 : */
8079 376 : curtblindx = -1;
8080 4350 : for (int j = 0; j < ntups;)
8081 : {
8082 3974 : Oid indrelid = atooid(PQgetvalue(res, j, i_indrelid));
8083 3974 : TableInfo *tbinfo = NULL;
8084 3974 : char **indAttNames = NULL;
8085 3974 : int nindAttNames = 0;
8086 : int numinds;
8087 :
8088 : /* Count rows for this table */
8089 5218 : for (numinds = 1; numinds < ntups - j; numinds++)
8090 5060 : if (atooid(PQgetvalue(res, j + numinds, i_indrelid)) != indrelid)
8091 3816 : break;
8092 :
8093 : /*
8094 : * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8095 : * order.
8096 : */
8097 46662 : while (++curtblindx < numTables)
8098 : {
8099 46662 : tbinfo = &tblinfo[curtblindx];
8100 46662 : if (tbinfo->dobj.catId.oid == indrelid)
8101 3974 : break;
8102 : }
8103 3974 : if (curtblindx >= numTables)
8104 0 : pg_fatal("unrecognized table OID %u", indrelid);
8105 : /* cross-check that we only got requested tables */
8106 3974 : if (!tbinfo->hasindex ||
8107 3974 : !tbinfo->interesting)
8108 0 : pg_fatal("unexpected index data for table \"%s\"",
8109 : tbinfo->dobj.name);
8110 :
8111 : /* Save data for this table */
8112 3974 : tbinfo->indexes = indxinfo + j;
8113 3974 : tbinfo->numIndexes = numinds;
8114 :
8115 9192 : for (int c = 0; c < numinds; c++, j++)
8116 : {
8117 : char contype;
8118 : char indexkind;
8119 : RelStatsInfo *relstats;
8120 5218 : int32 relpages = atoi(PQgetvalue(res, j, i_relpages));
8121 5218 : int32 relallvisible = atoi(PQgetvalue(res, j, i_relallvisible));
8122 5218 : int32 relallfrozen = atoi(PQgetvalue(res, j, i_relallfrozen));
8123 :
8124 5218 : indxinfo[j].dobj.objType = DO_INDEX;
8125 5218 : indxinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
8126 5218 : indxinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
8127 5218 : AssignDumpId(&indxinfo[j].dobj);
8128 5218 : indxinfo[j].dobj.dump = tbinfo->dobj.dump;
8129 5218 : indxinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_indexname));
8130 5218 : indxinfo[j].dobj.namespace = tbinfo->dobj.namespace;
8131 5218 : indxinfo[j].indextable = tbinfo;
8132 5218 : indxinfo[j].indexdef = pg_strdup(PQgetvalue(res, j, i_indexdef));
8133 5218 : indxinfo[j].indnkeyattrs = atoi(PQgetvalue(res, j, i_indnkeyatts));
8134 5218 : indxinfo[j].indnattrs = atoi(PQgetvalue(res, j, i_indnatts));
8135 5218 : indxinfo[j].tablespace = pg_strdup(PQgetvalue(res, j, i_tablespace));
8136 5218 : indxinfo[j].indreloptions = pg_strdup(PQgetvalue(res, j, i_indreloptions));
8137 5218 : indxinfo[j].indstatcols = pg_strdup(PQgetvalue(res, j, i_indstatcols));
8138 5218 : indxinfo[j].indstatvals = pg_strdup(PQgetvalue(res, j, i_indstatvals));
8139 5218 : indxinfo[j].indkeys = (Oid *) pg_malloc(indxinfo[j].indnattrs * sizeof(Oid));
8140 5218 : parseOidArray(PQgetvalue(res, j, i_indkey),
8141 5218 : indxinfo[j].indkeys, indxinfo[j].indnattrs);
8142 5218 : indxinfo[j].indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't');
8143 5218 : indxinfo[j].indisreplident = (PQgetvalue(res, j, i_indisreplident)[0] == 't');
8144 5218 : indxinfo[j].indnullsnotdistinct = (PQgetvalue(res, j, i_indnullsnotdistinct)[0] == 't');
8145 5218 : indxinfo[j].parentidx = atooid(PQgetvalue(res, j, i_parentidx));
8146 5218 : indxinfo[j].partattaches = (SimplePtrList)
8147 : {
8148 : NULL, NULL
8149 : };
8150 :
8151 5218 : if (indxinfo[j].parentidx == 0)
8152 4070 : indexkind = RELKIND_INDEX;
8153 : else
8154 1148 : indexkind = RELKIND_PARTITIONED_INDEX;
8155 :
8156 5218 : if (!PQgetisnull(res, j, i_indattnames))
8157 : {
8158 292 : if (!parsePGArray(PQgetvalue(res, j, i_indattnames),
8159 : &indAttNames, &nindAttNames))
8160 0 : pg_fatal("could not parse %s array", "indattnames");
8161 : }
8162 :
8163 5218 : relstats = getRelationStatistics(fout, &indxinfo[j].dobj, relpages,
8164 : PQgetvalue(res, j, i_reltuples),
8165 : relallvisible, relallfrozen, indexkind,
8166 : indAttNames, nindAttNames);
8167 :
8168 5218 : contype = *(PQgetvalue(res, j, i_contype));
8169 5218 : if (contype == 'p' || contype == 'u' || contype == 'x')
8170 3034 : {
8171 : /*
8172 : * If we found a constraint matching the index, create an
8173 : * entry for it.
8174 : */
8175 : ConstraintInfo *constrinfo;
8176 :
8177 3034 : constrinfo = (ConstraintInfo *) pg_malloc(sizeof(ConstraintInfo));
8178 3034 : constrinfo->dobj.objType = DO_CONSTRAINT;
8179 3034 : constrinfo->dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
8180 3034 : constrinfo->dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
8181 3034 : AssignDumpId(&constrinfo->dobj);
8182 3034 : constrinfo->dobj.dump = tbinfo->dobj.dump;
8183 3034 : constrinfo->dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
8184 3034 : constrinfo->dobj.namespace = tbinfo->dobj.namespace;
8185 3034 : constrinfo->contable = tbinfo;
8186 3034 : constrinfo->condomain = NULL;
8187 3034 : constrinfo->contype = contype;
8188 3034 : if (contype == 'x')
8189 20 : constrinfo->condef = pg_strdup(PQgetvalue(res, j, i_condef));
8190 : else
8191 3014 : constrinfo->condef = NULL;
8192 3034 : constrinfo->confrelid = InvalidOid;
8193 3034 : constrinfo->conindex = indxinfo[j].dobj.dumpId;
8194 3034 : constrinfo->condeferrable = *(PQgetvalue(res, j, i_condeferrable)) == 't';
8195 3034 : constrinfo->condeferred = *(PQgetvalue(res, j, i_condeferred)) == 't';
8196 3034 : constrinfo->conperiod = *(PQgetvalue(res, j, i_conperiod)) == 't';
8197 3034 : constrinfo->conislocal = true;
8198 3034 : constrinfo->separate = true;
8199 :
8200 3034 : indxinfo[j].indexconstraint = constrinfo->dobj.dumpId;
8201 3034 : if (relstats != NULL)
8202 1060 : addObjectDependency(&relstats->dobj, constrinfo->dobj.dumpId);
8203 : }
8204 : else
8205 : {
8206 : /* Plain secondary index */
8207 2184 : indxinfo[j].indexconstraint = 0;
8208 : }
8209 : }
8210 : }
8211 :
8212 376 : PQclear(res);
8213 :
8214 376 : destroyPQExpBuffer(query);
8215 376 : destroyPQExpBuffer(tbloids);
8216 376 : }
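
/*
 * Editor's sketch (not part of pg_dump.c): the row-grouping idiom used by
 * getIndexes() and getTriggers(), reduced to a sorted array of keys.  The
 * outer loop runs once per distinct key and the inner scan measures the run
 * length, so every row is examined exactly once; the sample data is made up.
 */
#include <stdio.h>

int
main(void)
{
    /* Keys sorted ascending, as the queries' ORDER BY guarantees. */
    int         keys[] = {10, 10, 12, 15, 15, 15, 20};
    int         ntups = 7;

    for (int j = 0; j < ntups;)
    {
        int         key = keys[j];
        int         nrows;

        /* Count consecutive rows that share this key. */
        for (nrows = 1; nrows < ntups - j; nrows++)
            if (keys[j + nrows] != key)
                break;

        printf("key %d has %d row(s)\n", key, nrows);

        /* getIndexes() advances j inside its per-row inner loop instead. */
        j += nrows;
    }
    return 0;
}
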
8217 :
8218 : /*
8219 : * getExtendedStatistics
8220 : * get information about extended-statistics objects.
8221 : *
8222 : * Note: extended statistics data is not returned directly to the caller, but
8223 : * it does get entered into the DumpableObject tables.
8224 : */
8225 : void
8226 376 : getExtendedStatistics(Archive *fout)
8227 : {
8228 : PQExpBuffer query;
8229 : PGresult *res;
8230 : StatsExtInfo *statsextinfo;
8231 : int ntups;
8232 : int i_tableoid;
8233 : int i_oid;
8234 : int i_stxname;
8235 : int i_stxnamespace;
8236 : int i_stxowner;
8237 : int i_stxrelid;
8238 : int i_stattarget;
8239 : int i;
8240 :
8241 : /* Extended statistics were new in v10 */
8242 376 : if (fout->remoteVersion < 100000)
8243 0 : return;
8244 :
8245 376 : query = createPQExpBuffer();
8246 :
8247 376 : if (fout->remoteVersion < 130000)
8248 0 : appendPQExpBufferStr(query, "SELECT tableoid, oid, stxname, "
8249 : "stxnamespace, stxowner, stxrelid, NULL AS stxstattarget "
8250 : "FROM pg_catalog.pg_statistic_ext");
8251 : else
8252 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, stxname, "
8253 : "stxnamespace, stxowner, stxrelid, stxstattarget "
8254 : "FROM pg_catalog.pg_statistic_ext");
8255 :
8256 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8257 :
8258 376 : ntups = PQntuples(res);
8259 :
8260 376 : i_tableoid = PQfnumber(res, "tableoid");
8261 376 : i_oid = PQfnumber(res, "oid");
8262 376 : i_stxname = PQfnumber(res, "stxname");
8263 376 : i_stxnamespace = PQfnumber(res, "stxnamespace");
8264 376 : i_stxowner = PQfnumber(res, "stxowner");
8265 376 : i_stxrelid = PQfnumber(res, "stxrelid");
8266 376 : i_stattarget = PQfnumber(res, "stxstattarget");
8267 :
8268 376 : statsextinfo = (StatsExtInfo *) pg_malloc(ntups * sizeof(StatsExtInfo));
8269 :
8270 792 : for (i = 0; i < ntups; i++)
8271 : {
8272 416 : statsextinfo[i].dobj.objType = DO_STATSEXT;
8273 416 : statsextinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8274 416 : statsextinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8275 416 : AssignDumpId(&statsextinfo[i].dobj);
8276 416 : statsextinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_stxname));
8277 832 : statsextinfo[i].dobj.namespace =
8278 416 : findNamespace(atooid(PQgetvalue(res, i, i_stxnamespace)));
8279 416 : statsextinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_stxowner));
8280 832 : statsextinfo[i].stattable =
8281 416 : findTableByOid(atooid(PQgetvalue(res, i, i_stxrelid)));
8282 416 : if (PQgetisnull(res, i, i_stattarget))
8283 326 : statsextinfo[i].stattarget = -1;
8284 : else
8285 90 : statsextinfo[i].stattarget = atoi(PQgetvalue(res, i, i_stattarget));
8286 :
8287 : /* Decide whether we want to dump it */
8288 416 : selectDumpableStatisticsObject(&(statsextinfo[i]), fout);
8289 :
8290 416 : if (fout->dopt->dumpStatistics)
8291 304 : statsextinfo[i].dobj.components |= DUMP_COMPONENT_STATISTICS;
8292 : }
8293 :
8294 376 : PQclear(res);
8295 376 : destroyPQExpBuffer(query);
8296 : }
8297 :
8298 : /*
8299 : * getConstraints
8300 : *
8301 : * Get info about constraints on dumpable tables.
8302 : *
8303 : * Currently handles foreign keys only.
8304 : * Unique and primary key constraints are handled with indexes,
8305 : * while check constraints are processed in getTableAttrs().
8306 : */
8307 : void
8308 376 : getConstraints(Archive *fout, TableInfo tblinfo[], int numTables)
8309 : {
8310 376 : PQExpBuffer query = createPQExpBuffer();
8311 376 : PQExpBuffer tbloids = createPQExpBuffer();
8312 : PGresult *res;
8313 : int ntups;
8314 : int curtblindx;
8315 376 : TableInfo *tbinfo = NULL;
8316 : ConstraintInfo *constrinfo;
8317 : int i_contableoid,
8318 : i_conoid,
8319 : i_conrelid,
8320 : i_conname,
8321 : i_confrelid,
8322 : i_conindid,
8323 : i_condef;
8324 :
8325 : /*
8326 : * We want to perform just one query against pg_constraint. However, we
8327 : * mustn't try to select every row of the catalog and then sort it out on
8328 : * the client side, because some of the server-side functions we need
8329 : * would be unsafe to apply to tables we don't have lock on. Hence, we
8330 : * build an array of the OIDs of tables we care about (and now have lock
8331 : * on!), and use a WHERE clause to constrain which rows are selected.
8332 : */
8333 376 : appendPQExpBufferChar(tbloids, '{');
8334 99348 : for (int i = 0; i < numTables; i++)
8335 : {
8336 98972 : TableInfo *tinfo = &tblinfo[i];
8337 :
8338 98972 : if (!(tinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
8339 85040 : continue;
8340 :
8341 : /* OK, we need info for this table */
8342 13932 : if (tbloids->len > 1) /* do we have more than the '{'? */
8343 13682 : appendPQExpBufferChar(tbloids, ',');
8344 13932 : appendPQExpBuffer(tbloids, "%u", tinfo->dobj.catId.oid);
8345 : }
8346 376 : appendPQExpBufferChar(tbloids, '}');
8347 :
8348 376 : appendPQExpBufferStr(query,
8349 : "SELECT c.tableoid, c.oid, "
8350 : "conrelid, conname, confrelid, ");
8351 376 : if (fout->remoteVersion >= 110000)
8352 376 : appendPQExpBufferStr(query, "conindid, ");
8353 : else
8354 0 : appendPQExpBufferStr(query, "0 AS conindid, ");
8355 376 : appendPQExpBuffer(query,
8356 : "pg_catalog.pg_get_constraintdef(c.oid) AS condef\n"
8357 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8358 : "JOIN pg_catalog.pg_constraint c ON (src.tbloid = c.conrelid)\n"
8359 : "WHERE contype = 'f' ",
8360 : tbloids->data);
8361 376 : if (fout->remoteVersion >= 110000)
8362 376 : appendPQExpBufferStr(query,
8363 : "AND conparentid = 0 ");
8364 376 : appendPQExpBufferStr(query,
8365 : "ORDER BY conrelid, conname");
8366 :
8367 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8368 :
8369 376 : ntups = PQntuples(res);
8370 :
8371 376 : i_contableoid = PQfnumber(res, "tableoid");
8372 376 : i_conoid = PQfnumber(res, "oid");
8373 376 : i_conrelid = PQfnumber(res, "conrelid");
8374 376 : i_conname = PQfnumber(res, "conname");
8375 376 : i_confrelid = PQfnumber(res, "confrelid");
8376 376 : i_conindid = PQfnumber(res, "conindid");
8377 376 : i_condef = PQfnumber(res, "condef");
8378 :
8379 376 : constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));
8380 :
8381 376 : curtblindx = -1;
8382 718 : for (int j = 0; j < ntups; j++)
8383 : {
8384 342 : Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
8385 : TableInfo *reftable;
8386 :
8387 : /*
8388 : * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8389 : * order.
8390 : */
8391 342 : if (tbinfo == NULL || tbinfo->dobj.catId.oid != conrelid)
8392 : {
8393 27066 : while (++curtblindx < numTables)
8394 : {
8395 27066 : tbinfo = &tblinfo[curtblindx];
8396 27066 : if (tbinfo->dobj.catId.oid == conrelid)
8397 322 : break;
8398 : }
8399 322 : if (curtblindx >= numTables)
8400 0 : pg_fatal("unrecognized table OID %u", conrelid);
8401 : }
8402 :
8403 342 : constrinfo[j].dobj.objType = DO_FK_CONSTRAINT;
8404 342 : constrinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
8405 342 : constrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
8406 342 : AssignDumpId(&constrinfo[j].dobj);
8407 342 : constrinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
8408 342 : constrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
8409 342 : constrinfo[j].contable = tbinfo;
8410 342 : constrinfo[j].condomain = NULL;
8411 342 : constrinfo[j].contype = 'f';
8412 342 : constrinfo[j].condef = pg_strdup(PQgetvalue(res, j, i_condef));
8413 342 : constrinfo[j].confrelid = atooid(PQgetvalue(res, j, i_confrelid));
8414 342 : constrinfo[j].conindex = 0;
8415 342 : constrinfo[j].condeferrable = false;
8416 342 : constrinfo[j].condeferred = false;
8417 342 : constrinfo[j].conislocal = true;
8418 342 : constrinfo[j].separate = true;
8419 :
8420 : /*
8421 : * Restoring an FK that points to a partitioned table requires that
8422 : * all partition indexes have been attached beforehand. Ensure that
8423 : * happens by making the constraint depend on each index partition
8424 : * attach object.
8425 : */
8426 342 : reftable = findTableByOid(constrinfo[j].confrelid);
8427 342 : if (reftable && reftable->relkind == RELKIND_PARTITIONED_TABLE)
8428 : {
8429 40 : Oid indexOid = atooid(PQgetvalue(res, j, i_conindid));
8430 :
8431 40 : if (indexOid != InvalidOid)
8432 : {
8433 40 : for (int k = 0; k < reftable->numIndexes; k++)
8434 : {
8435 : IndxInfo *refidx;
8436 :
8437 : /* not our index? */
8438 40 : if (reftable->indexes[k].dobj.catId.oid != indexOid)
8439 0 : continue;
8440 :
8441 40 : refidx = &reftable->indexes[k];
8442 40 : addConstrChildIdxDeps(&constrinfo[j].dobj, refidx);
8443 40 : break;
8444 : }
8445 : }
8446 : }
8447 : }
8448 :
8449 376 : PQclear(res);
8450 :
8451 376 : destroyPQExpBuffer(query);
8452 376 : destroyPQExpBuffer(tbloids);
8453 376 : }
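
/*
 * Editor's sketch (not part of pg_dump.c): the "build an OID array on the
 * client, then join against unnest()" technique that getIndexes(),
 * getConstraints() and getTriggers() all use to fetch rows for many tables
 * in one round trip.  The helper name, the OID list, and the missing error
 * handling are assumptions; PQExpBuffer comes from libpq's internal headers.
 */
#include <stdio.h>
#include <libpq-fe.h>
#include <pqexpbuffer.h>

static void
print_fk_constraints(PGconn *conn, const unsigned int *taboids, int ntabs)
{
    PQExpBuffer oids = createPQExpBuffer();
    PQExpBuffer query = createPQExpBuffer();
    PGresult   *res;

    /* Build a text array literal such as {16384,16390,16402}. */
    appendPQExpBufferChar(oids, '{');
    for (int i = 0; i < ntabs; i++)
    {
        if (i > 0)
            appendPQExpBufferChar(oids, ',');
        appendPQExpBuffer(oids, "%u", taboids[i]);
    }
    appendPQExpBufferChar(oids, '}');

    /* Only rows for the listed (and therefore locked) tables come back. */
    appendPQExpBuffer(query,
                      "SELECT c.conrelid::regclass, c.conname "
                      "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid) "
                      "JOIN pg_catalog.pg_constraint c ON (src.tbloid = c.conrelid) "
                      "WHERE c.contype = 'f' ORDER BY c.conrelid, c.conname",
                      oids->data);

    res = PQexec(conn, query->data);
    if (PQresultStatus(res) == PGRES_TUPLES_OK)
        for (int i = 0; i < PQntuples(res); i++)
            printf("%s: %s\n", PQgetvalue(res, i, 0), PQgetvalue(res, i, 1));

    PQclear(res);
    destroyPQExpBuffer(query);
    destroyPQExpBuffer(oids);
}
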
8454 :
8455 : /*
8456 : * addConstrChildIdxDeps
8457 : *
8458 : * Recursive subroutine for getConstraints
8459 : *
8460 : * Given an object representing a foreign key constraint and an index on the
8461 : * partitioned table it references, mark the constraint object as dependent
8462 : * on the DO_INDEX_ATTACH object of each index partition, recursively
8463 : * drilling down to their partitions if any. This ensures that the FK is not
8464 : * restored until the index is fully marked valid.
8465 : */
8466 : static void
8467 90 : addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx)
8468 : {
8469 : SimplePtrListCell *cell;
8470 :
8471 : Assert(dobj->objType == DO_FK_CONSTRAINT);
8472 :
8473 310 : for (cell = refidx->partattaches.head; cell; cell = cell->next)
8474 : {
8475 220 : IndexAttachInfo *attach = (IndexAttachInfo *) cell->ptr;
8476 :
8477 220 : addObjectDependency(dobj, attach->dobj.dumpId);
8478 :
8479 220 : if (attach->partitionIdx->partattaches.head != NULL)
8480 50 : addConstrChildIdxDeps(dobj, attach->partitionIdx);
8481 : }
8482 90 : }
8483 :
8484 : /*
8485 : * getDomainConstraints
8486 : *
8487 : * Get info about constraints on a domain.
8488 : */
8489 : static void
8490 316 : getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
8491 : {
8492 : ConstraintInfo *constrinfo;
8493 316 : PQExpBuffer query = createPQExpBuffer();
8494 : PGresult *res;
8495 : int i_tableoid,
8496 : i_oid,
8497 : i_conname,
8498 : i_consrc,
8499 : i_convalidated,
8500 : i_contype;
8501 : int ntups;
8502 :
8503 316 : if (!fout->is_prepared[PREPQUERY_GETDOMAINCONSTRAINTS])
8504 : {
8505 : /*
8506 : * Set up query for constraint-specific details. For servers 17 and
8507 : * up, domains have constraints of type 'n' as well as 'c', otherwise
8508 : * just the latter.
8509 : */
8510 86 : appendPQExpBuffer(query,
8511 : "PREPARE getDomainConstraints(pg_catalog.oid) AS\n"
8512 : "SELECT tableoid, oid, conname, "
8513 : "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
8514 : "convalidated, contype "
8515 : "FROM pg_catalog.pg_constraint "
8516 : "WHERE contypid = $1 AND contype IN (%s) "
8517 : "ORDER BY conname",
8518 86 : fout->remoteVersion < 170000 ? "'c'" : "'c', 'n'");
8519 :
8520 86 : ExecuteSqlStatement(fout, query->data);
8521 :
8522 86 : fout->is_prepared[PREPQUERY_GETDOMAINCONSTRAINTS] = true;
8523 : }
8524 :
8525 316 : printfPQExpBuffer(query,
8526 : "EXECUTE getDomainConstraints('%u')",
8527 : tyinfo->dobj.catId.oid);
8528 :
8529 316 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8530 :
8531 316 : ntups = PQntuples(res);
8532 :
8533 316 : i_tableoid = PQfnumber(res, "tableoid");
8534 316 : i_oid = PQfnumber(res, "oid");
8535 316 : i_conname = PQfnumber(res, "conname");
8536 316 : i_consrc = PQfnumber(res, "consrc");
8537 316 : i_convalidated = PQfnumber(res, "convalidated");
8538 316 : i_contype = PQfnumber(res, "contype");
8539 :
8540 316 : constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));
8541 316 : tyinfo->domChecks = constrinfo;
8542 :
8543 : /* 'i' tracks result rows; 'j' counts CHECK constraints */
8544 648 : for (int i = 0, j = 0; i < ntups; i++)
8545 : {
8546 332 : bool validated = PQgetvalue(res, i, i_convalidated)[0] == 't';
8547 332 : char contype = (PQgetvalue(res, i, i_contype))[0];
8548 : ConstraintInfo *constraint;
8549 :
8550 332 : if (contype == CONSTRAINT_CHECK)
8551 : {
8552 226 : constraint = &constrinfo[j++];
8553 226 : tyinfo->nDomChecks++;
8554 : }
8555 : else
8556 : {
8557 : Assert(contype == CONSTRAINT_NOTNULL);
8558 : Assert(tyinfo->notnull == NULL);
8559 : /* use last item in array for the not-null constraint */
8560 106 : tyinfo->notnull = &(constrinfo[ntups - 1]);
8561 106 : constraint = tyinfo->notnull;
8562 : }
8563 :
8564 332 : constraint->dobj.objType = DO_CONSTRAINT;
8565 332 : constraint->dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8566 332 : constraint->dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8567 332 : AssignDumpId(&(constraint->dobj));
8568 332 : constraint->dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
8569 332 : constraint->dobj.namespace = tyinfo->dobj.namespace;
8570 332 : constraint->contable = NULL;
8571 332 : constraint->condomain = tyinfo;
8572 332 : constraint->contype = contype;
8573 332 : constraint->condef = pg_strdup(PQgetvalue(res, i, i_consrc));
8574 332 : constraint->confrelid = InvalidOid;
8575 332 : constraint->conindex = 0;
8576 332 : constraint->condeferrable = false;
8577 332 : constraint->condeferred = false;
8578 332 : constraint->conislocal = true;
8579 :
8580 332 : constraint->separate = !validated;
8581 :
8582 : /*
8583 : * Make the domain depend on the constraint, ensuring it won't be
8584 : * output till any constraint dependencies are OK. If the constraint
8585 : * has not been validated, it's going to be dumped after the domain
8586 : * anyway, so this doesn't matter.
8587 : */
8588 332 : if (validated)
8589 322 : addObjectDependency(&tyinfo->dobj, constraint->dobj.dumpId);
8590 : }
8591 :
8592 316 : PQclear(res);
8593 :
8594 316 : destroyPQExpBuffer(query);
8595 316 : }
8596 :
8597 : /*
8598 : * getRules
8599 : * get basic information about every rule in the system
8600 : */
8601 : void
8602 376 : getRules(Archive *fout)
8603 : {
8604 : PGresult *res;
8605 : int ntups;
8606 : int i;
8607 376 : PQExpBuffer query = createPQExpBuffer();
8608 : RuleInfo *ruleinfo;
8609 : int i_tableoid;
8610 : int i_oid;
8611 : int i_rulename;
8612 : int i_ruletable;
8613 : int i_ev_type;
8614 : int i_is_instead;
8615 : int i_ev_enabled;
8616 :
8617 376 : appendPQExpBufferStr(query, "SELECT "
8618 : "tableoid, oid, rulename, "
8619 : "ev_class AS ruletable, ev_type, is_instead, "
8620 : "ev_enabled "
8621 : "FROM pg_rewrite "
8622 : "ORDER BY oid");
8623 :
8624 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8625 :
8626 376 : ntups = PQntuples(res);
8627 :
8628 376 : ruleinfo = (RuleInfo *) pg_malloc(ntups * sizeof(RuleInfo));
8629 :
8630 376 : i_tableoid = PQfnumber(res, "tableoid");
8631 376 : i_oid = PQfnumber(res, "oid");
8632 376 : i_rulename = PQfnumber(res, "rulename");
8633 376 : i_ruletable = PQfnumber(res, "ruletable");
8634 376 : i_ev_type = PQfnumber(res, "ev_type");
8635 376 : i_is_instead = PQfnumber(res, "is_instead");
8636 376 : i_ev_enabled = PQfnumber(res, "ev_enabled");
8637 :
8638 58686 : for (i = 0; i < ntups; i++)
8639 : {
8640 : Oid ruletableoid;
8641 :
8642 58310 : ruleinfo[i].dobj.objType = DO_RULE;
8643 58310 : ruleinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8644 58310 : ruleinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8645 58310 : AssignDumpId(&ruleinfo[i].dobj);
8646 58310 : ruleinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_rulename));
8647 58310 : ruletableoid = atooid(PQgetvalue(res, i, i_ruletable));
8648 58310 : ruleinfo[i].ruletable = findTableByOid(ruletableoid);
8649 58310 : if (ruleinfo[i].ruletable == NULL)
8650 0 : pg_fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
8651 : ruletableoid, ruleinfo[i].dobj.catId.oid);
8652 58310 : ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace;
8653 58310 : ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump;
8654 58310 : ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type));
8655 58310 : ruleinfo[i].is_instead = *(PQgetvalue(res, i, i_is_instead)) == 't';
8656 58310 : ruleinfo[i].ev_enabled = *(PQgetvalue(res, i, i_ev_enabled));
8657 58310 : if (ruleinfo[i].ruletable)
8658 : {
8659 : /*
8660 : * If the table is a view or materialized view, force its ON
8661 : * SELECT rule to be sorted before the view itself --- this
8662 : * ensures that any dependencies for the rule affect the table's
8663 : * positioning. Other rules are forced to appear after their
8664 : * table.
8665 : */
8666 58310 : if ((ruleinfo[i].ruletable->relkind == RELKIND_VIEW ||
8667 1398 : ruleinfo[i].ruletable->relkind == RELKIND_MATVIEW) &&
8668 57848 : ruleinfo[i].ev_type == '1' && ruleinfo[i].is_instead)
8669 : {
8670 57012 : addObjectDependency(&ruleinfo[i].ruletable->dobj,
8671 57012 : ruleinfo[i].dobj.dumpId);
8672 : /* We'll merge the rule into CREATE VIEW, if possible */
8673 57012 : ruleinfo[i].separate = false;
8674 : }
8675 : else
8676 : {
8677 1298 : addObjectDependency(&ruleinfo[i].dobj,
8678 1298 : ruleinfo[i].ruletable->dobj.dumpId);
8679 1298 : ruleinfo[i].separate = true;
8680 : }
8681 : }
8682 : else
8683 0 : ruleinfo[i].separate = true;
8684 : }
8685 :
8686 376 : PQclear(res);
8687 :
8688 376 : destroyPQExpBuffer(query);
8689 376 : }
8690 :
8691 : /*
8692 : * getTriggers
8693 : * get information about every trigger on a dumpable table
8694 : *
8695 : * Note: trigger data is not returned directly to the caller, but it
8696 : * does get entered into the DumpableObject tables.
8697 : */
8698 : void
8699 376 : getTriggers(Archive *fout, TableInfo tblinfo[], int numTables)
8700 : {
8701 376 : PQExpBuffer query = createPQExpBuffer();
8702 376 : PQExpBuffer tbloids = createPQExpBuffer();
8703 : PGresult *res;
8704 : int ntups;
8705 : int curtblindx;
8706 : TriggerInfo *tginfo;
8707 : int i_tableoid,
8708 : i_oid,
8709 : i_tgrelid,
8710 : i_tgname,
8711 : i_tgenabled,
8712 : i_tgispartition,
8713 : i_tgdef;
8714 :
8715 : /*
8716 : * We want to perform just one query against pg_trigger. However, we
8717 : * mustn't try to select every row of the catalog and then sort it out on
8718 : * the client side, because some of the server-side functions we need
8719 : * would be unsafe to apply to tables we don't have lock on. Hence, we
8720 : * build an array of the OIDs of tables we care about (and now have lock
8721 : * on!), and use a WHERE clause to constrain which rows are selected.
8722 : */
8723 376 : appendPQExpBufferChar(tbloids, '{');
8724 99348 : for (int i = 0; i < numTables; i++)
8725 : {
8726 98972 : TableInfo *tbinfo = &tblinfo[i];
8727 :
8728 98972 : if (!tbinfo->hastriggers ||
8729 2228 : !(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
8730 97268 : continue;
8731 :
8732 : /* OK, we need info for this table */
8733 1704 : if (tbloids->len > 1) /* do we have more than the '{'? */
8734 1602 : appendPQExpBufferChar(tbloids, ',');
8735 1704 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
8736 : }
8737 376 : appendPQExpBufferChar(tbloids, '}');
8738 :
8739 376 : if (fout->remoteVersion >= 150000)
8740 : {
8741 : /*
8742 : * NB: think not to use pretty=true in pg_get_triggerdef. It could
8743 : * result in non-forward-compatible dumps of WHEN clauses due to
8744 : * under-parenthesization.
8745 : *
8746 : * NB: We need to see partition triggers in case the tgenabled flag
8747 : * has been changed from the parent.
8748 : */
8749 376 : appendPQExpBuffer(query,
8750 : "SELECT t.tgrelid, t.tgname, "
8751 : "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8752 : "t.tgenabled, t.tableoid, t.oid, "
8753 : "t.tgparentid <> 0 AS tgispartition\n"
8754 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8755 : "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8756 : "LEFT JOIN pg_catalog.pg_trigger u ON (u.oid = t.tgparentid) "
8757 : "WHERE ((NOT t.tgisinternal AND t.tgparentid = 0) "
8758 : "OR t.tgenabled != u.tgenabled) "
8759 : "ORDER BY t.tgrelid, t.tgname",
8760 : tbloids->data);
8761 : }
8762 0 : else if (fout->remoteVersion >= 130000)
8763 : {
8764 : /*
8765 : * NB: think not to use pretty=true in pg_get_triggerdef. It could
8766 : * result in non-forward-compatible dumps of WHEN clauses due to
8767 : * under-parenthesization.
8768 : *
8769 : * NB: We need to see tgisinternal triggers in partitions, in case the
8770 : * tgenabled flag has been changed from the parent.
8771 : */
8772 0 : appendPQExpBuffer(query,
8773 : "SELECT t.tgrelid, t.tgname, "
8774 : "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8775 : "t.tgenabled, t.tableoid, t.oid, t.tgisinternal as tgispartition\n"
8776 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8777 : "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8778 : "LEFT JOIN pg_catalog.pg_trigger u ON (u.oid = t.tgparentid) "
8779 : "WHERE (NOT t.tgisinternal OR t.tgenabled != u.tgenabled) "
8780 : "ORDER BY t.tgrelid, t.tgname",
8781 : tbloids->data);
8782 : }
8783 0 : else if (fout->remoteVersion >= 110000)
8784 : {
8785 : /*
8786 : * NB: We need to see tgisinternal triggers in partitions, in case the
8787 : * tgenabled flag has been changed from the parent. No tgparentid in
8788 : * version 11-12, so we have to match them via pg_depend.
8789 : *
8790 : * See above about pretty=true in pg_get_triggerdef.
8791 : */
8792 0 : appendPQExpBuffer(query,
8793 : "SELECT t.tgrelid, t.tgname, "
8794 : "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8795 : "t.tgenabled, t.tableoid, t.oid, t.tgisinternal as tgispartition "
8796 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8797 : "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8798 : "LEFT JOIN pg_catalog.pg_depend AS d ON "
8799 : " d.classid = 'pg_catalog.pg_trigger'::pg_catalog.regclass AND "
8800 : " d.refclassid = 'pg_catalog.pg_trigger'::pg_catalog.regclass AND "
8801 : " d.objid = t.oid "
8802 : "LEFT JOIN pg_catalog.pg_trigger AS pt ON pt.oid = refobjid "
8803 : "WHERE (NOT t.tgisinternal OR t.tgenabled != pt.tgenabled) "
8804 : "ORDER BY t.tgrelid, t.tgname",
8805 : tbloids->data);
8806 : }
8807 : else
8808 : {
8809 : /* See above about pretty=true in pg_get_triggerdef */
8810 0 : appendPQExpBuffer(query,
8811 : "SELECT t.tgrelid, t.tgname, "
8812 : "pg_catalog.pg_get_triggerdef(t.oid, false) AS tgdef, "
8813 : "t.tgenabled, false as tgispartition, "
8814 : "t.tableoid, t.oid "
8815 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
8816 : "JOIN pg_catalog.pg_trigger t ON (src.tbloid = t.tgrelid) "
8817 : "WHERE NOT tgisinternal "
8818 : "ORDER BY t.tgrelid, t.tgname",
8819 : tbloids->data);
8820 : }
8821 :
8822 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8823 :
8824 376 : ntups = PQntuples(res);
8825 :
8826 376 : i_tableoid = PQfnumber(res, "tableoid");
8827 376 : i_oid = PQfnumber(res, "oid");
8828 376 : i_tgrelid = PQfnumber(res, "tgrelid");
8829 376 : i_tgname = PQfnumber(res, "tgname");
8830 376 : i_tgenabled = PQfnumber(res, "tgenabled");
8831 376 : i_tgispartition = PQfnumber(res, "tgispartition");
8832 376 : i_tgdef = PQfnumber(res, "tgdef");
8833 :
8834 376 : tginfo = (TriggerInfo *) pg_malloc(ntups * sizeof(TriggerInfo));
8835 :
8836 : /*
8837 : * Outer loop iterates once per table, not once per row. Incrementing of
8838 : * j is handled by the inner loop.
8839 : */
8840 376 : curtblindx = -1;
8841 988 : for (int j = 0; j < ntups;)
8842 : {
8843 612 : Oid tgrelid = atooid(PQgetvalue(res, j, i_tgrelid));
8844 612 : TableInfo *tbinfo = NULL;
8845 : int numtrigs;
8846 :
8847 : /* Count rows for this table */
8848 1046 : for (numtrigs = 1; numtrigs < ntups - j; numtrigs++)
8849 944 : if (atooid(PQgetvalue(res, j + numtrigs, i_tgrelid)) != tgrelid)
8850 510 : break;
8851 :
8852 : /*
8853 : * Locate the associated TableInfo; we rely on tblinfo[] being in OID
8854 : * order.
8855 : */
8856 32090 : while (++curtblindx < numTables)
8857 : {
8858 32090 : tbinfo = &tblinfo[curtblindx];
8859 32090 : if (tbinfo->dobj.catId.oid == tgrelid)
8860 612 : break;
8861 : }
8862 612 : if (curtblindx >= numTables)
8863 0 : pg_fatal("unrecognized table OID %u", tgrelid);
8864 :
8865 : /* Save data for this table */
8866 612 : tbinfo->triggers = tginfo + j;
8867 612 : tbinfo->numTriggers = numtrigs;
8868 :
8869 1658 : for (int c = 0; c < numtrigs; c++, j++)
8870 : {
8871 1046 : tginfo[j].dobj.objType = DO_TRIGGER;
8872 1046 : tginfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
8873 1046 : tginfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
8874 1046 : AssignDumpId(&tginfo[j].dobj);
8875 1046 : tginfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_tgname));
8876 1046 : tginfo[j].dobj.namespace = tbinfo->dobj.namespace;
8877 1046 : tginfo[j].tgtable = tbinfo;
8878 1046 : tginfo[j].tgenabled = *(PQgetvalue(res, j, i_tgenabled));
8879 1046 : tginfo[j].tgispartition = *(PQgetvalue(res, j, i_tgispartition)) == 't';
8880 1046 : tginfo[j].tgdef = pg_strdup(PQgetvalue(res, j, i_tgdef));
8881 : }
8882 : }
8883 :
8884 376 : PQclear(res);
8885 :
8886 376 : destroyPQExpBuffer(query);
8887 376 : destroyPQExpBuffer(tbloids);
8888 376 : }
8889 :
8890 : /*
8891 : * getEventTriggers
8892 : * get information about event triggers
8893 : */
8894 : void
8895 376 : getEventTriggers(Archive *fout)
8896 : {
8897 : int i;
8898 : PQExpBuffer query;
8899 : PGresult *res;
8900 : EventTriggerInfo *evtinfo;
8901 : int i_tableoid,
8902 : i_oid,
8903 : i_evtname,
8904 : i_evtevent,
8905 : i_evtowner,
8906 : i_evttags,
8907 : i_evtfname,
8908 : i_evtenabled;
8909 : int ntups;
8910 :
8911 : /* Before 9.3, there are no event triggers */
8912 376 : if (fout->remoteVersion < 90300)
8913 0 : return;
8914 :
8915 376 : query = createPQExpBuffer();
8916 :
8917 376 : appendPQExpBufferStr(query,
8918 : "SELECT e.tableoid, e.oid, evtname, evtenabled, "
8919 : "evtevent, evtowner, "
8920 : "array_to_string(array("
8921 : "select quote_literal(x) "
8922 : " from unnest(evttags) as t(x)), ', ') as evttags, "
8923 : "e.evtfoid::regproc as evtfname "
8924 : "FROM pg_event_trigger e "
8925 : "ORDER BY e.oid");
8926 :
8927 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
8928 :
8929 376 : ntups = PQntuples(res);
8930 :
8931 376 : evtinfo = (EventTriggerInfo *) pg_malloc(ntups * sizeof(EventTriggerInfo));
8932 :
8933 376 : i_tableoid = PQfnumber(res, "tableoid");
8934 376 : i_oid = PQfnumber(res, "oid");
8935 376 : i_evtname = PQfnumber(res, "evtname");
8936 376 : i_evtevent = PQfnumber(res, "evtevent");
8937 376 : i_evtowner = PQfnumber(res, "evtowner");
8938 376 : i_evttags = PQfnumber(res, "evttags");
8939 376 : i_evtfname = PQfnumber(res, "evtfname");
8940 376 : i_evtenabled = PQfnumber(res, "evtenabled");
8941 :
8942 480 : for (i = 0; i < ntups; i++)
8943 : {
8944 104 : evtinfo[i].dobj.objType = DO_EVENT_TRIGGER;
8945 104 : evtinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
8946 104 : evtinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
8947 104 : AssignDumpId(&evtinfo[i].dobj);
8948 104 : evtinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_evtname));
8949 104 : evtinfo[i].evtname = pg_strdup(PQgetvalue(res, i, i_evtname));
8950 104 : evtinfo[i].evtevent = pg_strdup(PQgetvalue(res, i, i_evtevent));
8951 104 : evtinfo[i].evtowner = getRoleName(PQgetvalue(res, i, i_evtowner));
8952 104 : evtinfo[i].evttags = pg_strdup(PQgetvalue(res, i, i_evttags));
8953 104 : evtinfo[i].evtfname = pg_strdup(PQgetvalue(res, i, i_evtfname));
8954 104 : evtinfo[i].evtenabled = *(PQgetvalue(res, i, i_evtenabled));
8955 :
8956 : /* Decide whether we want to dump it */
8957 104 : selectDumpableObject(&(evtinfo[i].dobj), fout);
8958 : }
8959 :
8960 376 : PQclear(res);
8961 :
8962 376 : destroyPQExpBuffer(query);
8963 : }
8964 :
8965 : /*
8966 : * getProcLangs
8967 : * get basic information about every procedural language in the system
8968 : *
8969 : * NB: this must run after getFuncs() because we assume we can do
8970 : * findFuncByOid().
8971 : */
8972 : void
8973 376 : getProcLangs(Archive *fout)
8974 : {
8975 : PGresult *res;
8976 : int ntups;
8977 : int i;
8978 376 : PQExpBuffer query = createPQExpBuffer();
8979 : ProcLangInfo *planginfo;
8980 : int i_tableoid;
8981 : int i_oid;
8982 : int i_lanname;
8983 : int i_lanpltrusted;
8984 : int i_lanplcallfoid;
8985 : int i_laninline;
8986 : int i_lanvalidator;
8987 : int i_lanacl;
8988 : int i_acldefault;
8989 : int i_lanowner;
8990 :
8991 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, "
8992 : "lanname, lanpltrusted, lanplcallfoid, "
8993 : "laninline, lanvalidator, "
8994 : "lanacl, "
8995 : "acldefault('l', lanowner) AS acldefault, "
8996 : "lanowner "
8997 : "FROM pg_language "
8998 : "WHERE lanispl "
8999 : "ORDER BY oid");
9000 :
9001 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
9002 :
9003 376 : ntups = PQntuples(res);
9004 :
9005 376 : planginfo = (ProcLangInfo *) pg_malloc(ntups * sizeof(ProcLangInfo));
9006 :
9007 376 : i_tableoid = PQfnumber(res, "tableoid");
9008 376 : i_oid = PQfnumber(res, "oid");
9009 376 : i_lanname = PQfnumber(res, "lanname");
9010 376 : i_lanpltrusted = PQfnumber(res, "lanpltrusted");
9011 376 : i_lanplcallfoid = PQfnumber(res, "lanplcallfoid");
9012 376 : i_laninline = PQfnumber(res, "laninline");
9013 376 : i_lanvalidator = PQfnumber(res, "lanvalidator");
9014 376 : i_lanacl = PQfnumber(res, "lanacl");
9015 376 : i_acldefault = PQfnumber(res, "acldefault");
9016 376 : i_lanowner = PQfnumber(res, "lanowner");
9017 :
9018 842 : for (i = 0; i < ntups; i++)
9019 : {
9020 466 : planginfo[i].dobj.objType = DO_PROCLANG;
9021 466 : planginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9022 466 : planginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9023 466 : AssignDumpId(&planginfo[i].dobj);
9024 :
9025 466 : planginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_lanname));
9026 466 : planginfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lanacl));
9027 466 : planginfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
9028 466 : planginfo[i].dacl.privtype = 0;
9029 466 : planginfo[i].dacl.initprivs = NULL;
9030 466 : planginfo[i].lanpltrusted = *(PQgetvalue(res, i, i_lanpltrusted)) == 't';
9031 466 : planginfo[i].lanplcallfoid = atooid(PQgetvalue(res, i, i_lanplcallfoid));
9032 466 : planginfo[i].laninline = atooid(PQgetvalue(res, i, i_laninline));
9033 466 : planginfo[i].lanvalidator = atooid(PQgetvalue(res, i, i_lanvalidator));
9034 466 : planginfo[i].lanowner = getRoleName(PQgetvalue(res, i, i_lanowner));
9035 :
9036 : /* Decide whether we want to dump it */
9037 466 : selectDumpableProcLang(&(planginfo[i]), fout);
9038 :
9039 : /* Mark whether language has an ACL */
9040 466 : if (!PQgetisnull(res, i, i_lanacl))
9041 90 : planginfo[i].dobj.components |= DUMP_COMPONENT_ACL;
9042 : }
9043 :
9044 376 : PQclear(res);
9045 :
9046 376 : destroyPQExpBuffer(query);
9047 376 : }
9048 :
9049 : /*
9050 : * getCasts
9051 : * get basic information about most casts in the system
9052 : *
9053 : * Skip casts from a range to its multirange, since we'll create those
9054 : * automatically.
9055 : */
9056 : void
9057 376 : getCasts(Archive *fout)
9058 : {
9059 : PGresult *res;
9060 : int ntups;
9061 : int i;
9062 376 : PQExpBuffer query = createPQExpBuffer();
9063 : CastInfo *castinfo;
9064 : int i_tableoid;
9065 : int i_oid;
9066 : int i_castsource;
9067 : int i_casttarget;
9068 : int i_castfunc;
9069 : int i_castcontext;
9070 : int i_castmethod;
9071 :
9072 376 : if (fout->remoteVersion >= 140000)
9073 : {
9074 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9075 : "castsource, casttarget, castfunc, castcontext, "
9076 : "castmethod "
9077 : "FROM pg_cast c "
9078 : "WHERE NOT EXISTS ( "
9079 : "SELECT 1 FROM pg_range r "
9080 : "WHERE c.castsource = r.rngtypid "
9081 : "AND c.casttarget = r.rngmultitypid "
9082 : ") "
9083 : "ORDER BY 3,4");
9084 : }
9085 : else
9086 : {
9087 0 : appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9088 : "castsource, casttarget, castfunc, castcontext, "
9089 : "castmethod "
9090 : "FROM pg_cast ORDER BY 3,4");
9091 : }
9092 :
9093 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
9094 :
9095 376 : ntups = PQntuples(res);
9096 :
9097 376 : castinfo = (CastInfo *) pg_malloc(ntups * sizeof(CastInfo));
9098 :
9099 376 : i_tableoid = PQfnumber(res, "tableoid");
9100 376 : i_oid = PQfnumber(res, "oid");
9101 376 : i_castsource = PQfnumber(res, "castsource");
9102 376 : i_casttarget = PQfnumber(res, "casttarget");
9103 376 : i_castfunc = PQfnumber(res, "castfunc");
9104 376 : i_castcontext = PQfnumber(res, "castcontext");
9105 376 : i_castmethod = PQfnumber(res, "castmethod");
9106 :
9107 91166 : for (i = 0; i < ntups; i++)
9108 : {
9109 : PQExpBufferData namebuf;
9110 : TypeInfo *sTypeInfo;
9111 : TypeInfo *tTypeInfo;
9112 :
9113 90790 : castinfo[i].dobj.objType = DO_CAST;
9114 90790 : castinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9115 90790 : castinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9116 90790 : AssignDumpId(&castinfo[i].dobj);
9117 90790 : castinfo[i].castsource = atooid(PQgetvalue(res, i, i_castsource));
9118 90790 : castinfo[i].casttarget = atooid(PQgetvalue(res, i, i_casttarget));
9119 90790 : castinfo[i].castfunc = atooid(PQgetvalue(res, i, i_castfunc));
9120 90790 : castinfo[i].castcontext = *(PQgetvalue(res, i, i_castcontext));
9121 90790 : castinfo[i].castmethod = *(PQgetvalue(res, i, i_castmethod));
9122 :
9123 : /*
9124 : * Try to name cast as concatenation of typnames. This is only used
9125 : * for purposes of sorting. If we fail to find either type, the name
9126 : * will be an empty string.
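                   :                  * For example (hypothetical pair), a cast between int4 and int8
                   :                  * would get the sort name "int4 int8".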
9127 : */
9128 90790 : initPQExpBuffer(&namebuf);
9129 90790 : sTypeInfo = findTypeByOid(castinfo[i].castsource);
9130 90790 : tTypeInfo = findTypeByOid(castinfo[i].casttarget);
9131 90790 : if (sTypeInfo && tTypeInfo)
9132 90790 : appendPQExpBuffer(&namebuf, "%s %s",
9133 : sTypeInfo->dobj.name, tTypeInfo->dobj.name);
9134 90790 : castinfo[i].dobj.name = namebuf.data;
9135 :
9136 : /* Decide whether we want to dump it */
9137 90790 : selectDumpableCast(&(castinfo[i]), fout);
9138 : }
9139 :
9140 376 : PQclear(res);
9141 :
9142 376 : destroyPQExpBuffer(query);
9143 376 : }
9144 :
9145 : static char *
9146 176 : get_language_name(Archive *fout, Oid langid)
9147 : {
9148 : PQExpBuffer query;
9149 : PGresult *res;
9150 : char *lanname;
9151 :
9152 176 : query = createPQExpBuffer();
9153 176 : appendPQExpBuffer(query, "SELECT lanname FROM pg_language WHERE oid = %u", langid);
9154 176 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
9155 176 : lanname = pg_strdup(fmtId(PQgetvalue(res, 0, 0)));
9156 176 : destroyPQExpBuffer(query);
9157 176 : PQclear(res);
9158 :
9159 176 : return lanname;
9160 : }
9161 :
9162 : /*
9163 : * getTransforms
9164 : * get basic information about every transform in the system
9165 : */
9166 : void
9167 376 : getTransforms(Archive *fout)
9168 : {
9169 : PGresult *res;
9170 : int ntups;
9171 : int i;
9172 : PQExpBuffer query;
9173 : TransformInfo *transforminfo;
9174 : int i_tableoid;
9175 : int i_oid;
9176 : int i_trftype;
9177 : int i_trflang;
9178 : int i_trffromsql;
9179 : int i_trftosql;
9180 :
9181 : /* Transforms didn't exist pre-9.5 */
9182 376 : if (fout->remoteVersion < 90500)
9183 0 : return;
9184 :
9185 376 : query = createPQExpBuffer();
9186 :
9187 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, "
9188 : "trftype, trflang, trffromsql::oid, trftosql::oid "
9189 : "FROM pg_transform "
9190 : "ORDER BY 3,4");
9191 :
9192 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
9193 :
9194 376 : ntups = PQntuples(res);
9195 :
9196 376 : transforminfo = (TransformInfo *) pg_malloc(ntups * sizeof(TransformInfo));
9197 :
9198 376 : i_tableoid = PQfnumber(res, "tableoid");
9199 376 : i_oid = PQfnumber(res, "oid");
9200 376 : i_trftype = PQfnumber(res, "trftype");
9201 376 : i_trflang = PQfnumber(res, "trflang");
9202 376 : i_trffromsql = PQfnumber(res, "trffromsql");
9203 376 : i_trftosql = PQfnumber(res, "trftosql");
9204 :
9205 480 : for (i = 0; i < ntups; i++)
9206 : {
9207 : PQExpBufferData namebuf;
9208 : TypeInfo *typeInfo;
9209 : char *lanname;
9210 :
9211 104 : transforminfo[i].dobj.objType = DO_TRANSFORM;
9212 104 : transforminfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
9213 104 : transforminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
9214 104 : AssignDumpId(&transforminfo[i].dobj);
9215 104 : transforminfo[i].trftype = atooid(PQgetvalue(res, i, i_trftype));
9216 104 : transforminfo[i].trflang = atooid(PQgetvalue(res, i, i_trflang));
9217 104 : transforminfo[i].trffromsql = atooid(PQgetvalue(res, i, i_trffromsql));
9218 104 : transforminfo[i].trftosql = atooid(PQgetvalue(res, i, i_trftosql));
9219 :
9220 : /*
9221 : * Try to name transform as concatenation of type and language name.
9222 : * This is only used for purposes of sorting. If we fail to find
9223 : * either, the name will be an empty string.
9224 : */
9225 104 : initPQExpBuffer(&namebuf);
9226 104 : typeInfo = findTypeByOid(transforminfo[i].trftype);
9227 104 : lanname = get_language_name(fout, transforminfo[i].trflang);
9228 104 : if (typeInfo && lanname)
9229 104 : appendPQExpBuffer(&namebuf, "%s %s",
9230 : typeInfo->dobj.name, lanname);
9231 104 : transforminfo[i].dobj.name = namebuf.data;
9232 104 : free(lanname);
9233 :
9234 : /* Decide whether we want to dump it */
9235 104 : selectDumpableObject(&(transforminfo[i].dobj), fout);
9236 : }
9237 :
9238 376 : PQclear(res);
9239 :
9240 376 : destroyPQExpBuffer(query);
9241 : }
9242 :
9243 : /*
9244 : * getTableAttrs -
9245 : * for each interesting table, read info about its attributes
9246 : * (names, types, default values, CHECK constraints, etc)
9247 : *
9248 : * modifies tblinfo
9249 : */
9250 : void
9251 376 : getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
9252 : {
9253 376 : DumpOptions *dopt = fout->dopt;
9254 376 : PQExpBuffer q = createPQExpBuffer();
9255 376 : PQExpBuffer tbloids = createPQExpBuffer();
9256 376 : PQExpBuffer checkoids = createPQExpBuffer();
9257 376 : PQExpBuffer invalidnotnulloids = NULL;
9258 : PGresult *res;
9259 : int ntups;
9260 : int curtblindx;
9261 : int i_attrelid;
9262 : int i_attnum;
9263 : int i_attname;
9264 : int i_atttypname;
9265 : int i_attstattarget;
9266 : int i_attstorage;
9267 : int i_typstorage;
9268 : int i_attidentity;
9269 : int i_attgenerated;
9270 : int i_attisdropped;
9271 : int i_attlen;
9272 : int i_attalign;
9273 : int i_attislocal;
9274 : int i_notnull_name;
9275 : int i_notnull_comment;
9276 : int i_notnull_noinherit;
9277 : int i_notnull_islocal;
9278 : int i_notnull_invalidoid;
9279 : int i_attoptions;
9280 : int i_attcollation;
9281 : int i_attcompression;
9282 : int i_attfdwoptions;
9283 : int i_attmissingval;
9284 : int i_atthasdef;
9285 :
9286 : /*
9287 : * We want to perform just one query against pg_attribute, and then just
9288 : * one against pg_attrdef (for DEFAULTs) and two against pg_constraint
9289 : * (for CHECK constraints and for NOT NULL constraints). However, we
 9290              :      * (for CHECK constraints and for invalid not-null constraints).  However, we
9291 : * on the client side, because some of the server-side functions we need
9292 : * would be unsafe to apply to tables we don't have lock on. Hence, we
9293 : * build an array of the OIDs of tables we care about (and now have lock
9294 : * on!), and use a WHERE clause to constrain which rows are selected.
 9295              :      * on!), and use a WHERE clause to constrain which rows are selected.
                   :      */
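                   :     /*
                   :      * Illustrative sketch (hypothetical OIDs): if tables 16385 and 16402 are
                   :      * the interesting ones, tbloids ends up holding the array literal
                   :      * '{16385,16402}', and the queries below restrict themselves to those
                   :      * relations with a join of the form
                   :      *
                   :      *     FROM unnest('{16385,16402}'::pg_catalog.oid[]) AS src(tbloid)
                   :      *     JOIN pg_catalog.pg_attribute a ON (src.tbloid = a.attrelid)
                   :      *
                   :      * so that only tables we already hold AccessShareLock on are examined
                   :      * server-side.
                   :      */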
9296 376 : appendPQExpBufferChar(tbloids, '{');
9297 376 : appendPQExpBufferChar(checkoids, '{');
9298 99348 : for (int i = 0; i < numTables; i++)
9299 : {
9300 98972 : TableInfo *tbinfo = &tblinfo[i];
9301 :
9302 : /* Don't bother to collect info for sequences */
9303 98972 : if (tbinfo->relkind == RELKIND_SEQUENCE)
9304 1276 : continue;
9305 :
9306 : /*
9307 : * Don't bother with uninteresting tables, either. For binary
9308 : * upgrades, this is bypassed for pg_largeobject_metadata and
 9309              :          * pg_shdepend so that the column names are collected for the
9310 : * corresponding COPY commands. Restoring the data for those catalogs
9311 : * is faster than restoring the equivalent set of large object
9312 : * commands. We can only do this for upgrades from v12 and newer; in
9313 : * older versions, pg_largeobject_metadata was created WITH OIDS, so
9314 : * the OID column is hidden and won't be dumped.
 9315              :          * the OID column is hidden and won't be dumped.
                   :          */
9316 97696 : if (!tbinfo->interesting &&
9317 84470 : !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
9318 16376 : (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
9319 16300 : tbinfo->dobj.catId.oid == SharedDependRelationId)))
9320 84318 : continue;
9321 :
9322 : /* OK, we need info for this table */
9323 13378 : if (tbloids->len > 1) /* do we have more than the '{'? */
9324 13090 : appendPQExpBufferChar(tbloids, ',');
9325 13378 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
9326 :
9327 13378 : if (tbinfo->ncheck > 0)
9328 : {
9329 : /* Also make a list of the ones with check constraints */
9330 1056 : if (checkoids->len > 1) /* do we have more than the '{'? */
9331 918 : appendPQExpBufferChar(checkoids, ',');
9332 1056 : appendPQExpBuffer(checkoids, "%u", tbinfo->dobj.catId.oid);
9333 : }
9334 : }
9335 376 : appendPQExpBufferChar(tbloids, '}');
9336 376 : appendPQExpBufferChar(checkoids, '}');
9337 :
9338 : /*
9339 : * Find all the user attributes and their types.
9340 : *
9341 : * Since we only want to dump COLLATE clauses for attributes whose
9342 : * collation is different from their type's default, we use a CASE here to
9343 : * suppress uninteresting attcollations cheaply.
9344 : */
9345 376 : appendPQExpBufferStr(q,
9346 : "SELECT\n"
9347 : "a.attrelid,\n"
9348 : "a.attnum,\n"
9349 : "a.attname,\n"
9350 : "a.attstattarget,\n"
9351 : "a.attstorage,\n"
9352 : "t.typstorage,\n"
9353 : "a.atthasdef,\n"
9354 : "a.attisdropped,\n"
9355 : "a.attlen,\n"
9356 : "a.attalign,\n"
9357 : "a.attislocal,\n"
9358 : "pg_catalog.format_type(t.oid, a.atttypmod) AS atttypname,\n"
9359 : "array_to_string(a.attoptions, ', ') AS attoptions,\n"
9360 : "CASE WHEN a.attcollation <> t.typcollation "
9361 : "THEN a.attcollation ELSE 0 END AS attcollation,\n"
9362 : "pg_catalog.array_to_string(ARRAY("
9363 : "SELECT pg_catalog.quote_ident(option_name) || "
9364 : "' ' || pg_catalog.quote_literal(option_value) "
9365 : "FROM pg_catalog.pg_options_to_table(attfdwoptions) "
9366 : "ORDER BY option_name"
9367 : "), E',\n ') AS attfdwoptions,\n");
9368 :
9369 : /*
9370 : * Find out any NOT NULL markings for each column. In 18 and up we read
9371 : * pg_constraint to obtain the constraint name, and for valid constraints
9372 : * also pg_description to obtain its comment. notnull_noinherit is set
9373 : * according to the NO INHERIT property. For versions prior to 18, we
9374 : * store an empty string as the name when a constraint is marked as
9375 : * attnotnull (this cues dumpTableSchema to print the NOT NULL clause
9376 : * without a name); also, such cases are never NO INHERIT.
9377 : *
9378 : * For invalid constraints, we need to store their OIDs for processing
9379 : * elsewhere, so we bring the pg_constraint.oid value when the constraint
 9380              :      * elsewhere, so we fetch the pg_constraint.oid value when the constraint
9381 : * but by collectComments, because they're their own dumpable object.
9382 : *
9383 : * We track in notnull_islocal whether the constraint was defined directly
9384 : * in this table or via an ancestor, for binary upgrade. flagInhAttrs
9385 : * might modify this later.
9386 : */
9387 376 : if (fout->remoteVersion >= 180000)
9388 376 : appendPQExpBufferStr(q,
9389 : "co.conname AS notnull_name,\n"
9390 : "CASE WHEN co.convalidated THEN pt.description"
9391 : " ELSE NULL END AS notnull_comment,\n"
9392 : "CASE WHEN NOT co.convalidated THEN co.oid "
9393 : "ELSE NULL END AS notnull_invalidoid,\n"
9394 : "co.connoinherit AS notnull_noinherit,\n"
9395 : "co.conislocal AS notnull_islocal,\n");
9396 : else
9397 0 : appendPQExpBufferStr(q,
9398 : "CASE WHEN a.attnotnull THEN '' ELSE NULL END AS notnull_name,\n"
9399 : "NULL AS notnull_comment,\n"
9400 : "NULL AS notnull_invalidoid,\n"
9401 : "false AS notnull_noinherit,\n"
 9402              :                                                  "a.attislocal AS notnull_islocal,\n");
9406 :
9407 376 : if (fout->remoteVersion >= 140000)
9408 376 : appendPQExpBufferStr(q,
9409 : "a.attcompression AS attcompression,\n");
9410 : else
9411 0 : appendPQExpBufferStr(q,
9412 : "'' AS attcompression,\n");
9413 :
9414 376 : if (fout->remoteVersion >= 100000)
9415 376 : appendPQExpBufferStr(q,
9416 : "a.attidentity,\n");
9417 : else
9418 0 : appendPQExpBufferStr(q,
9419 : "'' AS attidentity,\n");
9420 :
9421 376 : if (fout->remoteVersion >= 110000)
9422 376 : appendPQExpBufferStr(q,
9423 : "CASE WHEN a.atthasmissing AND NOT a.attisdropped "
9424 : "THEN a.attmissingval ELSE null END AS attmissingval,\n");
9425 : else
9426 0 : appendPQExpBufferStr(q,
9427 : "NULL AS attmissingval,\n");
9428 :
9429 376 : if (fout->remoteVersion >= 120000)
9430 376 : appendPQExpBufferStr(q,
9431 : "a.attgenerated\n");
9432 : else
9433 0 : appendPQExpBufferStr(q,
9434 : "'' AS attgenerated\n");
9435 :
9436 : /* need left join to pg_type to not fail on dropped columns ... */
9437 376 : appendPQExpBuffer(q,
9438 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9439 : "JOIN pg_catalog.pg_attribute a ON (src.tbloid = a.attrelid) "
9440 : "LEFT JOIN pg_catalog.pg_type t "
9441 : "ON (a.atttypid = t.oid)\n",
9442 : tbloids->data);
9443 :
9444 : /*
9445 : * In versions 18 and up, we need pg_constraint for explicit NOT NULL
9446 : * entries and pg_description to get their comments.
9447 : */
9448 376 : if (fout->remoteVersion >= 180000)
9449 376 : appendPQExpBufferStr(q,
9450 : " LEFT JOIN pg_catalog.pg_constraint co ON "
9451 : "(a.attrelid = co.conrelid\n"
9452 : " AND co.contype = 'n' AND "
9453 : "co.conkey = array[a.attnum])\n"
9454 : " LEFT JOIN pg_catalog.pg_description pt ON "
9455 : "(pt.classoid = co.tableoid AND pt.objoid = co.oid)\n");
9456 :
9457 376 : appendPQExpBufferStr(q,
9458 : "WHERE a.attnum > 0::pg_catalog.int2\n"
9459 : "ORDER BY a.attrelid, a.attnum");
9460 :
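                   :     /*
                   :      * For reference, on a v18-or-later server the assembled query has roughly
                   :      * this shape (abridged; the full column list is built above):
                   :      *
                   :      *     SELECT a.attrelid, a.attnum, a.attname, ...,
                   :      *            co.conname AS notnull_name, ...
                   :      *     FROM unnest('{...}'::pg_catalog.oid[]) AS src(tbloid)
                   :      *     JOIN pg_catalog.pg_attribute a ON (src.tbloid = a.attrelid)
                   :      *     LEFT JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid)
                   :      *     LEFT JOIN pg_catalog.pg_constraint co ON (a.attrelid = co.conrelid
                   :      *          AND co.contype = 'n' AND co.conkey = array[a.attnum])
                   :      *     LEFT JOIN pg_catalog.pg_description pt ON (pt.classoid = co.tableoid
                   :      *          AND pt.objoid = co.oid)
                   :      *     WHERE a.attnum > 0::pg_catalog.int2
                   :      *     ORDER BY a.attrelid, a.attnum
                   :      */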
9461 376 : res = ExecuteSqlQuery(fout, q->data, PGRES_TUPLES_OK);
9462 :
9463 376 : ntups = PQntuples(res);
9464 :
9465 376 : i_attrelid = PQfnumber(res, "attrelid");
9466 376 : i_attnum = PQfnumber(res, "attnum");
9467 376 : i_attname = PQfnumber(res, "attname");
9468 376 : i_atttypname = PQfnumber(res, "atttypname");
9469 376 : i_attstattarget = PQfnumber(res, "attstattarget");
9470 376 : i_attstorage = PQfnumber(res, "attstorage");
9471 376 : i_typstorage = PQfnumber(res, "typstorage");
9472 376 : i_attidentity = PQfnumber(res, "attidentity");
9473 376 : i_attgenerated = PQfnumber(res, "attgenerated");
9474 376 : i_attisdropped = PQfnumber(res, "attisdropped");
9475 376 : i_attlen = PQfnumber(res, "attlen");
9476 376 : i_attalign = PQfnumber(res, "attalign");
9477 376 : i_attislocal = PQfnumber(res, "attislocal");
9478 376 : i_notnull_name = PQfnumber(res, "notnull_name");
9479 376 : i_notnull_comment = PQfnumber(res, "notnull_comment");
9480 376 : i_notnull_invalidoid = PQfnumber(res, "notnull_invalidoid");
9481 376 : i_notnull_noinherit = PQfnumber(res, "notnull_noinherit");
9482 376 : i_notnull_islocal = PQfnumber(res, "notnull_islocal");
9483 376 : i_attoptions = PQfnumber(res, "attoptions");
9484 376 : i_attcollation = PQfnumber(res, "attcollation");
9485 376 : i_attcompression = PQfnumber(res, "attcompression");
9486 376 : i_attfdwoptions = PQfnumber(res, "attfdwoptions");
9487 376 : i_attmissingval = PQfnumber(res, "attmissingval");
9488 376 : i_atthasdef = PQfnumber(res, "atthasdef");
9489 :
9490 : /* Within the next loop, we'll accumulate OIDs of tables with defaults */
9491 376 : resetPQExpBuffer(tbloids);
9492 376 : appendPQExpBufferChar(tbloids, '{');
9493 :
9494 : /*
9495 : * Outer loop iterates once per table, not once per row. Incrementing of
9496 : * r is handled by the inner loop.
9497 : */
9498 376 : curtblindx = -1;
9499 13478 : for (int r = 0; r < ntups;)
9500 : {
9501 13102 : Oid attrelid = atooid(PQgetvalue(res, r, i_attrelid));
9502 13102 : TableInfo *tbinfo = NULL;
9503 : int numatts;
9504 : bool hasdefaults;
9505 :
9506 : /* Count rows for this table */
9507 49554 : for (numatts = 1; numatts < ntups - r; numatts++)
9508 49272 : if (atooid(PQgetvalue(res, r + numatts, i_attrelid)) != attrelid)
9509 12820 : break;
9510 :
9511 : /*
9512 : * Locate the associated TableInfo; we rely on tblinfo[] being in OID
9513 : * order.
9514 : */
9515 68344 : while (++curtblindx < numTables)
9516 : {
9517 68344 : tbinfo = &tblinfo[curtblindx];
9518 68344 : if (tbinfo->dobj.catId.oid == attrelid)
9519 13102 : break;
9520 : }
9521 13102 : if (curtblindx >= numTables)
9522 0 : pg_fatal("unrecognized table OID %u", attrelid);
9523 : /* cross-check that we only got requested tables */
9524 13102 : if (tbinfo->relkind == RELKIND_SEQUENCE ||
9525 13102 : (!tbinfo->interesting &&
9526 152 : !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
9527 152 : (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
9528 76 : tbinfo->dobj.catId.oid == SharedDependRelationId))))
9529 0 : pg_fatal("unexpected column data for table \"%s\"",
9530 : tbinfo->dobj.name);
9531 :
9532 : /* Save data for this table */
9533 13102 : tbinfo->numatts = numatts;
9534 13102 : tbinfo->attnames = (char **) pg_malloc(numatts * sizeof(char *));
9535 13102 : tbinfo->atttypnames = (char **) pg_malloc(numatts * sizeof(char *));
9536 13102 : tbinfo->attstattarget = (int *) pg_malloc(numatts * sizeof(int));
9537 13102 : tbinfo->attstorage = (char *) pg_malloc(numatts * sizeof(char));
9538 13102 : tbinfo->typstorage = (char *) pg_malloc(numatts * sizeof(char));
9539 13102 : tbinfo->attidentity = (char *) pg_malloc(numatts * sizeof(char));
9540 13102 : tbinfo->attgenerated = (char *) pg_malloc(numatts * sizeof(char));
9541 13102 : tbinfo->attisdropped = (bool *) pg_malloc(numatts * sizeof(bool));
9542 13102 : tbinfo->attlen = (int *) pg_malloc(numatts * sizeof(int));
9543 13102 : tbinfo->attalign = (char *) pg_malloc(numatts * sizeof(char));
9544 13102 : tbinfo->attislocal = (bool *) pg_malloc(numatts * sizeof(bool));
9545 13102 : tbinfo->attoptions = (char **) pg_malloc(numatts * sizeof(char *));
9546 13102 : tbinfo->attcollation = (Oid *) pg_malloc(numatts * sizeof(Oid));
9547 13102 : tbinfo->attcompression = (char *) pg_malloc(numatts * sizeof(char));
9548 13102 : tbinfo->attfdwoptions = (char **) pg_malloc(numatts * sizeof(char *));
9549 13102 : tbinfo->attmissingval = (char **) pg_malloc(numatts * sizeof(char *));
9550 13102 : tbinfo->notnull_constrs = (char **) pg_malloc(numatts * sizeof(char *));
9551 13102 : tbinfo->notnull_comment = (char **) pg_malloc(numatts * sizeof(char *));
9552 13102 : tbinfo->notnull_invalid = (bool *) pg_malloc(numatts * sizeof(bool));
9553 13102 : tbinfo->notnull_noinh = (bool *) pg_malloc(numatts * sizeof(bool));
9554 13102 : tbinfo->notnull_islocal = (bool *) pg_malloc(numatts * sizeof(bool));
9555 13102 : tbinfo->attrdefs = (AttrDefInfo **) pg_malloc(numatts * sizeof(AttrDefInfo *));
9556 13102 : hasdefaults = false;
9557 :
9558 62656 : for (int j = 0; j < numatts; j++, r++)
9559 : {
9560 49554 : if (j + 1 != atoi(PQgetvalue(res, r, i_attnum)))
9561 0 : pg_fatal("invalid column numbering in table \"%s\"",
9562 : tbinfo->dobj.name);
9563 49554 : tbinfo->attnames[j] = pg_strdup(PQgetvalue(res, r, i_attname));
9564 49554 : tbinfo->atttypnames[j] = pg_strdup(PQgetvalue(res, r, i_atttypname));
9565 49554 : if (PQgetisnull(res, r, i_attstattarget))
9566 49474 : tbinfo->attstattarget[j] = -1;
9567 : else
9568 80 : tbinfo->attstattarget[j] = atoi(PQgetvalue(res, r, i_attstattarget));
9569 49554 : tbinfo->attstorage[j] = *(PQgetvalue(res, r, i_attstorage));
9570 49554 : tbinfo->typstorage[j] = *(PQgetvalue(res, r, i_typstorage));
9571 49554 : tbinfo->attidentity[j] = *(PQgetvalue(res, r, i_attidentity));
9572 49554 : tbinfo->attgenerated[j] = *(PQgetvalue(res, r, i_attgenerated));
9573 49554 : tbinfo->needs_override = tbinfo->needs_override || (tbinfo->attidentity[j] == ATTRIBUTE_IDENTITY_ALWAYS);
9574 49554 : tbinfo->attisdropped[j] = (PQgetvalue(res, r, i_attisdropped)[0] == 't');
9575 49554 : tbinfo->attlen[j] = atoi(PQgetvalue(res, r, i_attlen));
9576 49554 : tbinfo->attalign[j] = *(PQgetvalue(res, r, i_attalign));
9577 49554 : tbinfo->attislocal[j] = (PQgetvalue(res, r, i_attislocal)[0] == 't');
9578 :
9579 : /* Handle not-null constraint name and flags */
9580 49554 : determineNotNullFlags(fout, res, r,
9581 : tbinfo, j,
9582 : i_notnull_name,
9583 : i_notnull_comment,
9584 : i_notnull_invalidoid,
9585 : i_notnull_noinherit,
9586 : i_notnull_islocal,
9587 : &invalidnotnulloids);
9588 :
9589 49554 : tbinfo->notnull_comment[j] = PQgetisnull(res, r, i_notnull_comment) ?
9590 49554 : NULL : pg_strdup(PQgetvalue(res, r, i_notnull_comment));
9591 49554 : tbinfo->attoptions[j] = pg_strdup(PQgetvalue(res, r, i_attoptions));
9592 49554 : tbinfo->attcollation[j] = atooid(PQgetvalue(res, r, i_attcollation));
9593 49554 : tbinfo->attcompression[j] = *(PQgetvalue(res, r, i_attcompression));
9594 49554 : tbinfo->attfdwoptions[j] = pg_strdup(PQgetvalue(res, r, i_attfdwoptions));
9595 49554 : tbinfo->attmissingval[j] = pg_strdup(PQgetvalue(res, r, i_attmissingval));
9596 49554 : tbinfo->attrdefs[j] = NULL; /* fix below */
9597 49554 : if (PQgetvalue(res, r, i_atthasdef)[0] == 't')
9598 2536 : hasdefaults = true;
9599 : }
9600 :
9601 13102 : if (hasdefaults)
9602 : {
9603 : /* Collect OIDs of interesting tables that have defaults */
9604 1904 : if (tbloids->len > 1) /* do we have more than the '{'? */
9605 1768 : appendPQExpBufferChar(tbloids, ',');
9606 1904 : appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
9607 : }
9608 : }
9609 :
9610 : /* If invalidnotnulloids has any data, finalize it */
9611 376 : if (invalidnotnulloids != NULL)
9612 86 : appendPQExpBufferChar(invalidnotnulloids, '}');
9613 :
9614 376 : PQclear(res);
9615 :
9616 : /*
9617 : * Now get info about column defaults. This is skipped for a data-only
9618 : * dump, as it is only needed for table schemas.
9619 : */
9620 376 : if (dopt->dumpSchema && tbloids->len > 1)
9621 : {
9622 : AttrDefInfo *attrdefs;
9623 : int numDefaults;
9624 120 : TableInfo *tbinfo = NULL;
9625 :
9626 120 : pg_log_info("finding table default expressions");
9627 :
9628 120 : appendPQExpBufferChar(tbloids, '}');
9629 :
9630 120 : printfPQExpBuffer(q, "SELECT a.tableoid, a.oid, adrelid, adnum, "
9631 : "pg_catalog.pg_get_expr(adbin, adrelid) AS adsrc\n"
9632 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9633 : "JOIN pg_catalog.pg_attrdef a ON (src.tbloid = a.adrelid)\n"
9634 : "ORDER BY a.adrelid, a.adnum",
9635 : tbloids->data);
9636 :
9637 120 : res = ExecuteSqlQuery(fout, q->data, PGRES_TUPLES_OK);
9638 :
9639 120 : numDefaults = PQntuples(res);
9640 120 : attrdefs = (AttrDefInfo *) pg_malloc(numDefaults * sizeof(AttrDefInfo));
9641 :
9642 120 : curtblindx = -1;
9643 2460 : for (int j = 0; j < numDefaults; j++)
9644 : {
9645 2340 : Oid adtableoid = atooid(PQgetvalue(res, j, 0));
9646 2340 : Oid adoid = atooid(PQgetvalue(res, j, 1));
9647 2340 : Oid adrelid = atooid(PQgetvalue(res, j, 2));
9648 2340 : int adnum = atoi(PQgetvalue(res, j, 3));
9649 2340 : char *adsrc = PQgetvalue(res, j, 4);
9650 :
9651 : /*
9652 : * Locate the associated TableInfo; we rely on tblinfo[] being in
9653 : * OID order.
9654 : */
9655 2340 : if (tbinfo == NULL || tbinfo->dobj.catId.oid != adrelid)
9656 : {
9657 37456 : while (++curtblindx < numTables)
9658 : {
9659 37456 : tbinfo = &tblinfo[curtblindx];
9660 37456 : if (tbinfo->dobj.catId.oid == adrelid)
9661 1768 : break;
9662 : }
9663 1768 : if (curtblindx >= numTables)
9664 0 : pg_fatal("unrecognized table OID %u", adrelid);
9665 : }
9666 :
9667 2340 : if (adnum <= 0 || adnum > tbinfo->numatts)
9668 0 : pg_fatal("invalid adnum value %d for table \"%s\"",
9669 : adnum, tbinfo->dobj.name);
9670 :
9671 : /*
9672 : * dropped columns shouldn't have defaults, but just in case,
9673 : * ignore 'em
9674 : */
9675 2340 : if (tbinfo->attisdropped[adnum - 1])
9676 0 : continue;
9677 :
9678 2340 : attrdefs[j].dobj.objType = DO_ATTRDEF;
9679 2340 : attrdefs[j].dobj.catId.tableoid = adtableoid;
9680 2340 : attrdefs[j].dobj.catId.oid = adoid;
9681 2340 : AssignDumpId(&attrdefs[j].dobj);
9682 2340 : attrdefs[j].adtable = tbinfo;
9683 2340 : attrdefs[j].adnum = adnum;
9684 2340 : attrdefs[j].adef_expr = pg_strdup(adsrc);
9685 :
9686 2340 : attrdefs[j].dobj.name = pg_strdup(tbinfo->dobj.name);
9687 2340 : attrdefs[j].dobj.namespace = tbinfo->dobj.namespace;
9688 :
9689 2340 : attrdefs[j].dobj.dump = tbinfo->dobj.dump;
9690 :
9691 : /*
9692 : * Figure out whether the default/generation expression should be
9693 : * dumped as part of the main CREATE TABLE (or similar) command or
9694 : * as a separate ALTER TABLE (or similar) command. The preference
9695 : * is to put it into the CREATE command, but in some cases that's
9696 : * not possible.
9697 : */
9698 2340 : if (tbinfo->attgenerated[adnum - 1])
9699 : {
9700 : /*
9701 : * Column generation expressions cannot be dumped separately,
9702 : * because there is no syntax for it. By setting separate to
9703 : * false here we prevent the "default" from being processed as
9704 : * its own dumpable object. Later, flagInhAttrs() will mark
9705 : * it as not to be dumped at all, if possible (that is, if it
9706 : * can be inherited from a parent).
9707 : */
9708 1312 : attrdefs[j].separate = false;
9709 : }
9710 1028 : else if (tbinfo->relkind == RELKIND_VIEW)
9711 : {
9712 : /*
9713 : * Defaults on a VIEW must always be dumped as separate ALTER
9714 : * TABLE commands.
9715 : */
9716 64 : attrdefs[j].separate = true;
9717 : }
9718 964 : else if (!shouldPrintColumn(dopt, tbinfo, adnum - 1))
9719 : {
9720 : /* column will be suppressed, print default separately */
9721 8 : attrdefs[j].separate = true;
9722 : }
9723 : else
9724 : {
9725 956 : attrdefs[j].separate = false;
9726 : }
9727 :
9728 2340 : if (!attrdefs[j].separate)
9729 : {
9730 : /*
9731 : * Mark the default as needing to appear before the table, so
9732 : * that any dependencies it has must be emitted before the
9733 : * CREATE TABLE. If this is not possible, we'll change to
9734 : * "separate" mode while sorting dependencies.
9735 : */
9736 2268 : addObjectDependency(&tbinfo->dobj,
9737 2268 : attrdefs[j].dobj.dumpId);
9738 : }
9739 :
9740 2340 : tbinfo->attrdefs[adnum - 1] = &attrdefs[j];
9741 : }
9742 :
9743 120 : PQclear(res);
9744 : }
9745 :
9746 : /*
9747 : * Get info about NOT NULL NOT VALID constraints. This is skipped for a
9748 : * data-only dump, as it is only needed for table schemas.
9749 : */
9750 376 : if (dopt->dumpSchema && invalidnotnulloids)
9751 : {
9752 : ConstraintInfo *constrs;
9753 : int numConstrs;
9754 : int i_tableoid;
9755 : int i_oid;
9756 : int i_conrelid;
9757 : int i_conname;
9758 : int i_consrc;
9759 : int i_conislocal;
9760 :
9761 74 : pg_log_info("finding invalid not-null constraints");
9762 :
9763 74 : resetPQExpBuffer(q);
9764 74 : appendPQExpBuffer(q,
9765 : "SELECT c.tableoid, c.oid, conrelid, conname, "
9766 : "pg_catalog.pg_get_constraintdef(c.oid) AS consrc, "
9767 : "conislocal, convalidated "
9768 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(conoid)\n"
9769 : "JOIN pg_catalog.pg_constraint c ON (src.conoid = c.oid)\n"
9770 : "ORDER BY c.conrelid, c.conname",
9771 74 : invalidnotnulloids->data);
9772 :
9773 74 : res = ExecuteSqlQuery(fout, q->data, PGRES_TUPLES_OK);
9774 :
9775 74 : numConstrs = PQntuples(res);
9776 74 : constrs = (ConstraintInfo *) pg_malloc(numConstrs * sizeof(ConstraintInfo));
9777 :
9778 74 : i_tableoid = PQfnumber(res, "tableoid");
9779 74 : i_oid = PQfnumber(res, "oid");
9780 74 : i_conrelid = PQfnumber(res, "conrelid");
9781 74 : i_conname = PQfnumber(res, "conname");
9782 74 : i_consrc = PQfnumber(res, "consrc");
9783 74 : i_conislocal = PQfnumber(res, "conislocal");
9784 :
9785 : /* As above, this loop iterates once per table, not once per row */
9786 74 : curtblindx = -1;
9787 208 : for (int j = 0; j < numConstrs;)
9788 : {
9789 134 : Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
9790 134 : TableInfo *tbinfo = NULL;
9791 : int numcons;
9792 :
9793 : /* Count rows for this table */
9794 134 : for (numcons = 1; numcons < numConstrs - j; numcons++)
9795 60 : if (atooid(PQgetvalue(res, j + numcons, i_conrelid)) != conrelid)
9796 60 : break;
9797 :
9798 : /*
9799 : * Locate the associated TableInfo; we rely on tblinfo[] being in
9800 : * OID order.
9801 : */
9802 24876 : while (++curtblindx < numTables)
9803 : {
9804 24876 : tbinfo = &tblinfo[curtblindx];
9805 24876 : if (tbinfo->dobj.catId.oid == conrelid)
9806 134 : break;
9807 : }
9808 134 : if (curtblindx >= numTables)
9809 0 : pg_fatal("unrecognized table OID %u", conrelid);
9810 :
9811 268 : for (int c = 0; c < numcons; c++, j++)
9812 : {
9813 134 : constrs[j].dobj.objType = DO_CONSTRAINT;
9814 134 : constrs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
9815 134 : constrs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
9816 134 : AssignDumpId(&constrs[j].dobj);
9817 134 : constrs[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
9818 134 : constrs[j].dobj.namespace = tbinfo->dobj.namespace;
9819 134 : constrs[j].contable = tbinfo;
9820 134 : constrs[j].condomain = NULL;
9821 134 : constrs[j].contype = 'n';
9822 134 : constrs[j].condef = pg_strdup(PQgetvalue(res, j, i_consrc));
9823 134 : constrs[j].confrelid = InvalidOid;
9824 134 : constrs[j].conindex = 0;
9825 134 : constrs[j].condeferrable = false;
9826 134 : constrs[j].condeferred = false;
9827 134 : constrs[j].conislocal = (PQgetvalue(res, j, i_conislocal)[0] == 't');
9828 :
9829 : /*
9830 : * All invalid not-null constraints must be dumped separately,
9831 : * because CREATE TABLE would not create them as invalid, and
9832 : * also because they must be created after potentially
9833 : * violating data has been loaded.
9834 : */
9835 134 : constrs[j].separate = true;
9836 :
9837 134 : constrs[j].dobj.dump = tbinfo->dobj.dump;
9838 : }
9839 : }
9840 74 : PQclear(res);
9841 : }
9842 :
9843 : /*
9844 : * Get info about table CHECK constraints. This is skipped for a
9845 : * data-only dump, as it is only needed for table schemas.
9846 : */
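                   :     /* checkoids always holds "{...}" by now, so len > 2 means it is non-empty */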
9847 376 : if (dopt->dumpSchema && checkoids->len > 2)
9848 : {
9849 : ConstraintInfo *constrs;
9850 : int numConstrs;
9851 : int i_tableoid;
9852 : int i_oid;
9853 : int i_conrelid;
9854 : int i_conname;
9855 : int i_consrc;
9856 : int i_conislocal;
9857 : int i_convalidated;
9858 :
9859 122 : pg_log_info("finding table check constraints");
9860 :
9861 122 : resetPQExpBuffer(q);
9862 122 : appendPQExpBuffer(q,
9863 : "SELECT c.tableoid, c.oid, conrelid, conname, "
9864 : "pg_catalog.pg_get_constraintdef(c.oid) AS consrc, "
9865 : "conislocal, convalidated "
9866 : "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n"
9867 : "JOIN pg_catalog.pg_constraint c ON (src.tbloid = c.conrelid)\n"
9868 : "WHERE contype = 'c' "
9869 : "ORDER BY c.conrelid, c.conname",
9870 : checkoids->data);
9871 :
9872 122 : res = ExecuteSqlQuery(fout, q->data, PGRES_TUPLES_OK);
9873 :
9874 122 : numConstrs = PQntuples(res);
9875 122 : constrs = (ConstraintInfo *) pg_malloc(numConstrs * sizeof(ConstraintInfo));
9876 :
9877 122 : i_tableoid = PQfnumber(res, "tableoid");
9878 122 : i_oid = PQfnumber(res, "oid");
9879 122 : i_conrelid = PQfnumber(res, "conrelid");
9880 122 : i_conname = PQfnumber(res, "conname");
9881 122 : i_consrc = PQfnumber(res, "consrc");
9882 122 : i_conislocal = PQfnumber(res, "conislocal");
9883 122 : i_convalidated = PQfnumber(res, "convalidated");
9884 :
9885 : /* As above, this loop iterates once per table, not once per row */
9886 122 : curtblindx = -1;
9887 1076 : for (int j = 0; j < numConstrs;)
9888 : {
9889 954 : Oid conrelid = atooid(PQgetvalue(res, j, i_conrelid));
9890 954 : TableInfo *tbinfo = NULL;
9891 : int numcons;
9892 :
9893 : /* Count rows for this table */
9894 1224 : for (numcons = 1; numcons < numConstrs - j; numcons++)
9895 1102 : if (atooid(PQgetvalue(res, j + numcons, i_conrelid)) != conrelid)
9896 832 : break;
9897 :
9898 : /*
9899 : * Locate the associated TableInfo; we rely on tblinfo[] being in
9900 : * OID order.
9901 : */
9902 36074 : while (++curtblindx < numTables)
9903 : {
9904 36074 : tbinfo = &tblinfo[curtblindx];
9905 36074 : if (tbinfo->dobj.catId.oid == conrelid)
9906 954 : break;
9907 : }
9908 954 : if (curtblindx >= numTables)
9909 0 : pg_fatal("unrecognized table OID %u", conrelid);
9910 :
9911 954 : if (numcons != tbinfo->ncheck)
9912 : {
9913 0 : pg_log_error(ngettext("expected %d check constraint on table \"%s\" but found %d",
9914 : "expected %d check constraints on table \"%s\" but found %d",
9915 : tbinfo->ncheck),
9916 : tbinfo->ncheck, tbinfo->dobj.name, numcons);
9917 0 : pg_log_error_hint("The system catalogs might be corrupted.");
9918 0 : exit_nicely(1);
9919 : }
9920 :
9921 954 : tbinfo->checkexprs = constrs + j;
9922 :
9923 2178 : for (int c = 0; c < numcons; c++, j++)
9924 : {
9925 1224 : bool validated = PQgetvalue(res, j, i_convalidated)[0] == 't';
9926 :
9927 1224 : constrs[j].dobj.objType = DO_CONSTRAINT;
9928 1224 : constrs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
9929 1224 : constrs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
9930 1224 : AssignDumpId(&constrs[j].dobj);
9931 1224 : constrs[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
9932 1224 : constrs[j].dobj.namespace = tbinfo->dobj.namespace;
9933 1224 : constrs[j].contable = tbinfo;
9934 1224 : constrs[j].condomain = NULL;
9935 1224 : constrs[j].contype = 'c';
9936 1224 : constrs[j].condef = pg_strdup(PQgetvalue(res, j, i_consrc));
9937 1224 : constrs[j].confrelid = InvalidOid;
9938 1224 : constrs[j].conindex = 0;
9939 1224 : constrs[j].condeferrable = false;
9940 1224 : constrs[j].condeferred = false;
9941 1224 : constrs[j].conislocal = (PQgetvalue(res, j, i_conislocal)[0] == 't');
9942 :
9943 : /*
9944 : * An unvalidated constraint needs to be dumped separately, so
9945 : * that potentially-violating existing data is loaded before
9946 : * the constraint.
9947 : */
9948 1224 : constrs[j].separate = !validated;
9949 :
9950 1224 : constrs[j].dobj.dump = tbinfo->dobj.dump;
9951 :
9952 : /*
9953 : * Mark the constraint as needing to appear before the table
9954 : * --- this is so that any other dependencies of the
9955 : * constraint will be emitted before we try to create the
9956 : * table. If the constraint is to be dumped separately, it
9957 : * will be dumped after data is loaded anyway, so don't do it.
9958 : * (There's an automatic dependency in the opposite direction
9959 : * anyway, so don't need to add one manually here.)
9960 : */
9961 1224 : if (!constrs[j].separate)
9962 1094 : addObjectDependency(&tbinfo->dobj,
9963 1094 : constrs[j].dobj.dumpId);
9964 :
9965 : /*
9966 : * We will detect later whether the constraint must be split
9967 : * out from the table definition.
9968 : */
9969 : }
9970 : }
9971 :
9972 122 : PQclear(res);
9973 : }
9974 :
9975 376 : destroyPQExpBuffer(q);
9976 376 : destroyPQExpBuffer(tbloids);
9977 376 : destroyPQExpBuffer(checkoids);
9978 376 : }
9979 :
9980 : /*
9981 : * Based on the getTableAttrs query's row corresponding to one column, set
9982 : * the name and flags to handle a not-null constraint for that column in
9983 : * the tbinfo struct.
9984 : *
9985 : * Result row 'r' is for tbinfo's attribute 'j'.
9986 : *
9987 : * There are four possibilities:
9988 : * 1) the column has no not-null constraints. In that case, ->notnull_constrs
9989 : * (the constraint name) remains NULL.
9990 : * 2) The column has a constraint with no name (this is the case when
9991 : * constraints come from pre-18 servers). In this case, ->notnull_constrs
9992 : * is set to the empty string; dumpTableSchema will print just "NOT NULL".
9993 : * 3) The column has an invalid not-null constraint. This must be treated
9994 : * as a separate object (because it must be created after the table data
9995 : * is loaded). So we add its OID to invalidnotnulloids for processing
9996 : * elsewhere and do nothing further with it here. We distinguish this
9997 : * case because the "notnull_invalidoid" column has been set to a non-NULL
9998 : * value, which is the constraint OID. Valid constraints have a null OID.
9999 : * 4) The column has a constraint with a known name; in that case
10000 : * notnull_constrs carries that name and dumpTableSchema will print
10001 : * "CONSTRAINT the_name NOT NULL". However, if the name is the default
10002 : * (table_column_not_null) and there's no comment on the constraint,
10003 : * there's no need to print that name in the dump, so notnull_constrs
10004 : * is set to the empty string and it behaves as case 2.
10005 : *
10006 : * In a child table that inherits from a parent already containing NOT NULL
 10007              :  * In a child table that inherits from a parent already containing NOT NULL
 10008              :  * constraints, when the child's columns don't have their own NOT NULL
 10009              :  * declarations we suppress printing constraints in the child: the
10010 : * parent. This is tracked in ->notnull_islocal; for servers pre-18 this is
10011 : * set not here but in flagInhAttrs. That flag is also used when the
10012 : * constraint was validated in a child but all its parent have it as NOT
 10013              :  * constraint was validated in a child but all its parents have it as NOT
10014 : *
10015 : * Any of these constraints might have the NO INHERIT bit. If so we set
10016 : * ->notnull_noinh and NO INHERIT will be printed by dumpTableSchema.
10017 : *
10018 : * In case 4 above, the name comparison is a bit of a hack; it actually fails
10019 : * to do the right thing in all but the trivial case. However, the downside
10020 : * of getting it wrong is simply that the name is printed rather than
10021 : * suppressed, so it's not a big deal.
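                    :  *
                    :  * Example (hypothetical objects): for a table "t" with a column "a", a
                    :  * constraint named "t_a_not_null" carrying no comment matches the default
                    :  * name pattern, so notnull_constrs is set to "" and a bare NOT NULL is
                    :  * printed; a constraint named "a_filled" keeps its name and is printed as
                    :  * CONSTRAINT a_filled NOT NULL.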
10022 : *
10023 : * invalidnotnulloids is expected to be given as NULL; if any invalid not-null
10024 : * constraints are found, it is initialized and filled with the array of
10025 : * OIDs of such constraints, for later processing.
10026 : */
10027 : static void
10028 49554 : determineNotNullFlags(Archive *fout, PGresult *res, int r,
10029 : TableInfo *tbinfo, int j,
10030 : int i_notnull_name,
10031 : int i_notnull_comment,
10032 : int i_notnull_invalidoid,
10033 : int i_notnull_noinherit,
10034 : int i_notnull_islocal,
10035 : PQExpBuffer *invalidnotnulloids)
10036 : {
10037 49554 : DumpOptions *dopt = fout->dopt;
10038 :
10039 : /*
10040 : * If this not-null constraint is not valid, list its OID in
10041 : * invalidnotnulloids and do nothing further. It'll be processed
10042 : * elsewhere later.
10043 : *
10044 : * Because invalid not-null constraints are rare, we don't want to malloc
 10045              :      * invalidnotnulloids until we're sure we're going to need it, which
10046 : * happens here.
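                    :      *
                    :      * The buffer accumulates a partial array literal such as '{16455,16470'
                    :      * (hypothetical OIDs); getTableAttrs appends the closing '}' after all
                    :      * columns have been scanned and then uses the result in the query that
                    :      * fetches the invalid not-null constraints.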
10047 : */
10048 49554 : if (!PQgetisnull(res, r, i_notnull_invalidoid))
10049 : {
10050 146 : char *constroid = PQgetvalue(res, r, i_notnull_invalidoid);
10051 :
10052 146 : if (*invalidnotnulloids == NULL)
10053 : {
10054 86 : *invalidnotnulloids = createPQExpBuffer();
10055 86 : appendPQExpBufferChar(*invalidnotnulloids, '{');
10056 86 : appendPQExpBufferStr(*invalidnotnulloids, constroid);
10057 : }
10058 : else
10059 60 : appendPQExpBuffer(*invalidnotnulloids, ",%s", constroid);
10060 :
10061 : /*
10062 : * Track when a parent constraint is invalid for the cases where a
 10063              :          * child constraint has been validated independently.
10064 : */
10065 146 : tbinfo->notnull_invalid[j] = true;
10066 :
10067 : /* nothing else to do */
10068 146 : tbinfo->notnull_constrs[j] = NULL;
10069 146 : return;
10070 : }
10071 :
10072 : /*
10073 : * notnull_noinh is straight from the query result. notnull_islocal also,
10074 : * though flagInhAttrs may change that one later.
10075 : */
10076 49408 : tbinfo->notnull_noinh[j] = PQgetvalue(res, r, i_notnull_noinherit)[0] == 't';
10077 49408 : tbinfo->notnull_islocal[j] = PQgetvalue(res, r, i_notnull_islocal)[0] == 't';
10078 49408 : tbinfo->notnull_invalid[j] = false;
10079 :
10080 : /*
10081 : * Determine a constraint name to use. If the column is not marked not-
10082 : * null, we set NULL which cues ... to do nothing. An empty string says
10083 : * to print an unnamed NOT NULL, and anything else is a constraint name to
10084 : * use.
10085 : */
10086 49408 : if (fout->remoteVersion < 180000)
10087 : {
10088 : /*
10089 : * < 18 doesn't have not-null names, so an unnamed constraint is
10090 : * sufficient.
10091 : */
10092 0 : if (PQgetisnull(res, r, i_notnull_name))
10093 0 : tbinfo->notnull_constrs[j] = NULL;
10094 : else
10095 0 : tbinfo->notnull_constrs[j] = "";
10096 : }
10097 : else
10098 : {
10099 49408 : if (PQgetisnull(res, r, i_notnull_name))
10100 44158 : tbinfo->notnull_constrs[j] = NULL;
10101 : else
10102 : {
10103 : /*
10104 : * In binary upgrade of inheritance child tables, must have a
10105 : * constraint name that we can UPDATE later; same if there's a
10106 : * comment on the constraint.
10107 : */
10108 5250 : if ((dopt->binary_upgrade &&
10109 666 : !tbinfo->ispartition &&
10110 5756 : !tbinfo->notnull_islocal) ||
10111 5250 : !PQgetisnull(res, r, i_notnull_comment))
10112 : {
10113 96 : tbinfo->notnull_constrs[j] =
10114 96 : pstrdup(PQgetvalue(res, r, i_notnull_name));
10115 : }
10116 : else
10117 : {
10118 : char *default_name;
10119 :
10120 : /* XXX should match ChooseConstraintName better */
10121 5154 : default_name = psprintf("%s_%s_not_null", tbinfo->dobj.name,
10122 5154 : tbinfo->attnames[j]);
10123 5154 : if (strcmp(default_name,
10124 5154 : PQgetvalue(res, r, i_notnull_name)) == 0)
10125 3406 : tbinfo->notnull_constrs[j] = "";
10126 : else
10127 : {
10128 1748 : tbinfo->notnull_constrs[j] =
10129 1748 : pstrdup(PQgetvalue(res, r, i_notnull_name));
10130 : }
10131 5154 : free(default_name);
10132 : }
10133 : }
10134 : }
10135 : }
10136 :
10137 : /*
10138 : * Test whether a column should be printed as part of table's CREATE TABLE.
10139 : * Column number is zero-based.
10140 : *
10141 : * Normally this is always true, but it's false for dropped columns, as well
10142 : * as those that were inherited without any local definition. (If we print
10143 : * such a column it will mistakenly get pg_attribute.attislocal set to true.)
10144 : * For partitions, it's always true, because we want the partitions to be
10145 : * created independently and ATTACH PARTITION used afterwards.
10146 : *
10147 : * In binary_upgrade mode, we must print all columns and fix the attislocal/
10148 : * attisdropped state later, so as to keep control of the physical column
10149 : * order.
10150 : *
10151 : * This function exists because there are scattered nonobvious places that
10152 : * must be kept in sync with this decision.
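                    :  *
                    :  * Example (hypothetical tables): a column that child "c" merely inherits
                    :  * from parent "p" (attislocal = false) is omitted from c's CREATE TABLE and
                    :  * reacquired through inheritance, whereas a partition always prints all its
                    :  * columns so that it can be created standalone and attached with ATTACH
                    :  * PARTITION afterwards.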
10153 : */
10154 : bool
10155 80380 : shouldPrintColumn(const DumpOptions *dopt, const TableInfo *tbinfo, int colno)
10156 : {
10157 80380 : if (dopt->binary_upgrade)
10158 12464 : return true;
10159 67916 : if (tbinfo->attisdropped[colno])
10160 1452 : return false;
10161 66464 : return (tbinfo->attislocal[colno] || tbinfo->ispartition);
10162 : }
10163 :
10164 :
10165 : /*
10166 : * getTSParsers:
10167 : * get information about all text search parsers in the system catalogs
10168 : */
10169 : void
10170 376 : getTSParsers(Archive *fout)
10171 : {
10172 : PGresult *res;
10173 : int ntups;
10174 : int i;
10175 : PQExpBuffer query;
10176 : TSParserInfo *prsinfo;
10177 : int i_tableoid;
10178 : int i_oid;
10179 : int i_prsname;
10180 : int i_prsnamespace;
10181 : int i_prsstart;
10182 : int i_prstoken;
10183 : int i_prsend;
10184 : int i_prsheadline;
10185 : int i_prslextype;
10186 :
10187 376 : query = createPQExpBuffer();
10188 :
10189 : /*
10190 : * find all text search objects, including builtin ones; we filter out
10191 : * system-defined objects at dump-out time.
10192 : */
10193 :
10194 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, prsname, prsnamespace, "
10195 : "prsstart::oid, prstoken::oid, "
10196 : "prsend::oid, prsheadline::oid, prslextype::oid "
10197 : "FROM pg_ts_parser");
10198 :
10199 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10200 :
10201 376 : ntups = PQntuples(res);
10202 :
10203 376 : prsinfo = (TSParserInfo *) pg_malloc(ntups * sizeof(TSParserInfo));
10204 :
10205 376 : i_tableoid = PQfnumber(res, "tableoid");
10206 376 : i_oid = PQfnumber(res, "oid");
10207 376 : i_prsname = PQfnumber(res, "prsname");
10208 376 : i_prsnamespace = PQfnumber(res, "prsnamespace");
10209 376 : i_prsstart = PQfnumber(res, "prsstart");
10210 376 : i_prstoken = PQfnumber(res, "prstoken");
10211 376 : i_prsend = PQfnumber(res, "prsend");
10212 376 : i_prsheadline = PQfnumber(res, "prsheadline");
10213 376 : i_prslextype = PQfnumber(res, "prslextype");
10214 :
10215 842 : for (i = 0; i < ntups; i++)
10216 : {
10217 466 : prsinfo[i].dobj.objType = DO_TSPARSER;
10218 466 : prsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10219 466 : prsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10220 466 : AssignDumpId(&prsinfo[i].dobj);
10221 466 : prsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_prsname));
10222 932 : prsinfo[i].dobj.namespace =
10223 466 : findNamespace(atooid(PQgetvalue(res, i, i_prsnamespace)));
10224 466 : prsinfo[i].prsstart = atooid(PQgetvalue(res, i, i_prsstart));
10225 466 : prsinfo[i].prstoken = atooid(PQgetvalue(res, i, i_prstoken));
10226 466 : prsinfo[i].prsend = atooid(PQgetvalue(res, i, i_prsend));
10227 466 : prsinfo[i].prsheadline = atooid(PQgetvalue(res, i, i_prsheadline));
10228 466 : prsinfo[i].prslextype = atooid(PQgetvalue(res, i, i_prslextype));
10229 :
10230 : /* Decide whether we want to dump it */
10231 466 : selectDumpableObject(&(prsinfo[i].dobj), fout);
10232 : }
10233 :
10234 376 : PQclear(res);
10235 :
10236 376 : destroyPQExpBuffer(query);
10237 376 : }
10238 :
10239 : /*
10240 : * getTSDictionaries:
10241 : * get information about all text search dictionaries in the system catalogs
10242 : */
10243 : void
10244 376 : getTSDictionaries(Archive *fout)
10245 : {
10246 : PGresult *res;
10247 : int ntups;
10248 : int i;
10249 : PQExpBuffer query;
10250 : TSDictInfo *dictinfo;
10251 : int i_tableoid;
10252 : int i_oid;
10253 : int i_dictname;
10254 : int i_dictnamespace;
10255 : int i_dictowner;
10256 : int i_dicttemplate;
10257 : int i_dictinitoption;
10258 :
10259 376 : query = createPQExpBuffer();
10260 :
10261 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, dictname, "
10262 : "dictnamespace, dictowner, "
10263 : "dicttemplate, dictinitoption "
10264 : "FROM pg_ts_dict");
10265 :
10266 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10267 :
10268 376 : ntups = PQntuples(res);
10269 :
10270 376 : dictinfo = (TSDictInfo *) pg_malloc(ntups * sizeof(TSDictInfo));
10271 :
10272 376 : i_tableoid = PQfnumber(res, "tableoid");
10273 376 : i_oid = PQfnumber(res, "oid");
10274 376 : i_dictname = PQfnumber(res, "dictname");
10275 376 : i_dictnamespace = PQfnumber(res, "dictnamespace");
10276 376 : i_dictowner = PQfnumber(res, "dictowner");
10277 376 : i_dictinitoption = PQfnumber(res, "dictinitoption");
10278 376 : i_dicttemplate = PQfnumber(res, "dicttemplate");
10279 :
10280 12624 : for (i = 0; i < ntups; i++)
10281 : {
10282 12248 : dictinfo[i].dobj.objType = DO_TSDICT;
10283 12248 : dictinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10284 12248 : dictinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10285 12248 : AssignDumpId(&dictinfo[i].dobj);
10286 12248 : dictinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_dictname));
10287 24496 : dictinfo[i].dobj.namespace =
10288 12248 : findNamespace(atooid(PQgetvalue(res, i, i_dictnamespace)));
10289 12248 : dictinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_dictowner));
10290 12248 : dictinfo[i].dicttemplate = atooid(PQgetvalue(res, i, i_dicttemplate));
10291 12248 : if (PQgetisnull(res, i, i_dictinitoption))
10292 466 : dictinfo[i].dictinitoption = NULL;
10293 : else
10294 11782 : dictinfo[i].dictinitoption = pg_strdup(PQgetvalue(res, i, i_dictinitoption));
10295 :
10296 : /* Decide whether we want to dump it */
10297 12248 : selectDumpableObject(&(dictinfo[i].dobj), fout);
10298 : }
10299 :
10300 376 : PQclear(res);
10301 :
10302 376 : destroyPQExpBuffer(query);
10303 376 : }
10304 :
10305 : /*
10306 : * getTSTemplates:
10307 : * get information about all text search templates in the system catalogs
10308 : */
10309 : void
10310 376 : getTSTemplates(Archive *fout)
10311 : {
10312 : PGresult *res;
10313 : int ntups;
10314 : int i;
10315 : PQExpBuffer query;
10316 : TSTemplateInfo *tmplinfo;
10317 : int i_tableoid;
10318 : int i_oid;
10319 : int i_tmplname;
10320 : int i_tmplnamespace;
10321 : int i_tmplinit;
10322 : int i_tmpllexize;
10323 :
10324 376 : query = createPQExpBuffer();
10325 :
10326 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, tmplname, "
10327 : "tmplnamespace, tmplinit::oid, tmpllexize::oid "
10328 : "FROM pg_ts_template");
10329 :
10330 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10331 :
10332 376 : ntups = PQntuples(res);
10333 :
10334 376 : tmplinfo = (TSTemplateInfo *) pg_malloc(ntups * sizeof(TSTemplateInfo));
10335 :
10336 376 : i_tableoid = PQfnumber(res, "tableoid");
10337 376 : i_oid = PQfnumber(res, "oid");
10338 376 : i_tmplname = PQfnumber(res, "tmplname");
10339 376 : i_tmplnamespace = PQfnumber(res, "tmplnamespace");
10340 376 : i_tmplinit = PQfnumber(res, "tmplinit");
10341 376 : i_tmpllexize = PQfnumber(res, "tmpllexize");
10342 :
10343 2346 : for (i = 0; i < ntups; i++)
10344 : {
10345 1970 : tmplinfo[i].dobj.objType = DO_TSTEMPLATE;
10346 1970 : tmplinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10347 1970 : tmplinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10348 1970 : AssignDumpId(&tmplinfo[i].dobj);
10349 1970 : tmplinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_tmplname));
10350 3940 : tmplinfo[i].dobj.namespace =
10351 1970 : findNamespace(atooid(PQgetvalue(res, i, i_tmplnamespace)));
10352 1970 : tmplinfo[i].tmplinit = atooid(PQgetvalue(res, i, i_tmplinit));
10353 1970 : tmplinfo[i].tmpllexize = atooid(PQgetvalue(res, i, i_tmpllexize));
10354 :
10355 : /* Decide whether we want to dump it */
10356 1970 : selectDumpableObject(&(tmplinfo[i].dobj), fout);
10357 : }
10358 :
10359 376 : PQclear(res);
10360 :
10361 376 : destroyPQExpBuffer(query);
10362 376 : }
10363 :
10364 : /*
10365 : * getTSConfigurations:
10366 : * get information about all text search configurations
10367 : */
10368 : void
10369 376 : getTSConfigurations(Archive *fout)
10370 : {
10371 : PGresult *res;
10372 : int ntups;
10373 : int i;
10374 : PQExpBuffer query;
10375 : TSConfigInfo *cfginfo;
10376 : int i_tableoid;
10377 : int i_oid;
10378 : int i_cfgname;
10379 : int i_cfgnamespace;
10380 : int i_cfgowner;
10381 : int i_cfgparser;
10382 :
10383 376 : query = createPQExpBuffer();
10384 :
10385 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, cfgname, "
10386 : "cfgnamespace, cfgowner, cfgparser "
10387 : "FROM pg_ts_config");
10388 :
10389 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10390 :
10391 376 : ntups = PQntuples(res);
10392 :
10393 376 : cfginfo = (TSConfigInfo *) pg_malloc(ntups * sizeof(TSConfigInfo));
10394 :
10395 376 : i_tableoid = PQfnumber(res, "tableoid");
10396 376 : i_oid = PQfnumber(res, "oid");
10397 376 : i_cfgname = PQfnumber(res, "cfgname");
10398 376 : i_cfgnamespace = PQfnumber(res, "cfgnamespace");
10399 376 : i_cfgowner = PQfnumber(res, "cfgowner");
10400 376 : i_cfgparser = PQfnumber(res, "cfgparser");
10401 :
10402 12554 : for (i = 0; i < ntups; i++)
10403 : {
10404 12178 : cfginfo[i].dobj.objType = DO_TSCONFIG;
10405 12178 : cfginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10406 12178 : cfginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10407 12178 : AssignDumpId(&cfginfo[i].dobj);
10408 12178 : cfginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_cfgname));
10409 24356 : cfginfo[i].dobj.namespace =
10410 12178 : findNamespace(atooid(PQgetvalue(res, i, i_cfgnamespace)));
10411 12178 : cfginfo[i].rolname = getRoleName(PQgetvalue(res, i, i_cfgowner));
10412 12178 : cfginfo[i].cfgparser = atooid(PQgetvalue(res, i, i_cfgparser));
10413 :
10414 : /* Decide whether we want to dump it */
10415 12178 : selectDumpableObject(&(cfginfo[i].dobj), fout);
10416 : }
10417 :
10418 376 : PQclear(res);
10419 :
10420 376 : destroyPQExpBuffer(query);
10421 376 : }
10422 :
10423 : /*
10424 : * getForeignDataWrappers:
10425 : * get information about all foreign-data wrappers in the system catalogs
10426 : */
10427 : void
10428 376 : getForeignDataWrappers(Archive *fout)
10429 : {
10430 : PGresult *res;
10431 : int ntups;
10432 : int i;
10433 : PQExpBuffer query;
10434 : FdwInfo *fdwinfo;
10435 : int i_tableoid;
10436 : int i_oid;
10437 : int i_fdwname;
10438 : int i_fdwowner;
10439 : int i_fdwhandler;
10440 : int i_fdwvalidator;
10441 : int i_fdwacl;
10442 : int i_acldefault;
10443 : int i_fdwoptions;
10444 :
10445 376 : query = createPQExpBuffer();
10446 :
10447 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, fdwname, "
10448 : "fdwowner, "
10449 : "fdwhandler::pg_catalog.regproc, "
10450 : "fdwvalidator::pg_catalog.regproc, "
10451 : "fdwacl, "
10452 : "acldefault('F', fdwowner) AS acldefault, "
10453 : "array_to_string(ARRAY("
10454 : "SELECT quote_ident(option_name) || ' ' || "
10455 : "quote_literal(option_value) "
10456 : "FROM pg_options_to_table(fdwoptions) "
10457 : "ORDER BY option_name"
10458 : "), E',\n ') AS fdwoptions "
10459 : "FROM pg_foreign_data_wrapper");
10460 :
10461 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10462 :
10463 376 : ntups = PQntuples(res);
10464 :
10465 376 : fdwinfo = (FdwInfo *) pg_malloc(ntups * sizeof(FdwInfo));
10466 :
10467 376 : i_tableoid = PQfnumber(res, "tableoid");
10468 376 : i_oid = PQfnumber(res, "oid");
10469 376 : i_fdwname = PQfnumber(res, "fdwname");
10470 376 : i_fdwowner = PQfnumber(res, "fdwowner");
10471 376 : i_fdwhandler = PQfnumber(res, "fdwhandler");
10472 376 : i_fdwvalidator = PQfnumber(res, "fdwvalidator");
10473 376 : i_fdwacl = PQfnumber(res, "fdwacl");
10474 376 : i_acldefault = PQfnumber(res, "acldefault");
10475 376 : i_fdwoptions = PQfnumber(res, "fdwoptions");
10476 :
10477 518 : for (i = 0; i < ntups; i++)
10478 : {
10479 142 : fdwinfo[i].dobj.objType = DO_FDW;
10480 142 : fdwinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10481 142 : fdwinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10482 142 : AssignDumpId(&fdwinfo[i].dobj);
10483 142 : fdwinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_fdwname));
10484 142 : fdwinfo[i].dobj.namespace = NULL;
10485 142 : fdwinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_fdwacl));
10486 142 : fdwinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10487 142 : fdwinfo[i].dacl.privtype = 0;
10488 142 : fdwinfo[i].dacl.initprivs = NULL;
10489 142 : fdwinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_fdwowner));
10490 142 : fdwinfo[i].fdwhandler = pg_strdup(PQgetvalue(res, i, i_fdwhandler));
10491 142 : fdwinfo[i].fdwvalidator = pg_strdup(PQgetvalue(res, i, i_fdwvalidator));
10492 142 : fdwinfo[i].fdwoptions = pg_strdup(PQgetvalue(res, i, i_fdwoptions));
10493 :
10494 : /* Decide whether we want to dump it */
10495 142 : selectDumpableObject(&(fdwinfo[i].dobj), fout);
10496 :
10497 : /* Mark whether FDW has an ACL */
10498 142 : if (!PQgetisnull(res, i, i_fdwacl))
10499 90 : fdwinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10500 : }
10501 :
10502 376 : PQclear(res);
10503 :
10504 376 : destroyPQExpBuffer(query);
10505 376 : }
10506 :
10507 : /*
10508 : * getForeignServers:
10509 : * get information about all foreign servers in the system catalogs
10510 : */
10511 : void
10512 376 : getForeignServers(Archive *fout)
10513 : {
10514 : PGresult *res;
10515 : int ntups;
10516 : int i;
10517 : PQExpBuffer query;
10518 : ForeignServerInfo *srvinfo;
10519 : int i_tableoid;
10520 : int i_oid;
10521 : int i_srvname;
10522 : int i_srvowner;
10523 : int i_srvfdw;
10524 : int i_srvtype;
10525 : int i_srvversion;
10526 : int i_srvacl;
10527 : int i_acldefault;
10528 : int i_srvoptions;
10529 :
10530 376 : query = createPQExpBuffer();
10531 :
10532 376 : appendPQExpBufferStr(query, "SELECT tableoid, oid, srvname, "
10533 : "srvowner, "
10534 : "srvfdw, srvtype, srvversion, srvacl, "
10535 : "acldefault('S', srvowner) AS acldefault, "
10536 : "array_to_string(ARRAY("
10537 : "SELECT quote_ident(option_name) || ' ' || "
10538 : "quote_literal(option_value) "
10539 : "FROM pg_options_to_table(srvoptions) "
10540 : "ORDER BY option_name"
10541 : "), E',\n ') AS srvoptions "
10542 : "FROM pg_foreign_server");
10543 :
10544 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10545 :
10546 376 : ntups = PQntuples(res);
10547 :
10548 376 : srvinfo = (ForeignServerInfo *) pg_malloc(ntups * sizeof(ForeignServerInfo));
10549 :
10550 376 : i_tableoid = PQfnumber(res, "tableoid");
10551 376 : i_oid = PQfnumber(res, "oid");
10552 376 : i_srvname = PQfnumber(res, "srvname");
10553 376 : i_srvowner = PQfnumber(res, "srvowner");
10554 376 : i_srvfdw = PQfnumber(res, "srvfdw");
10555 376 : i_srvtype = PQfnumber(res, "srvtype");
10556 376 : i_srvversion = PQfnumber(res, "srvversion");
10557 376 : i_srvacl = PQfnumber(res, "srvacl");
10558 376 : i_acldefault = PQfnumber(res, "acldefault");
10559 376 : i_srvoptions = PQfnumber(res, "srvoptions");
10560 :
10561 526 : for (i = 0; i < ntups; i++)
10562 : {
10563 150 : srvinfo[i].dobj.objType = DO_FOREIGN_SERVER;
10564 150 : srvinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10565 150 : srvinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10566 150 : AssignDumpId(&srvinfo[i].dobj);
10567 150 : srvinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_srvname));
10568 150 : srvinfo[i].dobj.namespace = NULL;
10569 150 : srvinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_srvacl));
10570 150 : srvinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10571 150 : srvinfo[i].dacl.privtype = 0;
10572 150 : srvinfo[i].dacl.initprivs = NULL;
10573 150 : srvinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_srvowner));
10574 150 : srvinfo[i].srvfdw = atooid(PQgetvalue(res, i, i_srvfdw));
10575 150 : srvinfo[i].srvtype = pg_strdup(PQgetvalue(res, i, i_srvtype));
10576 150 : srvinfo[i].srvversion = pg_strdup(PQgetvalue(res, i, i_srvversion));
10577 150 : srvinfo[i].srvoptions = pg_strdup(PQgetvalue(res, i, i_srvoptions));
10578 :
10579 : /* Decide whether we want to dump it */
10580 150 : selectDumpableObject(&(srvinfo[i].dobj), fout);
10581 :
10582 : /* Servers have user mappings */
10583 150 : srvinfo[i].dobj.components |= DUMP_COMPONENT_USERMAP;
10584 :
10585 : /* Mark whether server has an ACL */
10586 150 : if (!PQgetisnull(res, i, i_srvacl))
10587 90 : srvinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10588 : }
10589 :
10590 376 : PQclear(res);
10591 :
10592 376 : destroyPQExpBuffer(query);
10593 376 : }
10594 :
10595 : /*
10596 : * getDefaultACLs:
10597 : * get information about all default ACL information in the system catalogs
10598 : */
10599 : void
10600 376 : getDefaultACLs(Archive *fout)
10601 : {
10602 376 : DumpOptions *dopt = fout->dopt;
10603 : DefaultACLInfo *daclinfo;
10604 : PQExpBuffer query;
10605 : PGresult *res;
10606 : int i_oid;
10607 : int i_tableoid;
10608 : int i_defaclrole;
10609 : int i_defaclnamespace;
10610 : int i_defaclobjtype;
10611 : int i_defaclacl;
10612 : int i_acldefault;
10613 : int i,
10614 : ntups;
10615 :
10616 376 : query = createPQExpBuffer();
10617 :
10618 : /*
10619 : * Global entries (with defaclnamespace=0) replace the hard-wired default
10620 : * ACL for their object type. We should dump them as deltas from the
10621 : * default ACL, since that will be used as a starting point for
10622 : * interpreting the ALTER DEFAULT PRIVILEGES commands. On the other hand,
10623 : * non-global entries can only add privileges not revoke them. We must
10624 : * non-global entries can only add privileges, not revoke them. We must
10625 : *
10626 : * We can use defaclobjtype as the object type for acldefault(), except
10627 : * for the case of 'S' (DEFACLOBJ_SEQUENCE) which must be converted to
10628 : * 's'.
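          : *
          : * As a rough illustration (role name purely hypothetical), a global
          : * 'f' entry whose stored ACL no longer grants EXECUTE to PUBLIC ends
          : * up dumped as something like
          : *     ALTER DEFAULT PRIVILEGES FOR ROLE alice
          : *         REVOKE ALL ON FUNCTIONS FROM PUBLIC;
          : * while a per-schema entry, being a delta from an empty ACL, can only
          : * produce GRANT commands.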
10629 : */
10630 376 : appendPQExpBufferStr(query,
10631 : "SELECT oid, tableoid, "
10632 : "defaclrole, "
10633 : "defaclnamespace, "
10634 : "defaclobjtype, "
10635 : "defaclacl, "
10636 : "CASE WHEN defaclnamespace = 0 THEN "
10637 : "acldefault(CASE WHEN defaclobjtype = 'S' "
10638 : "THEN 's'::\"char\" ELSE defaclobjtype END, "
10639 : "defaclrole) ELSE '{}' END AS acldefault "
10640 : "FROM pg_default_acl");
10641 :
10642 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10643 :
10644 376 : ntups = PQntuples(res);
10645 :
10646 376 : daclinfo = (DefaultACLInfo *) pg_malloc(ntups * sizeof(DefaultACLInfo));
10647 :
10648 376 : i_oid = PQfnumber(res, "oid");
10649 376 : i_tableoid = PQfnumber(res, "tableoid");
10650 376 : i_defaclrole = PQfnumber(res, "defaclrole");
10651 376 : i_defaclnamespace = PQfnumber(res, "defaclnamespace");
10652 376 : i_defaclobjtype = PQfnumber(res, "defaclobjtype");
10653 376 : i_defaclacl = PQfnumber(res, "defaclacl");
10654 376 : i_acldefault = PQfnumber(res, "acldefault");
10655 :
10656 764 : for (i = 0; i < ntups; i++)
10657 : {
10658 388 : Oid nspid = atooid(PQgetvalue(res, i, i_defaclnamespace));
10659 :
10660 388 : daclinfo[i].dobj.objType = DO_DEFAULT_ACL;
10661 388 : daclinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
10662 388 : daclinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
10663 388 : AssignDumpId(&daclinfo[i].dobj);
10664 : /* cheesy ... is it worth coming up with a better object name? */
10665 388 : daclinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_defaclobjtype));
10666 :
10667 388 : if (nspid != InvalidOid)
10668 180 : daclinfo[i].dobj.namespace = findNamespace(nspid);
10669 : else
10670 208 : daclinfo[i].dobj.namespace = NULL;
10671 :
10672 388 : daclinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_defaclacl));
10673 388 : daclinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault));
10674 388 : daclinfo[i].dacl.privtype = 0;
10675 388 : daclinfo[i].dacl.initprivs = NULL;
10676 388 : daclinfo[i].defaclrole = getRoleName(PQgetvalue(res, i, i_defaclrole));
10677 388 : daclinfo[i].defaclobjtype = *(PQgetvalue(res, i, i_defaclobjtype));
10678 :
10679 : /* Default ACLs are ACLs, of course */
10680 388 : daclinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
10681 :
10682 : /* Decide whether we want to dump it */
10683 388 : selectDumpableDefaultACL(&(daclinfo[i]), dopt);
10684 : }
10685 :
10686 376 : PQclear(res);
10687 :
10688 376 : destroyPQExpBuffer(query);
10689 376 : }
10690 :
10691 : /*
10692 : * getRoleName -- look up the name of a role, given its OID
10693 : *
10694 : * In current usage, we don't expect failures, so error out for a bad OID.
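          : *
          : * For example, getRoleName("10") would ordinarily return the name of
          : * the bootstrap superuser; the lookup is a binary search over the
          : * OID-sorted table built by collectRoleNames() below.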
10695 : */
10696 : static const char *
10697 1195042 : getRoleName(const char *roleoid_str)
10698 : {
10699 1195042 : Oid roleoid = atooid(roleoid_str);
10700 :
10701 : /*
10702 : * Do binary search to find the appropriate item.
10703 : */
10704 1195042 : if (nrolenames > 0)
10705 : {
10706 1195042 : RoleNameItem *low = &rolenames[0];
10707 1195042 : RoleNameItem *high = &rolenames[nrolenames - 1];
10708 :
10709 4779600 : while (low <= high)
10710 : {
10711 4779600 : RoleNameItem *middle = low + (high - low) / 2;
10712 :
10713 4779600 : if (roleoid < middle->roleoid)
10714 3582796 : high = middle - 1;
10715 1196804 : else if (roleoid > middle->roleoid)
10716 1762 : low = middle + 1;
10717 : else
10718 1195042 : return middle->rolename; /* found a match */
10719 : }
10720 : }
10721 :
10722 0 : pg_fatal("role with OID %u does not exist", roleoid);
10723 : return NULL; /* keep compiler quiet */
10724 : }
10725 :
10726 : /*
10727 : * collectRoleNames --
10728 : *
10729 : * Construct a table of all known roles.
10730 : * The table is sorted by OID for speed in lookup.
10731 : */
10732 : static void
10733 378 : collectRoleNames(Archive *fout)
10734 : {
10735 : PGresult *res;
10736 : const char *query;
10737 : int i;
10738 :
10739 378 : query = "SELECT oid, rolname FROM pg_catalog.pg_roles ORDER BY 1";
10740 :
10741 378 : res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
10742 :
10743 378 : nrolenames = PQntuples(res);
10744 :
10745 378 : rolenames = (RoleNameItem *) pg_malloc(nrolenames * sizeof(RoleNameItem));
10746 :
10747 7414 : for (i = 0; i < nrolenames; i++)
10748 : {
10749 7036 : rolenames[i].roleoid = atooid(PQgetvalue(res, i, 0));
10750 7036 : rolenames[i].rolename = pg_strdup(PQgetvalue(res, i, 1));
10751 : }
10752 :
10753 378 : PQclear(res);
10754 378 : }
10755 :
10756 : /*
10757 : * getAdditionalACLs
10758 : *
10759 : * We have now created all the DumpableObjects, and collected the ACL data
10760 : * that appears in the directly-associated catalog entries. However, there's
10761 : * more ACL-related info to collect. If any of a table's columns have ACLs,
10762 : * we must set the TableInfo's DUMP_COMPONENT_ACL components flag, as well as
10763 : * its hascolumnACLs flag (we won't store the ACLs themselves here, though).
10764 : * Also, in versions having the pg_init_privs catalog, read that and load the
10765 : * information into the relevant DumpableObjects.
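          : *
          : * As a rough example of the pg_init_privs step (names hypothetical):
          : * a row with classoid = pg_proc's OID, objsubid = 0, privtype = 'e'
          : * and initprivs = '{alice=X/alice}' causes the matching DO_FUNC
          : * object's dacl.privtype and dacl.initprivs to be filled in, while a
          : * column-level row (objsubid != 0) merely sets the owning table's ACL
          : * component and hascolumnACLs flags.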
10766 : */
10767 : static void
10768 372 : getAdditionalACLs(Archive *fout)
10769 : {
10770 372 : PQExpBuffer query = createPQExpBuffer();
10771 : PGresult *res;
10772 : int ntups,
10773 : i;
10774 :
10775 : /* Check for per-column ACLs */
10776 372 : appendPQExpBufferStr(query,
10777 : "SELECT DISTINCT attrelid FROM pg_attribute "
10778 : "WHERE attacl IS NOT NULL");
10779 :
10780 372 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10781 :
10782 372 : ntups = PQntuples(res);
10783 1084 : for (i = 0; i < ntups; i++)
10784 : {
10785 712 : Oid relid = atooid(PQgetvalue(res, i, 0));
10786 : TableInfo *tblinfo;
10787 :
10788 712 : tblinfo = findTableByOid(relid);
10789 : /* OK to ignore tables we haven't got a DumpableObject for */
10790 712 : if (tblinfo)
10791 : {
10792 712 : tblinfo->dobj.components |= DUMP_COMPONENT_ACL;
10793 712 : tblinfo->hascolumnACLs = true;
10794 : }
10795 : }
10796 372 : PQclear(res);
10797 :
10798 : /* Fetch initial-privileges data */
10799 372 : if (fout->remoteVersion >= 90600)
10800 : {
10801 372 : printfPQExpBuffer(query,
10802 : "SELECT objoid, classoid, objsubid, privtype, initprivs "
10803 : "FROM pg_init_privs");
10804 :
10805 372 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
10806 :
10807 372 : ntups = PQntuples(res);
10808 88408 : for (i = 0; i < ntups; i++)
10809 : {
10810 88036 : Oid objoid = atooid(PQgetvalue(res, i, 0));
10811 88036 : Oid classoid = atooid(PQgetvalue(res, i, 1));
10812 88036 : int objsubid = atoi(PQgetvalue(res, i, 2));
10813 88036 : char privtype = *(PQgetvalue(res, i, 3));
10814 88036 : char *initprivs = PQgetvalue(res, i, 4);
10815 : CatalogId objId;
10816 : DumpableObject *dobj;
10817 :
10818 88036 : objId.tableoid = classoid;
10819 88036 : objId.oid = objoid;
10820 88036 : dobj = findObjectByCatalogId(objId);
10821 : /* OK to ignore entries we haven't got a DumpableObject for */
10822 88036 : if (dobj)
10823 : {
10824 : /* Cope with sub-object initprivs */
10825 63200 : if (objsubid != 0)
10826 : {
10827 7488 : if (dobj->objType == DO_TABLE)
10828 : {
10829 : /* For a column initprivs, set the table's ACL flags */
10830 7488 : dobj->components |= DUMP_COMPONENT_ACL;
10831 7488 : ((TableInfo *) dobj)->hascolumnACLs = true;
10832 : }
10833 : else
10834 0 : pg_log_warning("unsupported pg_init_privs entry: %u %u %d",
10835 : classoid, objoid, objsubid);
10836 7852 : continue;
10837 : }
10838 :
10839 : /*
10840 : * We ignore any pg_init_privs.initprivs entry for the public
10841 : * schema, as explained in getNamespaces().
10842 : */
10843 55712 : if (dobj->objType == DO_NAMESPACE &&
10844 736 : strcmp(dobj->name, "public") == 0)
10845 364 : continue;
10846 :
10847 : /* Else it had better be of a type we think has ACLs */
10848 55348 : if (dobj->objType == DO_NAMESPACE ||
10849 54976 : dobj->objType == DO_TYPE ||
10850 54928 : dobj->objType == DO_FUNC ||
10851 54744 : dobj->objType == DO_AGG ||
10852 54696 : dobj->objType == DO_TABLE ||
10853 0 : dobj->objType == DO_PROCLANG ||
10854 0 : dobj->objType == DO_FDW ||
10855 0 : dobj->objType == DO_FOREIGN_SERVER)
10856 55348 : {
10857 55348 : DumpableObjectWithAcl *daobj = (DumpableObjectWithAcl *) dobj;
10858 :
10859 55348 : daobj->dacl.privtype = privtype;
10860 55348 : daobj->dacl.initprivs = pstrdup(initprivs);
10861 : }
10862 : else
10863 0 : pg_log_warning("unsupported pg_init_privs entry: %u %u %d",
10864 : classoid, objoid, objsubid);
10865 : }
10866 : }
10867 372 : PQclear(res);
10868 : }
10869 :
10870 372 : destroyPQExpBuffer(query);
10871 372 : }
10872 :
10873 : /*
10874 : * dumpCommentExtended --
10875 : *
10876 : * This routine is used to dump any comments associated with the
10877 : * object handed to this routine. The routine takes the object type
10878 : * and object name (ready to print, except for schema decoration), plus
10879 : * the namespace and owner of the object (for labeling the ArchiveEntry),
10880 : * plus catalog ID and subid which are the lookup key for pg_description,
10881 : * plus the dump ID for the object (for setting a dependency).
10882 : * If a matching pg_description entry is found, it is dumped.
10883 : *
10884 : * Note: in some cases, such as comments for triggers and rules, the "type"
10885 : * string really looks like, e.g., "TRIGGER name ON". This is a bit of a hack
10886 : * but it doesn't seem worth complicating the API for all callers to make
10887 : * it cleaner.
10888 : *
10889 : * Note: although this routine takes a dumpId for dependency purposes,
10890 : * that purpose is just to mark the dependency in the emitted dump file
10891 : * for possible future use by pg_restore. We do NOT use it for determining
10892 : * ordering of the comment in the dump file, because this routine is called
10893 : * after dependency sorting occurs. This routine should be called just after
10894 : * calling ArchiveEntry() for the specified object.
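          : *
          : * For example (names purely illustrative), a call with type "TYPE",
          : * name "mytype" and namespace "public" emits roughly
          : *     COMMENT ON TYPE public.mytype IS 'text from pg_description';
          : * the namespace prefix is omitted when no namespace is supplied.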
10895 : */
10896 : static void
10897 13026 : dumpCommentExtended(Archive *fout, const char *type,
10898 : const char *name, const char *namespace,
10899 : const char *owner, CatalogId catalogId,
10900 : int subid, DumpId dumpId,
10901 : const char *initdb_comment)
10902 : {
10903 13026 : DumpOptions *dopt = fout->dopt;
10904 : CommentItem *comments;
10905 : int ncomments;
10906 :
10907 : /* do nothing, if --no-comments is supplied */
10908 13026 : if (dopt->no_comments)
10909 0 : return;
10910 :
10911 : /* Comments are schema not data ... except LO comments are data */
10912 13026 : if (strcmp(type, "LARGE OBJECT") != 0)
10913 : {
10914 12908 : if (!dopt->dumpSchema)
10915 0 : return;
10916 : }
10917 : else
10918 : {
10919 : /* We do dump LO comments in binary-upgrade mode */
10920 118 : if (!dopt->dumpData && !dopt->binary_upgrade)
10921 0 : return;
10922 : }
10923 :
10924 : /* Search for comments associated with catalogId, using table */
10925 13026 : ncomments = findComments(catalogId.tableoid, catalogId.oid,
10926 : &comments);
10927 :
10928 : /* Is there one matching the subid? */
10929 13026 : while (ncomments > 0)
10930 : {
10931 12934 : if (comments->objsubid == subid)
10932 12934 : break;
10933 0 : comments++;
10934 0 : ncomments--;
10935 : }
10936 :
10937 13026 : if (initdb_comment != NULL)
10938 : {
10939 : static CommentItem empty_comment = {.descr = ""};
10940 :
10941 : /*
10942 : * initdb creates this object with a comment. Skip dumping the
10943 : * initdb-provided comment, which would complicate matters for
10944 : * non-superuser use of pg_dump. When the DBA has removed initdb's
10945 : * comment, replicate that.
10946 : */
10947 234 : if (ncomments == 0)
10948 : {
10949 8 : comments = &empty_comment;
10950 8 : ncomments = 1;
10951 : }
10952 226 : else if (strcmp(comments->descr, initdb_comment) == 0)
10953 226 : ncomments = 0;
10954 : }
10955 :
10956 : /* If a comment exists, build COMMENT ON statement */
10957 13026 : if (ncomments > 0)
10958 : {
10959 12716 : PQExpBuffer query = createPQExpBuffer();
10960 12716 : PQExpBuffer tag = createPQExpBuffer();
10961 :
10962 12716 : appendPQExpBuffer(query, "COMMENT ON %s ", type);
10963 12716 : if (namespace && *namespace)
10964 12360 : appendPQExpBuffer(query, "%s.", fmtId(namespace));
10965 12716 : appendPQExpBuffer(query, "%s IS ", name);
10966 12716 : appendStringLiteralAH(query, comments->descr, fout);
10967 12716 : appendPQExpBufferStr(query, ";\n");
10968 :
10969 12716 : appendPQExpBuffer(tag, "%s %s", type, name);
10970 :
10971 : /*
10972 : * We mark comments as SECTION_NONE because they really belong in the
10973 : * same section as their parent, whether that is pre-data or
10974 : * post-data.
10975 : */
10976 12716 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
10977 12716 : ARCHIVE_OPTS(.tag = tag->data,
10978 : .namespace = namespace,
10979 : .owner = owner,
10980 : .description = "COMMENT",
10981 : .section = SECTION_NONE,
10982 : .createStmt = query->data,
10983 : .deps = &dumpId,
10984 : .nDeps = 1));
10985 :
10986 12716 : destroyPQExpBuffer(query);
10987 12716 : destroyPQExpBuffer(tag);
10988 : }
10989 : }
10990 :
10991 : /*
10992 : * dumpComment --
10993 : *
10994 : * Typical simplification of the above function.
10995 : */
10996 : static inline void
10997 12710 : dumpComment(Archive *fout, const char *type,
10998 : const char *name, const char *namespace,
10999 : const char *owner, CatalogId catalogId,
11000 : int subid, DumpId dumpId)
11001 : {
11002 12710 : dumpCommentExtended(fout, type, name, namespace, owner,
11003 : catalogId, subid, dumpId, NULL);
11004 12710 : }
11005 :
11006 : /*
11007 : * appendNamedArgument --
11008 : *
11009 : * Convenience routine for constructing parameters of the form:
11010 : * 'paraname', 'value'::type
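          : *
          : * For example, appendNamedArgument(out, fout, "null_frac", "real", "0.25")
          : * appends ",\n\t'null_frac', '0.25'::real" to the buffer.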
11011 : */
11012 : static void
11013 11206 : appendNamedArgument(PQExpBuffer out, Archive *fout, const char *argname,
11014 : const char *argtype, const char *argval)
11015 : {
11016 11206 : appendPQExpBufferStr(out, ",\n\t");
11017 :
11018 11206 : appendStringLiteralAH(out, argname, fout);
11019 11206 : appendPQExpBufferStr(out, ", ");
11020 :
11021 11206 : appendStringLiteralAH(out, argval, fout);
11022 11206 : appendPQExpBuffer(out, "::%s", argtype);
11023 11206 : }
11024 :
11025 : /*
11026 : * fetchAttributeStats --
11027 : *
11028 : * Fetch next batch of attribute statistics for dumpRelationStats_dumper().
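          : *
          : * Each batch is requested via the prepared statement set up in
          : * dumpRelationStats_dumper(), roughly of the form (names illustrative)
          : *     EXECUTE getAttributeStats('{public,public}'::pg_catalog.name[],
          : *                               '{t1,t2}'::pg_catalog.name[])
          : * where the two arrays pair up schema and relation names positionally.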
11029 : */
11030 : static PGresult *
11031 2070 : fetchAttributeStats(Archive *fout)
11032 : {
11033 2070 : ArchiveHandle *AH = (ArchiveHandle *) fout;
11034 2070 : PQExpBuffer nspnames = createPQExpBuffer();
11035 2070 : PQExpBuffer relnames = createPQExpBuffer();
11036 2070 : int count = 0;
11037 2070 : PGresult *res = NULL;
11038 : static TocEntry *te;
11039 : static bool restarted;
11040 2070 : int max_rels = MAX_ATTR_STATS_RELS;
11041 :
11042 : /*
11043 : * Our query for retrieving statistics for multiple relations uses WITH
11044 : * ORDINALITY and multi-argument UNNEST(), both of which were introduced
11045 : * in v9.4. For older versions, we resort to gathering statistics for a
11046 : * single relation at a time.
11047 : */
11048 2070 : if (fout->remoteVersion < 90400)
11049 0 : max_rels = 1;
11050 :
11051 : /* If we're just starting, set our TOC pointer. */
11052 2070 : if (!te)
11053 124 : te = AH->toc->next;
11054 :
11055 : /*
11056 : * We can't easily avoid a second TOC scan for the tar format because it
11057 : * writes restore.sql separately, which means we must execute the queries
11058 : * twice. This feels risky, but there is no known reason it should
11059 : * generate different output than the first pass. Even if it does, the
11060 : * worst-case scenario is that restore.sql might have different statistics
11061 : * data than the archive.
11062 : */
11063 2070 : if (!restarted && te == AH->toc && AH->format == archTar)
11064 : {
11065 2 : te = AH->toc->next;
11066 2 : restarted = true;
11067 : }
11068 :
11069 2070 : appendPQExpBufferChar(nspnames, '{');
11070 2070 : appendPQExpBufferChar(relnames, '{');
11071 :
11072 : /*
11073 : * Scan the TOC for the next set of relevant stats entries. We assume
11074 : * that statistics are dumped in the order they are listed in the TOC.
11075 : * This is perhaps not the sturdiest assumption, so we verify it matches
11076 : * reality in dumpRelationStats_dumper().
11077 : */
11078 31630 : for (; te != AH->toc && count < max_rels; te = te->next)
11079 : {
11080 29560 : if ((te->reqs & REQ_STATS) != 0 &&
11081 6588 : strcmp(te->desc, "STATISTICS DATA") == 0)
11082 : {
11083 6518 : appendPGArray(nspnames, te->namespace);
11084 6518 : appendPGArray(relnames, te->tag);
11085 6518 : count++;
11086 : }
11087 : }
11088 :
11089 2070 : appendPQExpBufferChar(nspnames, '}');
11090 2070 : appendPQExpBufferChar(relnames, '}');
11091 :
11092 : /* Execute the query for the next batch of relations. */
11093 2070 : if (count > 0)
11094 : {
11095 210 : PQExpBuffer query = createPQExpBuffer();
11096 :
11097 210 : appendPQExpBufferStr(query, "EXECUTE getAttributeStats(");
11098 210 : appendStringLiteralAH(query, nspnames->data, fout);
11099 210 : appendPQExpBufferStr(query, "::pg_catalog.name[],");
11100 210 : appendStringLiteralAH(query, relnames->data, fout);
11101 210 : appendPQExpBufferStr(query, "::pg_catalog.name[])");
11102 210 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
11103 210 : destroyPQExpBuffer(query);
11104 : }
11105 :
11106 2070 : destroyPQExpBuffer(nspnames);
11107 2070 : destroyPQExpBuffer(relnames);
11108 2070 : return res;
11109 : }
11110 :
11111 : /*
11112 : * dumpRelationStats_dumper --
11113 : *
11114 : * Generate command to import stats into the relation on the new database.
11115 : * This routine is called by the Archiver when it wants the statistics to be
11116 : * dumped.
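          : *
          : * The generated script is one pg_restore_relation_stats() call followed
          : * by zero or more pg_restore_attribute_stats() calls, e.g. (values
          : * illustrative, for a server reporting version 180000):
          : *     SELECT * FROM pg_catalog.pg_restore_relation_stats(
          : *         'version', '180000'::integer,
          : *         'schemaname', 'public',
          : *         'relname', 't1',
          : *         'relpages', '1'::integer,
          : *         'reltuples', '100'::real,
          : *         'relallvisible', '0'::integer,
          : *         'relallfrozen', '0'::integer
          : *     );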
11117 : */
11118 : static char *
11119 6518 : dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
11120 : {
11121 6518 : const RelStatsInfo *rsinfo = userArg;
11122 : static PGresult *res;
11123 : static int rownum;
11124 : PQExpBuffer query;
11125 : PQExpBufferData out_data;
11126 6518 : PQExpBuffer out = &out_data;
11127 : int i_schemaname;
11128 : int i_tablename;
11129 : int i_attname;
11130 : int i_inherited;
11131 : int i_null_frac;
11132 : int i_avg_width;
11133 : int i_n_distinct;
11134 : int i_most_common_vals;
11135 : int i_most_common_freqs;
11136 : int i_histogram_bounds;
11137 : int i_correlation;
11138 : int i_most_common_elems;
11139 : int i_most_common_elem_freqs;
11140 : int i_elem_count_histogram;
11141 : int i_range_length_histogram;
11142 : int i_range_empty_frac;
11143 : int i_range_bounds_histogram;
11144 : static TocEntry *expected_te;
11145 :
11146 : /*
11147 : * fetchAttributeStats() assumes that the statistics are dumped in the
11148 : * order they are listed in the TOC. We verify that here for safety.
11149 : */
11150 6518 : if (!expected_te)
11151 124 : expected_te = ((ArchiveHandle *) fout)->toc;
11152 :
11153 6518 : expected_te = expected_te->next;
11154 25892 : while ((expected_te->reqs & REQ_STATS) == 0 ||
11155 6520 : strcmp(expected_te->desc, "STATISTICS DATA") != 0)
11156 19374 : expected_te = expected_te->next;
11157 :
11158 6518 : if (te != expected_te)
11159 0 : pg_fatal("statistics dumped out of order (current: %d %s %s, expected: %d %s %s)",
11160 : te->dumpId, te->desc, te->tag,
11161 : expected_te->dumpId, expected_te->desc, expected_te->tag);
11162 :
11163 6518 : query = createPQExpBuffer();
11164 6518 : if (!fout->is_prepared[PREPQUERY_GETATTRIBUTESTATS])
11165 : {
11166 124 : appendPQExpBufferStr(query,
11167 : "PREPARE getAttributeStats(pg_catalog.name[], pg_catalog.name[]) AS\n"
11168 : "SELECT s.schemaname, s.tablename, s.attname, s.inherited, "
11169 : "s.null_frac, s.avg_width, s.n_distinct, "
11170 : "s.most_common_vals, s.most_common_freqs, "
11171 : "s.histogram_bounds, s.correlation, "
11172 : "s.most_common_elems, s.most_common_elem_freqs, "
11173 : "s.elem_count_histogram, ");
11174 :
11175 124 : if (fout->remoteVersion >= 170000)
11176 124 : appendPQExpBufferStr(query,
11177 : "s.range_length_histogram, "
11178 : "s.range_empty_frac, "
11179 : "s.range_bounds_histogram ");
11180 : else
11181 0 : appendPQExpBufferStr(query,
11182 : "NULL AS range_length_histogram,"
11183 : "NULL AS range_empty_frac,"
11184 : "NULL AS range_bounds_histogram ");
11185 :
11186 : /*
11187 : * The results must be in the order of the relations supplied in the
11188 : * parameters to ensure we remain in sync as we walk through the TOC.
11189 : * The redundant filter clause on s.tablename = ANY(...) seems
11190 : * sufficient to convince the planner to use
11191 : * pg_class_relname_nsp_index, which avoids a full scan of pg_stats.
11192 : * This may not work for all versions.
11193 : *
11194 : * Our query for retrieving statistics for multiple relations uses
11195 : * WITH ORDINALITY and multi-argument UNNEST(), both of which were
11196 : * introduced in v9.4. For older versions, we resort to gathering
11197 : * statistics for a single relation at a time.
11198 : */
11199 124 : if (fout->remoteVersion >= 90400)
11200 124 : appendPQExpBufferStr(query,
11201 : "FROM pg_catalog.pg_stats s "
11202 : "JOIN unnest($1, $2) WITH ORDINALITY AS u (schemaname, tablename, ord) "
11203 : "ON s.schemaname = u.schemaname "
11204 : "AND s.tablename = u.tablename "
11205 : "WHERE s.tablename = ANY($2) "
11206 : "ORDER BY u.ord, s.attname, s.inherited");
11207 : else
11208 0 : appendPQExpBufferStr(query,
11209 : "FROM pg_catalog.pg_stats s "
11210 : "WHERE s.schemaname = $1[1] "
11211 : "AND s.tablename = $2[1] "
11212 : "ORDER BY s.attname, s.inherited");
11213 :
11214 124 : ExecuteSqlStatement(fout, query->data);
11215 :
11216 124 : fout->is_prepared[PREPQUERY_GETATTRIBUTESTATS] = true;
11217 124 : resetPQExpBuffer(query);
11218 : }
11219 :
11220 6518 : initPQExpBuffer(out);
11221 :
11222 : /* restore relation stats */
11223 6518 : appendPQExpBufferStr(out, "SELECT * FROM pg_catalog.pg_restore_relation_stats(\n");
11224 6518 : appendPQExpBuffer(out, "\t'version', '%d'::integer,\n",
11225 : fout->remoteVersion);
11226 6518 : appendPQExpBufferStr(out, "\t'schemaname', ");
11227 6518 : appendStringLiteralAH(out, rsinfo->dobj.namespace->dobj.name, fout);
11228 6518 : appendPQExpBufferStr(out, ",\n");
11229 6518 : appendPQExpBufferStr(out, "\t'relname', ");
11230 6518 : appendStringLiteralAH(out, rsinfo->dobj.name, fout);
11231 6518 : appendPQExpBufferStr(out, ",\n");
11232 6518 : appendPQExpBuffer(out, "\t'relpages', '%d'::integer,\n", rsinfo->relpages);
11233 :
11234 : /*
11235 : * Before v14, a reltuples value of 0 was ambiguous: it could either mean
11236 : * the relation is empty, or it could mean that it hadn't yet been
11237 : * vacuumed or analyzed. (Newer versions use -1 for the latter case.)
11238 : * This ambiguity allegedly can cause the planner to choose inefficient
11239 : * plans after restoring to v18 or newer. To deal with this, let's just
11240 : * set reltuples to -1 in that case.
11241 : */
11242 6518 : if (fout->remoteVersion < 140000 && strcmp("0", rsinfo->reltuples) == 0)
11243 0 : appendPQExpBufferStr(out, "\t'reltuples', '-1'::real,\n");
11244 : else
11245 6518 : appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
11246 :
11247 6518 : appendPQExpBuffer(out, "\t'relallvisible', '%d'::integer",
11248 6518 : rsinfo->relallvisible);
11249 :
11250 6518 : if (fout->remoteVersion >= 180000)
11251 6518 : appendPQExpBuffer(out, ",\n\t'relallfrozen', '%d'::integer", rsinfo->relallfrozen);
11252 :
11253 6518 : appendPQExpBufferStr(out, "\n);\n");
11254 :
11255 : /* Fetch the next batch of attribute statistics if needed. */
11256 6518 : if (rownum >= PQntuples(res))
11257 : {
11258 2070 : PQclear(res);
11259 2070 : res = fetchAttributeStats(fout);
11260 2070 : rownum = 0;
11261 : }
11262 :
11263 6518 : i_schemaname = PQfnumber(res, "schemaname");
11264 6518 : i_tablename = PQfnumber(res, "tablename");
11265 6518 : i_attname = PQfnumber(res, "attname");
11266 6518 : i_inherited = PQfnumber(res, "inherited");
11267 6518 : i_null_frac = PQfnumber(res, "null_frac");
11268 6518 : i_avg_width = PQfnumber(res, "avg_width");
11269 6518 : i_n_distinct = PQfnumber(res, "n_distinct");
11270 6518 : i_most_common_vals = PQfnumber(res, "most_common_vals");
11271 6518 : i_most_common_freqs = PQfnumber(res, "most_common_freqs");
11272 6518 : i_histogram_bounds = PQfnumber(res, "histogram_bounds");
11273 6518 : i_correlation = PQfnumber(res, "correlation");
11274 6518 : i_most_common_elems = PQfnumber(res, "most_common_elems");
11275 6518 : i_most_common_elem_freqs = PQfnumber(res, "most_common_elem_freqs");
11276 6518 : i_elem_count_histogram = PQfnumber(res, "elem_count_histogram");
11277 6518 : i_range_length_histogram = PQfnumber(res, "range_length_histogram");
11278 6518 : i_range_empty_frac = PQfnumber(res, "range_empty_frac");
11279 6518 : i_range_bounds_histogram = PQfnumber(res, "range_bounds_histogram");
11280 :
11281 : /* restore attribute stats */
11282 8148 : for (; rownum < PQntuples(res); rownum++)
11283 : {
11284 : const char *attname;
11285 :
11286 : /* Stop if the next stat row in our cache isn't for this relation. */
11287 6078 : if (strcmp(te->tag, PQgetvalue(res, rownum, i_tablename)) != 0 ||
11288 1630 : strcmp(te->namespace, PQgetvalue(res, rownum, i_schemaname)) != 0)
11289 : break;
11290 :
11291 1630 : appendPQExpBufferStr(out, "SELECT * FROM pg_catalog.pg_restore_attribute_stats(\n");
11292 1630 : appendPQExpBuffer(out, "\t'version', '%d'::integer,\n",
11293 : fout->remoteVersion);
11294 1630 : appendPQExpBufferStr(out, "\t'schemaname', ");
11295 1630 : appendStringLiteralAH(out, rsinfo->dobj.namespace->dobj.name, fout);
11296 1630 : appendPQExpBufferStr(out, ",\n\t'relname', ");
11297 1630 : appendStringLiteralAH(out, rsinfo->dobj.name, fout);
11298 :
11299 1630 : if (PQgetisnull(res, rownum, i_attname))
11300 0 : pg_fatal("unexpected null attname");
11301 1630 : attname = PQgetvalue(res, rownum, i_attname);
11302 :
11303 : /*
11304 : * Indexes look up attname in indAttNames to derive attnum; all others
11305 : * use attname directly. We must specify attnum for indexes, since
11306 : * their attnames are not necessarily stable across dump/reload.
11307 : */
11308 1630 : if (rsinfo->nindAttNames == 0)
11309 : {
11310 1560 : appendPQExpBufferStr(out, ",\n\t'attname', ");
11311 1560 : appendStringLiteralAH(out, attname, fout);
11312 : }
11313 : else
11314 : {
11315 70 : bool found = false;
11316 :
11317 132 : for (int i = 0; i < rsinfo->nindAttNames; i++)
11318 : {
11319 132 : if (strcmp(attname, rsinfo->indAttNames[i]) == 0)
11320 : {
11321 70 : appendPQExpBuffer(out, ",\n\t'attnum', '%d'::smallint",
11322 : i + 1);
11323 70 : found = true;
11324 70 : break;
11325 : }
11326 : }
11327 :
11328 70 : if (!found)
11329 0 : pg_fatal("could not find index attname \"%s\"", attname);
11330 : }
11331 :
11332 1630 : if (!PQgetisnull(res, rownum, i_inherited))
11333 1630 : appendNamedArgument(out, fout, "inherited", "boolean",
11334 1630 : PQgetvalue(res, rownum, i_inherited));
11335 1630 : if (!PQgetisnull(res, rownum, i_null_frac))
11336 1630 : appendNamedArgument(out, fout, "null_frac", "real",
11337 1630 : PQgetvalue(res, rownum, i_null_frac));
11338 1630 : if (!PQgetisnull(res, rownum, i_avg_width))
11339 1630 : appendNamedArgument(out, fout, "avg_width", "integer",
11340 1630 : PQgetvalue(res, rownum, i_avg_width));
11341 1630 : if (!PQgetisnull(res, rownum, i_n_distinct))
11342 1630 : appendNamedArgument(out, fout, "n_distinct", "real",
11343 1630 : PQgetvalue(res, rownum, i_n_distinct));
11344 1630 : if (!PQgetisnull(res, rownum, i_most_common_vals))
11345 810 : appendNamedArgument(out, fout, "most_common_vals", "text",
11346 810 : PQgetvalue(res, rownum, i_most_common_vals));
11347 1630 : if (!PQgetisnull(res, rownum, i_most_common_freqs))
11348 810 : appendNamedArgument(out, fout, "most_common_freqs", "real[]",
11349 810 : PQgetvalue(res, rownum, i_most_common_freqs));
11350 1630 : if (!PQgetisnull(res, rownum, i_histogram_bounds))
11351 1020 : appendNamedArgument(out, fout, "histogram_bounds", "text",
11352 1020 : PQgetvalue(res, rownum, i_histogram_bounds));
11353 1630 : if (!PQgetisnull(res, rownum, i_correlation))
11354 1564 : appendNamedArgument(out, fout, "correlation", "real",
11355 1564 : PQgetvalue(res, rownum, i_correlation));
11356 1630 : if (!PQgetisnull(res, rownum, i_most_common_elems))
11357 16 : appendNamedArgument(out, fout, "most_common_elems", "text",
11358 16 : PQgetvalue(res, rownum, i_most_common_elems));
11359 1630 : if (!PQgetisnull(res, rownum, i_most_common_elem_freqs))
11360 16 : appendNamedArgument(out, fout, "most_common_elem_freqs", "real[]",
11361 16 : PQgetvalue(res, rownum, i_most_common_elem_freqs));
11362 1630 : if (!PQgetisnull(res, rownum, i_elem_count_histogram))
11363 14 : appendNamedArgument(out, fout, "elem_count_histogram", "real[]",
11364 14 : PQgetvalue(res, rownum, i_elem_count_histogram));
11365 1630 : if (fout->remoteVersion >= 170000)
11366 : {
11367 1630 : if (!PQgetisnull(res, rownum, i_range_length_histogram))
11368 8 : appendNamedArgument(out, fout, "range_length_histogram", "text",
11369 8 : PQgetvalue(res, rownum, i_range_length_histogram));
11370 1630 : if (!PQgetisnull(res, rownum, i_range_empty_frac))
11371 8 : appendNamedArgument(out, fout, "range_empty_frac", "real",
11372 8 : PQgetvalue(res, rownum, i_range_empty_frac));
11373 1630 : if (!PQgetisnull(res, rownum, i_range_bounds_histogram))
11374 8 : appendNamedArgument(out, fout, "range_bounds_histogram", "text",
11375 8 : PQgetvalue(res, rownum, i_range_bounds_histogram));
11376 : }
11377 1630 : appendPQExpBufferStr(out, "\n);\n");
11378 : }
11379 :
11380 6518 : destroyPQExpBuffer(query);
11381 6518 : return out->data;
11382 : }
11383 :
11384 : /*
11385 : * dumpRelationStats --
11386 : *
11387 : * Make an ArchiveEntry for the relation statistics. The Archiver will take
11388 : * care of gathering the statistics and generating the restore commands when
11389 : * they are needed.
11390 : */
11391 : static void
11392 6658 : dumpRelationStats(Archive *fout, const RelStatsInfo *rsinfo)
11393 : {
11394 6658 : const DumpableObject *dobj = &rsinfo->dobj;
11395 :
11396 : /* nothing to do if we are not dumping statistics */
11397 6658 : if (!fout->dopt->dumpStatistics)
11398 0 : return;
11399 :
11400 6658 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
11401 6658 : ARCHIVE_OPTS(.tag = dobj->name,
11402 : .namespace = dobj->namespace->dobj.name,
11403 : .description = "STATISTICS DATA",
11404 : .section = rsinfo->section,
11405 : .defnFn = dumpRelationStats_dumper,
11406 : .defnArg = rsinfo,
11407 : .deps = dobj->dependencies,
11408 : .nDeps = dobj->nDeps));
11409 : }
11410 :
11411 : /*
11412 : * dumpTableComment --
11413 : *
11414 : * As above, but dump comments for both the specified table (or view)
11415 : * and its columns.
11416 : */
11417 : static void
11418 148 : dumpTableComment(Archive *fout, const TableInfo *tbinfo,
11419 : const char *reltypename)
11420 : {
11421 148 : DumpOptions *dopt = fout->dopt;
11422 : CommentItem *comments;
11423 : int ncomments;
11424 : PQExpBuffer query;
11425 : PQExpBuffer tag;
11426 :
11427 : /* do nothing, if --no-comments is supplied */
11428 148 : if (dopt->no_comments)
11429 0 : return;
11430 :
11431 : /* Comments are SCHEMA not data */
11432 148 : if (!dopt->dumpSchema)
11433 0 : return;
11434 :
11435 : /* Search for comments associated with relation, using table */
11436 148 : ncomments = findComments(tbinfo->dobj.catId.tableoid,
11437 148 : tbinfo->dobj.catId.oid,
11438 : &comments);
11439 :
11440 : /* If comments exist, build COMMENT ON statements */
11441 148 : if (ncomments <= 0)
11442 0 : return;
11443 :
11444 148 : query = createPQExpBuffer();
11445 148 : tag = createPQExpBuffer();
11446 :
11447 424 : while (ncomments > 0)
11448 : {
11449 276 : const char *descr = comments->descr;
11450 276 : int objsubid = comments->objsubid;
11451 :
11452 276 : if (objsubid == 0)
11453 : {
11454 64 : resetPQExpBuffer(tag);
11455 64 : appendPQExpBuffer(tag, "%s %s", reltypename,
11456 64 : fmtId(tbinfo->dobj.name));
11457 :
11458 64 : resetPQExpBuffer(query);
11459 64 : appendPQExpBuffer(query, "COMMENT ON %s %s IS ", reltypename,
11460 64 : fmtQualifiedDumpable(tbinfo));
11461 64 : appendStringLiteralAH(query, descr, fout);
11462 64 : appendPQExpBufferStr(query, ";\n");
11463 :
11464 64 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
11465 64 : ARCHIVE_OPTS(.tag = tag->data,
11466 : .namespace = tbinfo->dobj.namespace->dobj.name,
11467 : .owner = tbinfo->rolname,
11468 : .description = "COMMENT",
11469 : .section = SECTION_NONE,
11470 : .createStmt = query->data,
11471 : .deps = &(tbinfo->dobj.dumpId),
11472 : .nDeps = 1));
11473 : }
11474 212 : else if (objsubid > 0 && objsubid <= tbinfo->numatts)
11475 : {
11476 212 : resetPQExpBuffer(tag);
11477 212 : appendPQExpBuffer(tag, "COLUMN %s.",
11478 212 : fmtId(tbinfo->dobj.name));
11479 212 : appendPQExpBufferStr(tag, fmtId(tbinfo->attnames[objsubid - 1]));
11480 :
11481 212 : resetPQExpBuffer(query);
11482 212 : appendPQExpBuffer(query, "COMMENT ON COLUMN %s.",
11483 212 : fmtQualifiedDumpable(tbinfo));
11484 212 : appendPQExpBuffer(query, "%s IS ",
11485 212 : fmtId(tbinfo->attnames[objsubid - 1]));
11486 212 : appendStringLiteralAH(query, descr, fout);
11487 212 : appendPQExpBufferStr(query, ";\n");
11488 :
11489 212 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
11490 212 : ARCHIVE_OPTS(.tag = tag->data,
11491 : .namespace = tbinfo->dobj.namespace->dobj.name,
11492 : .owner = tbinfo->rolname,
11493 : .description = "COMMENT",
11494 : .section = SECTION_NONE,
11495 : .createStmt = query->data,
11496 : .deps = &(tbinfo->dobj.dumpId),
11497 : .nDeps = 1));
11498 : }
11499 :
11500 276 : comments++;
11501 276 : ncomments--;
11502 : }
11503 :
11504 148 : destroyPQExpBuffer(query);
11505 148 : destroyPQExpBuffer(tag);
11506 : }
11507 :
11508 : /*
11509 : * findComments --
11510 : *
11511 : * Find the comment(s), if any, associated with the given object. All the
11512 : * objsubid values associated with the given classoid/objoid are found with
11513 : * one search.
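          : *
          : * For instance, a table commented both as a whole and on two of its
          : * columns has three adjacent entries (objsubid 0 plus two column
          : * numbers); the function returns 3 with *items pointing at the first.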
11514 : */
11515 : static int
11516 13238 : findComments(Oid classoid, Oid objoid, CommentItem **items)
11517 : {
11518 13238 : CommentItem *middle = NULL;
11519 : CommentItem *low;
11520 : CommentItem *high;
11521 : int nmatch;
11522 :
11523 : /*
11524 : * Do binary search to find some item matching the object.
11525 : */
11526 13238 : low = &comments[0];
11527 13238 : high = &comments[ncomments - 1];
11528 132454 : while (low <= high)
11529 : {
11530 132362 : middle = low + (high - low) / 2;
11531 :
11532 132362 : if (classoid < middle->classoid)
11533 15570 : high = middle - 1;
11534 116792 : else if (classoid > middle->classoid)
11535 14438 : low = middle + 1;
11536 102354 : else if (objoid < middle->objoid)
11537 43348 : high = middle - 1;
11538 59006 : else if (objoid > middle->objoid)
11539 45860 : low = middle + 1;
11540 : else
11541 13146 : break; /* found a match */
11542 : }
11543 :
11544 13238 : if (low > high) /* no matches */
11545 : {
11546 92 : *items = NULL;
11547 92 : return 0;
11548 : }
11549 :
11550 : /*
11551 : * Now determine how many items match the object. The search loop
11552 : * invariant still holds: only items between low and high inclusive could
11553 : * match.
11554 : */
11555 13146 : nmatch = 1;
11556 13272 : while (middle > low)
11557 : {
11558 6042 : if (classoid != middle[-1].classoid ||
11559 5854 : objoid != middle[-1].objoid)
11560 : break;
11561 126 : middle--;
11562 126 : nmatch++;
11563 : }
11564 :
11565 13146 : *items = middle;
11566 :
11567 13146 : middle += nmatch;
11568 13148 : while (middle <= high)
11569 : {
11570 6676 : if (classoid != middle->classoid ||
11571 6392 : objoid != middle->objoid)
11572 : break;
11573 2 : middle++;
11574 2 : nmatch++;
11575 : }
11576 :
11577 13146 : return nmatch;
11578 : }
11579 :
11580 : /*
11581 : * collectComments --
11582 : *
11583 : * Construct a table of all comments available for database objects;
11584 : * also set the has-comment component flag for each relevant object.
11585 : *
11586 : * We used to do per-object queries for the comments, but it's much faster
11587 : * to pull them all over at once, and on most databases the memory cost
11588 : * isn't high.
11589 : *
11590 : * The table is sorted by classoid/objid/objsubid for speed in lookup.
11591 : */
11592 : static void
11593 376 : collectComments(Archive *fout)
11594 : {
11595 : PGresult *res;
11596 : PQExpBuffer query;
11597 : int i_description;
11598 : int i_classoid;
11599 : int i_objoid;
11600 : int i_objsubid;
11601 : int ntups;
11602 : int i;
11603 : DumpableObject *dobj;
11604 :
11605 376 : query = createPQExpBuffer();
11606 :
11607 376 : appendPQExpBufferStr(query, "SELECT description, classoid, objoid, objsubid "
11608 : "FROM pg_catalog.pg_description "
11609 : "ORDER BY classoid, objoid, objsubid");
11610 :
11611 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
11612 :
11613 : /* Construct lookup table containing OIDs in numeric form */
11614 :
11615 376 : i_description = PQfnumber(res, "description");
11616 376 : i_classoid = PQfnumber(res, "classoid");
11617 376 : i_objoid = PQfnumber(res, "objoid");
11618 376 : i_objsubid = PQfnumber(res, "objsubid");
11619 :
11620 376 : ntups = PQntuples(res);
11621 :
11622 376 : comments = (CommentItem *) pg_malloc(ntups * sizeof(CommentItem));
11623 376 : ncomments = 0;
11624 376 : dobj = NULL;
11625 :
11626 2019314 : for (i = 0; i < ntups; i++)
11627 : {
11628 : CatalogId objId;
11629 : int subid;
11630 :
11631 2018938 : objId.tableoid = atooid(PQgetvalue(res, i, i_classoid));
11632 2018938 : objId.oid = atooid(PQgetvalue(res, i, i_objoid));
11633 2018938 : subid = atoi(PQgetvalue(res, i, i_objsubid));
11634 :
11635 : /* We needn't remember comments that don't match any dumpable object */
11636 2018938 : if (dobj == NULL ||
11637 725698 : dobj->catId.tableoid != objId.tableoid ||
11638 721088 : dobj->catId.oid != objId.oid)
11639 2018758 : dobj = findObjectByCatalogId(objId);
11640 2018938 : if (dobj == NULL)
11641 1292876 : continue;
11642 :
11643 : /*
11644 : * Comments on columns of composite types are linked to the type's
11645 : * pg_class entry, but we need to set the DUMP_COMPONENT_COMMENT flag
11646 : * in the type's own DumpableObject.
11647 : */
11648 726062 : if (subid != 0 && dobj->objType == DO_TABLE &&
11649 388 : ((TableInfo *) dobj)->relkind == RELKIND_COMPOSITE_TYPE)
11650 90 : {
11651 : TypeInfo *cTypeInfo;
11652 :
11653 90 : cTypeInfo = findTypeByOid(((TableInfo *) dobj)->reltype);
11654 90 : if (cTypeInfo)
11655 90 : cTypeInfo->dobj.components |= DUMP_COMPONENT_COMMENT;
11656 : }
11657 : else
11658 725972 : dobj->components |= DUMP_COMPONENT_COMMENT;
11659 :
11660 726062 : comments[ncomments].descr = pg_strdup(PQgetvalue(res, i, i_description));
11661 726062 : comments[ncomments].classoid = objId.tableoid;
11662 726062 : comments[ncomments].objoid = objId.oid;
11663 726062 : comments[ncomments].objsubid = subid;
11664 726062 : ncomments++;
11665 : }
11666 :
11667 376 : PQclear(res);
11668 376 : destroyPQExpBuffer(query);
11669 376 : }
11670 :
11671 : /*
11672 : * dumpDumpableObject
11673 : *
11674 : * This routine and its subsidiaries are responsible for creating
11675 : * ArchiveEntries (TOC objects) for each object to be dumped.
11676 : */
11677 : static void
11678 1401884 : dumpDumpableObject(Archive *fout, DumpableObject *dobj)
11679 : {
11680 : /*
11681 : * Clear any dump-request bits for components that don't exist for this
11682 : * object. (This makes it safe to initially use DUMP_COMPONENT_ALL as the
11683 : * request for every kind of object.)
11684 : */
11685 1401884 : dobj->dump &= dobj->components;
11686 :
11687 : /* Now, short-circuit if there's nothing to be done here. */
11688 1401884 : if (dobj->dump == 0)
11689 1248376 : return;
11690 :
11691 153508 : switch (dobj->objType)
11692 : {
11693 978 : case DO_NAMESPACE:
11694 978 : dumpNamespace(fout, (const NamespaceInfo *) dobj);
11695 978 : break;
11696 48 : case DO_EXTENSION:
11697 48 : dumpExtension(fout, (const ExtensionInfo *) dobj);
11698 48 : break;
11699 1846 : case DO_TYPE:
11700 1846 : dumpType(fout, (const TypeInfo *) dobj);
11701 1846 : break;
11702 146 : case DO_SHELL_TYPE:
11703 146 : dumpShellType(fout, (const ShellTypeInfo *) dobj);
11704 146 : break;
11705 3656 : case DO_FUNC:
11706 3656 : dumpFunc(fout, (const FuncInfo *) dobj);
11707 3656 : break;
11708 584 : case DO_AGG:
11709 584 : dumpAgg(fout, (const AggInfo *) dobj);
11710 584 : break;
11711 5044 : case DO_OPERATOR:
11712 5044 : dumpOpr(fout, (const OprInfo *) dobj);
11713 5044 : break;
11714 160 : case DO_ACCESS_METHOD:
11715 160 : dumpAccessMethod(fout, (const AccessMethodInfo *) dobj);
11716 160 : break;
11717 1332 : case DO_OPCLASS:
11718 1332 : dumpOpclass(fout, (const OpclassInfo *) dobj);
11719 1332 : break;
11720 1110 : case DO_OPFAMILY:
11721 1110 : dumpOpfamily(fout, (const OpfamilyInfo *) dobj);
11722 1110 : break;
11723 5074 : case DO_COLLATION:
11724 5074 : dumpCollation(fout, (const CollInfo *) dobj);
11725 5074 : break;
11726 844 : case DO_CONVERSION:
11727 844 : dumpConversion(fout, (const ConvInfo *) dobj);
11728 844 : break;
11729 62728 : case DO_TABLE:
11730 62728 : dumpTable(fout, (const TableInfo *) dobj);
11731 62728 : break;
11732 2782 : case DO_TABLE_ATTACH:
11733 2782 : dumpTableAttach(fout, (const TableAttachInfo *) dobj);
11734 2782 : break;
11735 2064 : case DO_ATTRDEF:
11736 2064 : dumpAttrDef(fout, (const AttrDefInfo *) dobj);
11737 2064 : break;
11738 5200 : case DO_INDEX:
11739 5200 : dumpIndex(fout, (const IndxInfo *) dobj);
11740 5200 : break;
11741 1148 : case DO_INDEX_ATTACH:
11742 1148 : dumpIndexAttach(fout, (const IndexAttachInfo *) dobj);
11743 1148 : break;
11744 342 : case DO_STATSEXT:
11745 342 : dumpStatisticsExt(fout, (const StatsExtInfo *) dobj);
11746 342 : dumpStatisticsExtStats(fout, (const StatsExtInfo *) dobj);
11747 342 : break;
11748 690 : case DO_REFRESH_MATVIEW:
11749 690 : refreshMatViewData(fout, (const TableDataInfo *) dobj);
11750 690 : break;
11751 2258 : case DO_RULE:
11752 2258 : dumpRule(fout, (const RuleInfo *) dobj);
11753 2258 : break;
11754 1046 : case DO_TRIGGER:
11755 1046 : dumpTrigger(fout, (const TriggerInfo *) dobj);
11756 1046 : break;
11757 84 : case DO_EVENT_TRIGGER:
11758 84 : dumpEventTrigger(fout, (const EventTriggerInfo *) dobj);
11759 84 : break;
11760 4636 : case DO_CONSTRAINT:
11761 4636 : dumpConstraint(fout, (const ConstraintInfo *) dobj);
11762 4636 : break;
11763 342 : case DO_FK_CONSTRAINT:
11764 342 : dumpConstraint(fout, (const ConstraintInfo *) dobj);
11765 342 : break;
11766 164 : case DO_PROCLANG:
11767 164 : dumpProcLang(fout, (const ProcLangInfo *) dobj);
11768 164 : break;
11769 134 : case DO_CAST:
11770 134 : dumpCast(fout, (const CastInfo *) dobj);
11771 134 : break;
11772 84 : case DO_TRANSFORM:
11773 84 : dumpTransform(fout, (const TransformInfo *) dobj);
11774 84 : break;
11775 786 : case DO_SEQUENCE_SET:
11776 786 : dumpSequenceData(fout, (const TableDataInfo *) dobj);
11777 786 : break;
11778 8520 : case DO_TABLE_DATA:
11779 8520 : dumpTableData(fout, (const TableDataInfo *) dobj);
11780 8520 : break;
11781 28316 : case DO_DUMMY_TYPE:
11782 : /* table rowtypes and array types are never dumped separately */
11783 28316 : break;
11784 82 : case DO_TSPARSER:
11785 82 : dumpTSParser(fout, (const TSParserInfo *) dobj);
11786 82 : break;
11787 358 : case DO_TSDICT:
11788 358 : dumpTSDictionary(fout, (const TSDictInfo *) dobj);
11789 358 : break;
11790 106 : case DO_TSTEMPLATE:
11791 106 : dumpTSTemplate(fout, (const TSTemplateInfo *) dobj);
11792 106 : break;
11793 308 : case DO_TSCONFIG:
11794 308 : dumpTSConfig(fout, (const TSConfigInfo *) dobj);
11795 308 : break;
11796 104 : case DO_FDW:
11797 104 : dumpForeignDataWrapper(fout, (const FdwInfo *) dobj);
11798 104 : break;
11799 112 : case DO_FOREIGN_SERVER:
11800 112 : dumpForeignServer(fout, (const ForeignServerInfo *) dobj);
11801 112 : break;
11802 320 : case DO_DEFAULT_ACL:
11803 320 : dumpDefaultACL(fout, (const DefaultACLInfo *) dobj);
11804 320 : break;
11805 168 : case DO_LARGE_OBJECT:
11806 168 : dumpLO(fout, (const LoInfo *) dobj);
11807 168 : break;
11808 180 : case DO_LARGE_OBJECT_DATA:
11809 180 : if (dobj->dump & DUMP_COMPONENT_DATA)
11810 : {
11811 : LoInfo *loinfo;
11812 : TocEntry *te;
11813 :
11814 180 : loinfo = (LoInfo *) findObjectByDumpId(dobj->dependencies[0]);
11815 180 : if (loinfo == NULL)
11816 0 : pg_fatal("missing metadata for large objects \"%s\"",
11817 : dobj->name);
11818 :
11819 180 : te = ArchiveEntry(fout, dobj->catId, dobj->dumpId,
11820 180 : ARCHIVE_OPTS(.tag = dobj->name,
11821 : .owner = loinfo->rolname,
11822 : .description = "BLOBS",
11823 : .section = SECTION_DATA,
11824 : .deps = dobj->dependencies,
11825 : .nDeps = dobj->nDeps,
11826 : .dumpFn = dumpLOs,
11827 : .dumpArg = loinfo));
11828 :
11829 : /*
11830 : * Set the TocEntry's dataLength in case we are doing a
11831 : * parallel dump and want to order dump jobs by table size.
11832 : * (We need some size estimate for every TocEntry with a
11833 : * DataDumper function.) We don't currently have any cheap
11834 : * way to estimate the size of LOs, but fortunately it doesn't
11835 : * matter too much as long as we get large batches of LOs
11836 : * processed reasonably early. Assume 8K per blob.
11837 : */
11838 180 : te->dataLength = loinfo->numlos * (pgoff_t) 8192;
11839 : }
11840 180 : break;
11841 652 : case DO_POLICY:
11842 652 : dumpPolicy(fout, (const PolicyInfo *) dobj);
11843 652 : break;
11844 570 : case DO_PUBLICATION:
11845 570 : dumpPublication(fout, (const PublicationInfo *) dobj);
11846 570 : break;
11847 568 : case DO_PUBLICATION_REL:
11848 568 : dumpPublicationTable(fout, (const PublicationRelInfo *) dobj);
11849 568 : break;
11850 198 : case DO_PUBLICATION_TABLE_IN_SCHEMA:
11851 198 : dumpPublicationNamespace(fout,
11852 : (const PublicationSchemaInfo *) dobj);
11853 198 : break;
11854 220 : case DO_SUBSCRIPTION:
11855 220 : dumpSubscription(fout, (const SubscriptionInfo *) dobj);
11856 220 : break;
11857 6 : case DO_SUBSCRIPTION_REL:
11858 6 : dumpSubscriptionTable(fout, (const SubRelInfo *) dobj);
11859 6 : break;
11860 6658 : case DO_REL_STATS:
11861 6658 : dumpRelationStats(fout, (const RelStatsInfo *) dobj);
11862 6658 : break;
11863 752 : case DO_PRE_DATA_BOUNDARY:
11864 : case DO_POST_DATA_BOUNDARY:
11865 : /* never dumped, nothing to do */
11866 752 : break;
11867 : }
11868 : }
11869 :
11870 : /*
11871 : * dumpNamespace
11872 : * writes out to fout the queries to recreate a user-defined namespace
11873 : */
11874 : static void
11875 978 : dumpNamespace(Archive *fout, const NamespaceInfo *nspinfo)
11876 : {
11877 978 : DumpOptions *dopt = fout->dopt;
11878 : PQExpBuffer q;
11879 : PQExpBuffer delq;
11880 : char *qnspname;
11881 :
11882 : /* Do nothing if not dumping schema */
11883 978 : if (!dopt->dumpSchema)
11884 56 : return;
11885 :
11886 922 : q = createPQExpBuffer();
11887 922 : delq = createPQExpBuffer();
11888 :
11889 922 : qnspname = pg_strdup(fmtId(nspinfo->dobj.name));
11890 :
11891 922 : if (nspinfo->create)
11892 : {
11893 616 : appendPQExpBuffer(delq, "DROP SCHEMA %s;\n", qnspname);
11894 616 : appendPQExpBuffer(q, "CREATE SCHEMA %s;\n", qnspname);
11895 : }
11896 : else
11897 : {
11898 : /* see selectDumpableNamespace() */
11899 306 : appendPQExpBufferStr(delq,
11900 : "-- *not* dropping schema, since initdb creates it\n");
11901 306 : appendPQExpBufferStr(q,
11902 : "-- *not* creating schema, since initdb creates it\n");
11903 : }
11904 :
11905 922 : if (dopt->binary_upgrade)
11906 188 : binary_upgrade_extension_member(q, &nspinfo->dobj,
11907 : "SCHEMA", qnspname, NULL);
11908 :
11909 922 : if (nspinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
11910 370 : ArchiveEntry(fout, nspinfo->dobj.catId, nspinfo->dobj.dumpId,
11911 370 : ARCHIVE_OPTS(.tag = nspinfo->dobj.name,
11912 : .owner = nspinfo->rolname,
11913 : .description = "SCHEMA",
11914 : .section = SECTION_PRE_DATA,
11915 : .createStmt = q->data,
11916 : .dropStmt = delq->data));
11917 :
11918 : /* Dump Schema Comments and Security Labels */
11919 922 : if (nspinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
11920 : {
11921 316 : const char *initdb_comment = NULL;
11922 :
11923 316 : if (!nspinfo->create && strcmp(qnspname, "public") == 0)
11924 234 : initdb_comment = "standard public schema";
11925 316 : dumpCommentExtended(fout, "SCHEMA", qnspname,
11926 316 : NULL, nspinfo->rolname,
11927 316 : nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId,
11928 : initdb_comment);
11929 : }
11930 :
11931 922 : if (nspinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
11932 0 : dumpSecLabel(fout, "SCHEMA", qnspname,
11933 0 : NULL, nspinfo->rolname,
11934 0 : nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId);
11935 :
11936 922 : if (nspinfo->dobj.dump & DUMP_COMPONENT_ACL)
11937 718 : dumpACL(fout, nspinfo->dobj.dumpId, InvalidDumpId, "SCHEMA",
11938 : qnspname, NULL, NULL,
11939 718 : NULL, nspinfo->rolname, &nspinfo->dacl);
11940 :
11941 922 : free(qnspname);
11942 :
11943 922 : destroyPQExpBuffer(q);
11944 922 : destroyPQExpBuffer(delq);
11945 : }
11946 :
11947 : /*
11948 : * dumpExtension
11949 : * writes out to fout the queries to recreate an extension
11950 : */
11951 : static void
11952 48 : dumpExtension(Archive *fout, const ExtensionInfo *extinfo)
11953 : {
11954 48 : DumpOptions *dopt = fout->dopt;
11955 : PQExpBuffer q;
11956 : PQExpBuffer delq;
11957 : char *qextname;
11958 :
11959 : /* Do nothing if not dumping schema */
11960 48 : if (!dopt->dumpSchema)
11961 2 : return;
11962 :
11963 46 : q = createPQExpBuffer();
11964 46 : delq = createPQExpBuffer();
11965 :
11966 46 : qextname = pg_strdup(fmtId(extinfo->dobj.name));
11967 :
11968 46 : appendPQExpBuffer(delq, "DROP EXTENSION %s;\n", qextname);
11969 :
11970 46 : if (!dopt->binary_upgrade)
11971 : {
11972 : /*
11973 : * In a regular dump, we simply create the extension, intentionally
11974 : * not specifying a version, so that the destination installation's
11975 : * default version is used.
11976 : *
11977 : * Use of IF NOT EXISTS here is unlike our behavior for other object
11978 : * types; but there are various scenarios in which it's convenient to
11979 : * manually create the desired extension before restoring, so we
11980 : * prefer to allow it to exist already.
11981 : */
11982 34 : appendPQExpBuffer(q, "CREATE EXTENSION IF NOT EXISTS %s WITH SCHEMA %s;\n",
11983 34 : qextname, fmtId(extinfo->namespace));
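/*
 * A sketch of the resulting command for a hypothetical extension
 * "myext" installed in schema "public":
 *
 *     CREATE EXTENSION IF NOT EXISTS myext WITH SCHEMA public;
 */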
11984 : }
11985 : else
11986 : {
11987 : /*
11988 : * In binary-upgrade mode, it's critical to reproduce the state of the
11989 : * database exactly, so our procedure is to create an empty extension,
11990 : * restore all the contained objects normally, and add them to the
11991 : * extension one by one. This function performs just the first of
11992 : * those steps. binary_upgrade_extension_member() takes care of
11993 : * adding member objects as they're created.
11994 : */
11995 : int i;
11996 : int n;
11997 :
11998 12 : appendPQExpBufferStr(q, "-- For binary upgrade, create an empty extension and insert objects into it\n");
11999 :
12000 : /*
12001 : * We unconditionally create the extension, so we must drop it if it
12002 : * exists. This could happen if the user deleted 'plpgsql' and then
12003 : * re-added it, causing its oid to be greater than g_last_builtin_oid.
12004 : */
12005 12 : appendPQExpBuffer(q, "DROP EXTENSION IF EXISTS %s;\n", qextname);
12006 :
12007 12 : appendPQExpBufferStr(q,
12008 : "SELECT pg_catalog.binary_upgrade_create_empty_extension(");
12009 12 : appendStringLiteralAH(q, extinfo->dobj.name, fout);
12010 12 : appendPQExpBufferStr(q, ", ");
12011 12 : appendStringLiteralAH(q, extinfo->namespace, fout);
12012 12 : appendPQExpBufferStr(q, ", ");
12013 12 : appendPQExpBuffer(q, "%s, ", extinfo->relocatable ? "true" : "false");
12014 12 : appendStringLiteralAH(q, extinfo->extversion, fout);
12015 12 : appendPQExpBufferStr(q, ", ");
12016 :
12017 : /*
12018 : * Note that we're pushing extconfig (an OID array) back into
12019 : * pg_extension exactly as-is. This is OK because pg_class OIDs are
12020 : * preserved in binary upgrade.
12021 : */
12022 12 : if (strlen(extinfo->extconfig) > 2)
12023 2 : appendStringLiteralAH(q, extinfo->extconfig, fout);
12024 : else
12025 10 : appendPQExpBufferStr(q, "NULL");
12026 12 : appendPQExpBufferStr(q, ", ");
12027 12 : if (strlen(extinfo->extcondition) > 2)
12028 2 : appendStringLiteralAH(q, extinfo->extcondition, fout);
12029 : else
12030 10 : appendPQExpBufferStr(q, "NULL");
12031 12 : appendPQExpBufferStr(q, ", ");
12032 12 : appendPQExpBufferStr(q, "ARRAY[");
12033 12 : n = 0;
12034 24 : for (i = 0; i < extinfo->dobj.nDeps; i++)
12035 : {
12036 : DumpableObject *extobj;
12037 :
12038 12 : extobj = findObjectByDumpId(extinfo->dobj.dependencies[i]);
12039 12 : if (extobj && extobj->objType == DO_EXTENSION)
12040 : {
12041 0 : if (n++ > 0)
12042 0 : appendPQExpBufferChar(q, ',');
12043 0 : appendStringLiteralAH(q, extobj->name, fout);
12044 : }
12045 : }
12046 12 : appendPQExpBufferStr(q, "]::pg_catalog.text[]");
12047 12 : appendPQExpBufferStr(q, ");\n");
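/*
 * Assembled end to end for a hypothetical non-relocatable extension
 * "myext" at version '1.0' with empty extconfig/extcondition and no
 * dependencies on other extensions, the binary-upgrade path emits
 * roughly:
 *
 *     DROP EXTENSION IF EXISTS myext;
 *     SELECT pg_catalog.binary_upgrade_create_empty_extension('myext',
 *         'public', false, '1.0', NULL, NULL, ARRAY[]::pg_catalog.text[]);
 */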
12048 : }
12049 :
12050 46 : if (extinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12051 46 : ArchiveEntry(fout, extinfo->dobj.catId, extinfo->dobj.dumpId,
12052 46 : ARCHIVE_OPTS(.tag = extinfo->dobj.name,
12053 : .description = "EXTENSION",
12054 : .section = SECTION_PRE_DATA,
12055 : .createStmt = q->data,
12056 : .dropStmt = delq->data));
12057 :
12058 : /* Dump Extension Comments */
12059 46 : if (extinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12060 46 : dumpComment(fout, "EXTENSION", qextname,
12061 : NULL, "",
12062 46 : extinfo->dobj.catId, 0, extinfo->dobj.dumpId);
12063 :
12064 46 : free(qextname);
12065 :
12066 46 : destroyPQExpBuffer(q);
12067 46 : destroyPQExpBuffer(delq);
12068 : }
12069 :
12070 : /*
12071 : * dumpType
12072 : * writes out to fout the queries to recreate a user-defined type
12073 : */
12074 : static void
12075 1846 : dumpType(Archive *fout, const TypeInfo *tyinfo)
12076 : {
12077 1846 : DumpOptions *dopt = fout->dopt;
12078 :
12079 : /* Do nothing if not dumping schema */
12080 1846 : if (!dopt->dumpSchema)
12081 98 : return;
12082 :
12083 : /* Dump out in proper style */
12084 1748 : if (tyinfo->typtype == TYPTYPE_BASE)
12085 566 : dumpBaseType(fout, tyinfo);
12086 1182 : else if (tyinfo->typtype == TYPTYPE_DOMAIN)
12087 304 : dumpDomain(fout, tyinfo);
12088 878 : else if (tyinfo->typtype == TYPTYPE_COMPOSITE)
12089 260 : dumpCompositeType(fout, tyinfo);
12090 618 : else if (tyinfo->typtype == TYPTYPE_ENUM)
12091 170 : dumpEnumType(fout, tyinfo);
12092 448 : else if (tyinfo->typtype == TYPTYPE_RANGE)
12093 224 : dumpRangeType(fout, tyinfo);
12094 224 : else if (tyinfo->typtype == TYPTYPE_PSEUDO && !tyinfo->isDefined)
12095 74 : dumpUndefinedType(fout, tyinfo);
12096 : else
12097 150 : pg_log_warning("typtype of data type \"%s\" appears to be invalid",
12098 : tyinfo->dobj.name);
12099 : }
12100 :
12101 : /*
12102 : * dumpEnumType
12103 : * writes out to fout the queries to recreate a user-defined enum type
12104 : */
12105 : static void
12106 170 : dumpEnumType(Archive *fout, const TypeInfo *tyinfo)
12107 : {
12108 170 : DumpOptions *dopt = fout->dopt;
12109 170 : PQExpBuffer q = createPQExpBuffer();
12110 170 : PQExpBuffer delq = createPQExpBuffer();
12111 170 : PQExpBuffer query = createPQExpBuffer();
12112 : PGresult *res;
12113 : int num,
12114 : i;
12115 : Oid enum_oid;
12116 : char *qtypname;
12117 : char *qualtypname;
12118 : char *label;
12119 : int i_enumlabel;
12120 : int i_oid;
12121 :
12122 170 : if (!fout->is_prepared[PREPQUERY_DUMPENUMTYPE])
12123 : {
12124 : /* Set up query for enum-specific details */
12125 80 : appendPQExpBufferStr(query,
12126 : "PREPARE dumpEnumType(pg_catalog.oid) AS\n"
12127 : "SELECT oid, enumlabel "
12128 : "FROM pg_catalog.pg_enum "
12129 : "WHERE enumtypid = $1 "
12130 : "ORDER BY enumsortorder");
12131 :
12132 80 : ExecuteSqlStatement(fout, query->data);
12133 :
12134 80 : fout->is_prepared[PREPQUERY_DUMPENUMTYPE] = true;
12135 : }
12136 :
12137 170 : printfPQExpBuffer(query,
12138 : "EXECUTE dumpEnumType('%u')",
12139 170 : tyinfo->dobj.catId.oid);
12140 :
12141 170 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
12142 :
12143 170 : num = PQntuples(res);
12144 :
12145 170 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12146 170 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
12147 :
12148 : /*
12149 : * CASCADE shouldn't be required here, unlike for normal base types, since
12150 : * the I/O functions are generic and do not get dropped.
12151 : */
12152 170 : appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12153 :
12154 170 : if (dopt->binary_upgrade)
12155 12 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
12156 12 : tyinfo->dobj.catId.oid,
12157 : false, false);
12158 :
12159 170 : appendPQExpBuffer(q, "CREATE TYPE %s AS ENUM (",
12160 : qualtypname);
12161 :
12162 170 : if (!dopt->binary_upgrade)
12163 : {
12164 158 : i_enumlabel = PQfnumber(res, "enumlabel");
12165 :
12166 : /* Labels with server-assigned oids */
12167 964 : for (i = 0; i < num; i++)
12168 : {
12169 806 : label = PQgetvalue(res, i, i_enumlabel);
12170 806 : if (i > 0)
12171 648 : appendPQExpBufferChar(q, ',');
12172 806 : appendPQExpBufferStr(q, "\n ");
12173 806 : appendStringLiteralAH(q, label, fout);
12174 : }
12175 : }
12176 :
12177 170 : appendPQExpBufferStr(q, "\n);\n");
12178 :
12179 170 : if (dopt->binary_upgrade)
12180 : {
12181 12 : i_oid = PQfnumber(res, "oid");
12182 12 : i_enumlabel = PQfnumber(res, "enumlabel");
12183 :
12184 : /* Labels with dump-assigned (preserved) oids */
12185 124 : for (i = 0; i < num; i++)
12186 : {
12187 112 : enum_oid = atooid(PQgetvalue(res, i, i_oid));
12188 112 : label = PQgetvalue(res, i, i_enumlabel);
12189 :
12190 112 : if (i == 0)
12191 12 : appendPQExpBufferStr(q, "\n-- For binary upgrade, must preserve pg_enum oids\n");
12192 112 : appendPQExpBuffer(q,
12193 : "SELECT pg_catalog.binary_upgrade_set_next_pg_enum_oid('%u'::pg_catalog.oid);\n",
12194 : enum_oid);
12195 112 : appendPQExpBuffer(q, "ALTER TYPE %s ADD VALUE ", qualtypname);
12196 112 : appendStringLiteralAH(q, label, fout);
12197 112 : appendPQExpBufferStr(q, ";\n\n");
12198 : }
12199 : }
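/*
 * For a hypothetical enum public.mood with labels 'sad', 'ok' and
 * 'happy', the regular branch above produces roughly
 *
 *     CREATE TYPE public.mood AS ENUM (
 *         'sad',
 *         'ok',
 *         'happy'
 *     );
 *
 * In binary-upgrade mode the type is instead created with an empty
 * label list, and each label is appended with its original OID
 * preserved (the OID shown here is a placeholder):
 *
 *     SELECT pg_catalog.binary_upgrade_set_next_pg_enum_oid('16402'::pg_catalog.oid);
 *     ALTER TYPE public.mood ADD VALUE 'sad';
 */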
12200 :
12201 170 : if (dopt->binary_upgrade)
12202 12 : binary_upgrade_extension_member(q, &tyinfo->dobj,
12203 : "TYPE", qtypname,
12204 12 : tyinfo->dobj.namespace->dobj.name);
12205 :
12206 170 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12207 170 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12208 170 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12209 : .namespace = tyinfo->dobj.namespace->dobj.name,
12210 : .owner = tyinfo->rolname,
12211 : .description = "TYPE",
12212 : .section = SECTION_PRE_DATA,
12213 : .createStmt = q->data,
12214 : .dropStmt = delq->data));
12215 :
12216 : /* Dump Type Comments and Security Labels */
12217 170 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12218 64 : dumpComment(fout, "TYPE", qtypname,
12219 64 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12220 64 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12221 :
12222 170 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12223 0 : dumpSecLabel(fout, "TYPE", qtypname,
12224 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12225 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12226 :
12227 170 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12228 64 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12229 : qtypname, NULL,
12230 64 : tyinfo->dobj.namespace->dobj.name,
12231 64 : NULL, tyinfo->rolname, &tyinfo->dacl);
12232 :
12233 170 : PQclear(res);
12234 170 : destroyPQExpBuffer(q);
12235 170 : destroyPQExpBuffer(delq);
12236 170 : destroyPQExpBuffer(query);
12237 170 : free(qtypname);
12238 170 : free(qualtypname);
12239 170 : }
12240 :
12241 : /*
12242 : * dumpRangeType
12243 : * writes out to fout the queries to recreate a user-defined range type
12244 : */
12245 : static void
12246 224 : dumpRangeType(Archive *fout, const TypeInfo *tyinfo)
12247 : {
12248 224 : DumpOptions *dopt = fout->dopt;
12249 224 : PQExpBuffer q = createPQExpBuffer();
12250 224 : PQExpBuffer delq = createPQExpBuffer();
12251 224 : PQExpBuffer query = createPQExpBuffer();
12252 : PGresult *res;
12253 : Oid collationOid;
12254 : char *qtypname;
12255 : char *qualtypname;
12256 : char *procname;
12257 :
12258 224 : if (!fout->is_prepared[PREPQUERY_DUMPRANGETYPE])
12259 : {
12260 : /* Set up query for range-specific details */
12261 80 : appendPQExpBufferStr(query,
12262 : "PREPARE dumpRangeType(pg_catalog.oid) AS\n");
12263 :
12264 80 : appendPQExpBufferStr(query,
12265 : "SELECT ");
12266 :
12267 80 : if (fout->remoteVersion >= 140000)
12268 80 : appendPQExpBufferStr(query,
12269 : "pg_catalog.format_type(rngmultitypid, NULL) AS rngmultitype, ");
12270 : else
12271 0 : appendPQExpBufferStr(query,
12272 : "NULL AS rngmultitype, ");
12273 :
12274 80 : appendPQExpBufferStr(query,
12275 : "pg_catalog.format_type(rngsubtype, NULL) AS rngsubtype, "
12276 : "opc.opcname AS opcname, "
12277 : "(SELECT nspname FROM pg_catalog.pg_namespace nsp "
12278 : " WHERE nsp.oid = opc.opcnamespace) AS opcnsp, "
12279 : "opc.opcdefault, "
12280 : "CASE WHEN rngcollation = st.typcollation THEN 0 "
12281 : " ELSE rngcollation END AS collation, "
12282 : "rngcanonical, rngsubdiff "
12283 : "FROM pg_catalog.pg_range r, pg_catalog.pg_type st, "
12284 : " pg_catalog.pg_opclass opc "
12285 : "WHERE st.oid = rngsubtype AND opc.oid = rngsubopc AND "
12286 : "rngtypid = $1");
12287 :
12288 80 : ExecuteSqlStatement(fout, query->data);
12289 :
12290 80 : fout->is_prepared[PREPQUERY_DUMPRANGETYPE] = true;
12291 : }
12292 :
12293 224 : printfPQExpBuffer(query,
12294 : "EXECUTE dumpRangeType('%u')",
12295 224 : tyinfo->dobj.catId.oid);
12296 :
12297 224 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
12298 :
12299 224 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12300 224 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
12301 :
12302 : /*
12303 : * CASCADE shouldn't be required here, unlike for normal base types, since
12304 : * the I/O functions are generic and do not get dropped.
12305 : */
12306 224 : appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12307 :
12308 224 : if (dopt->binary_upgrade)
12309 16 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
12310 16 : tyinfo->dobj.catId.oid,
12311 : false, true);
12312 :
12313 224 : appendPQExpBuffer(q, "CREATE TYPE %s AS RANGE (",
12314 : qualtypname);
12315 :
12316 224 : appendPQExpBuffer(q, "\n subtype = %s",
12317 : PQgetvalue(res, 0, PQfnumber(res, "rngsubtype")));
12318 :
12319 224 : if (!PQgetisnull(res, 0, PQfnumber(res, "rngmultitype")))
12320 224 : appendPQExpBuffer(q, ",\n multirange_type_name = %s",
12321 : PQgetvalue(res, 0, PQfnumber(res, "rngmultitype")));
12322 :
12323 : /* print subtype_opclass only if not default for subtype */
12324 224 : if (PQgetvalue(res, 0, PQfnumber(res, "opcdefault"))[0] != 't')
12325 : {
12326 64 : char *opcname = PQgetvalue(res, 0, PQfnumber(res, "opcname"));
12327 64 : char *nspname = PQgetvalue(res, 0, PQfnumber(res, "opcnsp"));
12328 :
12329 64 : appendPQExpBuffer(q, ",\n subtype_opclass = %s.",
12330 : fmtId(nspname));
12331 64 : appendPQExpBufferStr(q, fmtId(opcname));
12332 : }
12333 :
12334 224 : collationOid = atooid(PQgetvalue(res, 0, PQfnumber(res, "collation")));
12335 224 : if (OidIsValid(collationOid))
12336 : {
12337 74 : CollInfo *coll = findCollationByOid(collationOid);
12338 :
12339 74 : if (coll)
12340 74 : appendPQExpBuffer(q, ",\n collation = %s",
12341 74 : fmtQualifiedDumpable(coll));
12342 : }
12343 :
12344 224 : procname = PQgetvalue(res, 0, PQfnumber(res, "rngcanonical"));
12345 224 : if (strcmp(procname, "-") != 0)
12346 18 : appendPQExpBuffer(q, ",\n canonical = %s", procname);
12347 :
12348 224 : procname = PQgetvalue(res, 0, PQfnumber(res, "rngsubdiff"));
12349 224 : if (strcmp(procname, "-") != 0)
12350 46 : appendPQExpBuffer(q, ",\n subtype_diff = %s", procname);
12351 :
12352 224 : appendPQExpBufferStr(q, "\n);\n");
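/*
 * Putting the clauses together for a hypothetical range type
 * public.textrange over text with a non-default collation, the result
 * reads roughly
 *
 *     CREATE TYPE public.textrange AS RANGE (
 *         subtype = text,
 *         multirange_type_name = public.textmultirange,
 *         collation = pg_catalog."C"
 *     );
 *
 * subtype_opclass, canonical and subtype_diff clauses are added only
 * when the checks above find non-default values.
 */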
12353 :
12354 224 : if (dopt->binary_upgrade)
12355 16 : binary_upgrade_extension_member(q, &tyinfo->dobj,
12356 : "TYPE", qtypname,
12357 16 : tyinfo->dobj.namespace->dobj.name);
12358 :
12359 224 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12360 224 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12361 224 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12362 : .namespace = tyinfo->dobj.namespace->dobj.name,
12363 : .owner = tyinfo->rolname,
12364 : .description = "TYPE",
12365 : .section = SECTION_PRE_DATA,
12366 : .createStmt = q->data,
12367 : .dropStmt = delq->data));
12368 :
12369 : /* Dump Type Comments and Security Labels */
12370 224 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12371 100 : dumpComment(fout, "TYPE", qtypname,
12372 100 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12373 100 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12374 :
12375 224 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12376 0 : dumpSecLabel(fout, "TYPE", qtypname,
12377 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12378 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12379 :
12380 224 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12381 64 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12382 : qtypname, NULL,
12383 64 : tyinfo->dobj.namespace->dobj.name,
12384 64 : NULL, tyinfo->rolname, &tyinfo->dacl);
12385 :
12386 224 : PQclear(res);
12387 224 : destroyPQExpBuffer(q);
12388 224 : destroyPQExpBuffer(delq);
12389 224 : destroyPQExpBuffer(query);
12390 224 : free(qtypname);
12391 224 : free(qualtypname);
12392 224 : }
12393 :
12394 : /*
12395 : * dumpUndefinedType
12396 : * writes out to fout the queries to recreate a !typisdefined type
12397 : *
12398 : * This is a shell type, but we use different terminology to distinguish
12399 : * this case from the one where we have to emit a shell type definition to break
12400 : * circular dependencies. An undefined type shouldn't ever have anything
12401 : * depending on it.
12402 : */
12403 : static void
12404 74 : dumpUndefinedType(Archive *fout, const TypeInfo *tyinfo)
12405 : {
12406 74 : DumpOptions *dopt = fout->dopt;
12407 74 : PQExpBuffer q = createPQExpBuffer();
12408 74 : PQExpBuffer delq = createPQExpBuffer();
12409 : char *qtypname;
12410 : char *qualtypname;
12411 :
12412 74 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12413 74 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
12414 :
12415 74 : appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
12416 :
12417 74 : if (dopt->binary_upgrade)
12418 4 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
12419 4 : tyinfo->dobj.catId.oid,
12420 : false, false);
12421 :
12422 74 : appendPQExpBuffer(q, "CREATE TYPE %s;\n",
12423 : qualtypname);
12424 :
12425 74 : if (dopt->binary_upgrade)
12426 4 : binary_upgrade_extension_member(q, &tyinfo->dobj,
12427 : "TYPE", qtypname,
12428 4 : tyinfo->dobj.namespace->dobj.name);
12429 :
12430 74 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12431 74 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12432 74 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12433 : .namespace = tyinfo->dobj.namespace->dobj.name,
12434 : .owner = tyinfo->rolname,
12435 : .description = "TYPE",
12436 : .section = SECTION_PRE_DATA,
12437 : .createStmt = q->data,
12438 : .dropStmt = delq->data));
12439 :
12440 : /* Dump Type Comments and Security Labels */
12441 74 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12442 64 : dumpComment(fout, "TYPE", qtypname,
12443 64 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12444 64 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12445 :
12446 74 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12447 0 : dumpSecLabel(fout, "TYPE", qtypname,
12448 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12449 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12450 :
12451 74 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12452 0 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12453 : qtypname, NULL,
12454 0 : tyinfo->dobj.namespace->dobj.name,
12455 0 : NULL, tyinfo->rolname, &tyinfo->dacl);
12456 :
12457 74 : destroyPQExpBuffer(q);
12458 74 : destroyPQExpBuffer(delq);
12459 74 : free(qtypname);
12460 74 : free(qualtypname);
12461 74 : }
12462 :
12463 : /*
12464 : * dumpBaseType
12465 : * writes out to fout the queries to recreate a user-defined base type
12466 : */
12467 : static void
12468 566 : dumpBaseType(Archive *fout, const TypeInfo *tyinfo)
12469 : {
12470 566 : DumpOptions *dopt = fout->dopt;
12471 566 : PQExpBuffer q = createPQExpBuffer();
12472 566 : PQExpBuffer delq = createPQExpBuffer();
12473 566 : PQExpBuffer query = createPQExpBuffer();
12474 : PGresult *res;
12475 : char *qtypname;
12476 : char *qualtypname;
12477 : char *typlen;
12478 : char *typinput;
12479 : char *typoutput;
12480 : char *typreceive;
12481 : char *typsend;
12482 : char *typmodin;
12483 : char *typmodout;
12484 : char *typanalyze;
12485 : char *typsubscript;
12486 : Oid typreceiveoid;
12487 : Oid typsendoid;
12488 : Oid typmodinoid;
12489 : Oid typmodoutoid;
12490 : Oid typanalyzeoid;
12491 : Oid typsubscriptoid;
12492 : char *typcategory;
12493 : char *typispreferred;
12494 : char *typdelim;
12495 : char *typbyval;
12496 : char *typalign;
12497 : char *typstorage;
12498 : char *typcollatable;
12499 : char *typdefault;
12500 566 : bool typdefault_is_literal = false;
12501 :
12502 566 : if (!fout->is_prepared[PREPQUERY_DUMPBASETYPE])
12503 : {
12504 : /* Set up query for type-specific details */
12505 80 : appendPQExpBufferStr(query,
12506 : "PREPARE dumpBaseType(pg_catalog.oid) AS\n"
12507 : "SELECT typlen, "
12508 : "typinput, typoutput, typreceive, typsend, "
12509 : "typreceive::pg_catalog.oid AS typreceiveoid, "
12510 : "typsend::pg_catalog.oid AS typsendoid, "
12511 : "typanalyze, "
12512 : "typanalyze::pg_catalog.oid AS typanalyzeoid, "
12513 : "typdelim, typbyval, typalign, typstorage, "
12514 : "typmodin, typmodout, "
12515 : "typmodin::pg_catalog.oid AS typmodinoid, "
12516 : "typmodout::pg_catalog.oid AS typmodoutoid, "
12517 : "typcategory, typispreferred, "
12518 : "(typcollation <> 0) AS typcollatable, "
12519 : "pg_catalog.pg_get_expr(typdefaultbin, 0) AS typdefaultbin, typdefault, ");
12520 :
12521 80 : if (fout->remoteVersion >= 140000)
12522 80 : appendPQExpBufferStr(query,
12523 : "typsubscript, "
12524 : "typsubscript::pg_catalog.oid AS typsubscriptoid ");
12525 : else
12526 0 : appendPQExpBufferStr(query,
12527 : "'-' AS typsubscript, 0 AS typsubscriptoid ");
12528 :
12529 80 : appendPQExpBufferStr(query, "FROM pg_catalog.pg_type "
12530 : "WHERE oid = $1");
12531 :
12532 80 : ExecuteSqlStatement(fout, query->data);
12533 :
12534 80 : fout->is_prepared[PREPQUERY_DUMPBASETYPE] = true;
12535 : }
12536 :
12537 566 : printfPQExpBuffer(query,
12538 : "EXECUTE dumpBaseType('%u')",
12539 566 : tyinfo->dobj.catId.oid);
12540 :
12541 566 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
12542 :
12543 566 : typlen = PQgetvalue(res, 0, PQfnumber(res, "typlen"));
12544 566 : typinput = PQgetvalue(res, 0, PQfnumber(res, "typinput"));
12545 566 : typoutput = PQgetvalue(res, 0, PQfnumber(res, "typoutput"));
12546 566 : typreceive = PQgetvalue(res, 0, PQfnumber(res, "typreceive"));
12547 566 : typsend = PQgetvalue(res, 0, PQfnumber(res, "typsend"));
12548 566 : typmodin = PQgetvalue(res, 0, PQfnumber(res, "typmodin"));
12549 566 : typmodout = PQgetvalue(res, 0, PQfnumber(res, "typmodout"));
12550 566 : typanalyze = PQgetvalue(res, 0, PQfnumber(res, "typanalyze"));
12551 566 : typsubscript = PQgetvalue(res, 0, PQfnumber(res, "typsubscript"));
12552 566 : typreceiveoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typreceiveoid")));
12553 566 : typsendoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typsendoid")));
12554 566 : typmodinoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typmodinoid")));
12555 566 : typmodoutoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typmodoutoid")));
12556 566 : typanalyzeoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typanalyzeoid")));
12557 566 : typsubscriptoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typsubscriptoid")));
12558 566 : typcategory = PQgetvalue(res, 0, PQfnumber(res, "typcategory"));
12559 566 : typispreferred = PQgetvalue(res, 0, PQfnumber(res, "typispreferred"));
12560 566 : typdelim = PQgetvalue(res, 0, PQfnumber(res, "typdelim"));
12561 566 : typbyval = PQgetvalue(res, 0, PQfnumber(res, "typbyval"));
12562 566 : typalign = PQgetvalue(res, 0, PQfnumber(res, "typalign"));
12563 566 : typstorage = PQgetvalue(res, 0, PQfnumber(res, "typstorage"));
12564 566 : typcollatable = PQgetvalue(res, 0, PQfnumber(res, "typcollatable"));
12565 566 : if (!PQgetisnull(res, 0, PQfnumber(res, "typdefaultbin")))
12566 0 : typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefaultbin"));
12567 566 : else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
12568 : {
12569 84 : typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
12570 84 : typdefault_is_literal = true; /* it needs quotes */
12571 : }
12572 : else
12573 482 : typdefault = NULL;
12574 :
12575 566 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12576 566 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
12577 :
12578 : /*
12579 : * The reason we include CASCADE is that the circular dependency between
12580 : * the type and its I/O functions makes it impossible to drop the type any
12581 : * other way.
12582 : */
12583 566 : appendPQExpBuffer(delq, "DROP TYPE %s CASCADE;\n", qualtypname);
12584 :
12585 : /*
12586 : * We might already have a shell type, but setting pg_type_oid is
12587 : * harmless, and in any case we'd better set the array type OID.
12588 : */
12589 566 : if (dopt->binary_upgrade)
12590 16 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
12591 16 : tyinfo->dobj.catId.oid,
12592 : false, false);
12593 :
12594 566 : appendPQExpBuffer(q,
12595 : "CREATE TYPE %s (\n"
12596 : " INTERNALLENGTH = %s",
12597 : qualtypname,
12598 566 : (strcmp(typlen, "-1") == 0) ? "variable" : typlen);
12599 :
12600 : /* regproc result is sufficiently quoted already */
12601 566 : appendPQExpBuffer(q, ",\n INPUT = %s", typinput);
12602 566 : appendPQExpBuffer(q, ",\n OUTPUT = %s", typoutput);
12603 566 : if (OidIsValid(typreceiveoid))
12604 420 : appendPQExpBuffer(q, ",\n RECEIVE = %s", typreceive);
12605 566 : if (OidIsValid(typsendoid))
12606 420 : appendPQExpBuffer(q, ",\n SEND = %s", typsend);
12607 566 : if (OidIsValid(typmodinoid))
12608 70 : appendPQExpBuffer(q, ",\n TYPMOD_IN = %s", typmodin);
12609 566 : if (OidIsValid(typmodoutoid))
12610 70 : appendPQExpBuffer(q, ",\n TYPMOD_OUT = %s", typmodout);
12611 566 : if (OidIsValid(typanalyzeoid))
12612 6 : appendPQExpBuffer(q, ",\n ANALYZE = %s", typanalyze);
12613 :
12614 566 : if (strcmp(typcollatable, "t") == 0)
12615 60 : appendPQExpBufferStr(q, ",\n COLLATABLE = true");
12616 :
12617 566 : if (typdefault != NULL)
12618 : {
12619 84 : appendPQExpBufferStr(q, ",\n DEFAULT = ");
12620 84 : if (typdefault_is_literal)
12621 84 : appendStringLiteralAH(q, typdefault, fout);
12622 : else
12623 0 : appendPQExpBufferStr(q, typdefault);
12624 : }
12625 :
12626 566 : if (OidIsValid(typsubscriptoid))
12627 58 : appendPQExpBuffer(q, ",\n SUBSCRIPT = %s", typsubscript);
12628 :
12629 566 : if (OidIsValid(tyinfo->typelem))
12630 52 : appendPQExpBuffer(q, ",\n ELEMENT = %s",
12631 52 : getFormattedTypeName(fout, tyinfo->typelem,
12632 : zeroIsError));
12633 :
12634 566 : if (strcmp(typcategory, "U") != 0)
12635 : {
12636 322 : appendPQExpBufferStr(q, ",\n CATEGORY = ");
12637 322 : appendStringLiteralAH(q, typcategory, fout);
12638 : }
12639 :
12640 566 : if (strcmp(typispreferred, "t") == 0)
12641 58 : appendPQExpBufferStr(q, ",\n PREFERRED = true");
12642 :
12643 566 : if (typdelim && strcmp(typdelim, ",") != 0)
12644 : {
12645 6 : appendPQExpBufferStr(q, ",\n DELIMITER = ");
12646 6 : appendStringLiteralAH(q, typdelim, fout);
12647 : }
12648 :
12649 566 : if (*typalign == TYPALIGN_CHAR)
12650 24 : appendPQExpBufferStr(q, ",\n ALIGNMENT = char");
12651 542 : else if (*typalign == TYPALIGN_SHORT)
12652 12 : appendPQExpBufferStr(q, ",\n ALIGNMENT = int2");
12653 530 : else if (*typalign == TYPALIGN_INT)
12654 374 : appendPQExpBufferStr(q, ",\n ALIGNMENT = int4");
12655 156 : else if (*typalign == TYPALIGN_DOUBLE)
12656 156 : appendPQExpBufferStr(q, ",\n ALIGNMENT = double");
12657 :
12658 566 : if (*typstorage == TYPSTORAGE_PLAIN)
12659 416 : appendPQExpBufferStr(q, ",\n STORAGE = plain");
12660 150 : else if (*typstorage == TYPSTORAGE_EXTERNAL)
12661 0 : appendPQExpBufferStr(q, ",\n STORAGE = external");
12662 150 : else if (*typstorage == TYPSTORAGE_EXTENDED)
12663 132 : appendPQExpBufferStr(q, ",\n STORAGE = extended");
12664 18 : else if (*typstorage == TYPSTORAGE_MAIN)
12665 18 : appendPQExpBufferStr(q, ",\n STORAGE = main");
12666 :
12667 566 : if (strcmp(typbyval, "t") == 0)
12668 274 : appendPQExpBufferStr(q, ",\n PASSEDBYVALUE");
12669 :
12670 566 : appendPQExpBufferStr(q, "\n);\n");
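/*
 * For a hypothetical 16-byte base type public.complex whose support
 * functions follow the usual naming, the assembled command reads
 * roughly
 *
 *     CREATE TYPE public.complex (
 *         INTERNALLENGTH = 16,
 *         INPUT = complex_in,
 *         OUTPUT = complex_out,
 *         RECEIVE = complex_recv,
 *         SEND = complex_send,
 *         ALIGNMENT = double,
 *         STORAGE = plain
 *     );
 *
 * Each optional clause is emitted only when the catalog value differs
 * from the CREATE TYPE default, per the tests above.
 */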
12671 :
12672 566 : if (dopt->binary_upgrade)
12673 16 : binary_upgrade_extension_member(q, &tyinfo->dobj,
12674 : "TYPE", qtypname,
12675 16 : tyinfo->dobj.namespace->dobj.name);
12676 :
12677 566 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12678 566 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12679 566 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12680 : .namespace = tyinfo->dobj.namespace->dobj.name,
12681 : .owner = tyinfo->rolname,
12682 : .description = "TYPE",
12683 : .section = SECTION_PRE_DATA,
12684 : .createStmt = q->data,
12685 : .dropStmt = delq->data));
12686 :
12687 : /* Dump Type Comments and Security Labels */
12688 566 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12689 496 : dumpComment(fout, "TYPE", qtypname,
12690 496 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12691 496 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12692 :
12693 566 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12694 0 : dumpSecLabel(fout, "TYPE", qtypname,
12695 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12696 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12697 :
12698 566 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12699 64 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12700 : qtypname, NULL,
12701 64 : tyinfo->dobj.namespace->dobj.name,
12702 64 : NULL, tyinfo->rolname, &tyinfo->dacl);
12703 :
12704 566 : PQclear(res);
12705 566 : destroyPQExpBuffer(q);
12706 566 : destroyPQExpBuffer(delq);
12707 566 : destroyPQExpBuffer(query);
12708 566 : free(qtypname);
12709 566 : free(qualtypname);
12710 566 : }
12711 :
12712 : /*
12713 : * dumpDomain
12714 : * writes out to fout the queries to recreate a user-defined domain
12715 : */
12716 : static void
12717 304 : dumpDomain(Archive *fout, const TypeInfo *tyinfo)
12718 : {
12719 304 : DumpOptions *dopt = fout->dopt;
12720 304 : PQExpBuffer q = createPQExpBuffer();
12721 304 : PQExpBuffer delq = createPQExpBuffer();
12722 304 : PQExpBuffer query = createPQExpBuffer();
12723 : PGresult *res;
12724 : int i;
12725 : char *qtypname;
12726 : char *qualtypname;
12727 : char *typnotnull;
12728 : char *typdefn;
12729 : char *typdefault;
12730 : Oid typcollation;
12731 304 : bool typdefault_is_literal = false;
12732 :
12733 304 : if (!fout->is_prepared[PREPQUERY_DUMPDOMAIN])
12734 : {
12735 : /* Set up query for domain-specific details */
12736 74 : appendPQExpBufferStr(query,
12737 : "PREPARE dumpDomain(pg_catalog.oid) AS\n");
12738 :
12739 74 : appendPQExpBufferStr(query, "SELECT t.typnotnull, "
12740 : "pg_catalog.format_type(t.typbasetype, t.typtypmod) AS typdefn, "
12741 : "pg_catalog.pg_get_expr(t.typdefaultbin, 'pg_catalog.pg_type'::pg_catalog.regclass) AS typdefaultbin, "
12742 : "t.typdefault, "
12743 : "CASE WHEN t.typcollation <> u.typcollation "
12744 : "THEN t.typcollation ELSE 0 END AS typcollation "
12745 : "FROM pg_catalog.pg_type t "
12746 : "LEFT JOIN pg_catalog.pg_type u ON (t.typbasetype = u.oid) "
12747 : "WHERE t.oid = $1");
12748 :
12749 74 : ExecuteSqlStatement(fout, query->data);
12750 :
12751 74 : fout->is_prepared[PREPQUERY_DUMPDOMAIN] = true;
12752 : }
12753 :
12754 304 : printfPQExpBuffer(query,
12755 : "EXECUTE dumpDomain('%u')",
12756 304 : tyinfo->dobj.catId.oid);
12757 :
12758 304 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
12759 :
12760 304 : typnotnull = PQgetvalue(res, 0, PQfnumber(res, "typnotnull"));
12761 304 : typdefn = PQgetvalue(res, 0, PQfnumber(res, "typdefn"));
12762 304 : if (!PQgetisnull(res, 0, PQfnumber(res, "typdefaultbin")))
12763 74 : typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefaultbin"));
12764 230 : else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
12765 : {
12766 0 : typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
12767 0 : typdefault_is_literal = true; /* it needs quotes */
12768 : }
12769 : else
12770 230 : typdefault = NULL;
12771 304 : typcollation = atooid(PQgetvalue(res, 0, PQfnumber(res, "typcollation")));
12772 :
12773 304 : if (dopt->binary_upgrade)
12774 50 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
12775 50 : tyinfo->dobj.catId.oid,
12776 : true, /* force array type */
12777 : false); /* force multirange type */
12778 :
12779 304 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
12780 304 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
12781 :
12782 304 : appendPQExpBuffer(q,
12783 : "CREATE DOMAIN %s AS %s",
12784 : qualtypname,
12785 : typdefn);
12786 :
12787 : /* Print collation only if different from base type's collation */
12788 304 : if (OidIsValid(typcollation))
12789 : {
12790 : CollInfo *coll;
12791 :
12792 64 : coll = findCollationByOid(typcollation);
12793 64 : if (coll)
12794 64 : appendPQExpBuffer(q, " COLLATE %s", fmtQualifiedDumpable(coll));
12795 : }
12796 :
12797 : /*
12798 : * Print a not-null constraint if there's one. In servers older than 17
12799 : * these don't have names, so just print it unadorned; in newer ones they
12800 : * do, but most of the time it's going to be the standard generated one,
12801 : * so omit the name in that case also.
12802 : */
12803 304 : if (typnotnull[0] == 't')
12804 : {
12805 94 : if (fout->remoteVersion < 170000 || tyinfo->notnull == NULL)
12806 0 : appendPQExpBufferStr(q, " NOT NULL");
12807 : else
12808 : {
12809 94 : ConstraintInfo *notnull = tyinfo->notnull;
12810 :
12811 94 : if (!notnull->separate)
12812 : {
12813 : char *default_name;
12814 :
12815 : /* XXX should match ChooseConstraintName better */
12816 94 : default_name = psprintf("%s_not_null", tyinfo->dobj.name);
12817 :
12818 94 : if (strcmp(default_name, notnull->dobj.name) == 0)
12819 30 : appendPQExpBufferStr(q, " NOT NULL");
12820 : else
12821 64 : appendPQExpBuffer(q, " CONSTRAINT %s %s",
12822 64 : fmtId(notnull->dobj.name), notnull->condef);
12823 94 : free(default_name);
12824 : }
12825 : }
12826 : }
12827 :
12828 304 : if (typdefault != NULL)
12829 : {
12830 74 : appendPQExpBufferStr(q, " DEFAULT ");
12831 74 : if (typdefault_is_literal)
12832 0 : appendStringLiteralAH(q, typdefault, fout);
12833 : else
12834 74 : appendPQExpBufferStr(q, typdefault);
12835 : }
12836 :
12837 304 : PQclear(res);
12838 :
12839 : /*
12840 : * Add any CHECK constraints for the domain
12841 : */
12842 518 : for (i = 0; i < tyinfo->nDomChecks; i++)
12843 : {
12844 214 : ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
12845 :
12846 214 : if (!domcheck->separate && domcheck->contype == 'c')
12847 204 : appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s",
12848 204 : fmtId(domcheck->dobj.name), domcheck->condef);
12849 : }
12850 :
12851 304 : appendPQExpBufferStr(q, ";\n");
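/*
 * A hypothetical domain assembled by the code above could therefore
 * read
 *
 *     CREATE DOMAIN public.posint AS integer NOT NULL DEFAULT 1
 *         CONSTRAINT posint_check CHECK ((VALUE > 0));
 *
 * The not-null constraint is printed without a name because it carries
 * the generated "posint_not_null" name; the CHECK constraint keeps its
 * explicit name. (All names and values here are placeholders.)
 */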
12852 :
12853 304 : appendPQExpBuffer(delq, "DROP DOMAIN %s;\n", qualtypname);
12854 :
12855 304 : if (dopt->binary_upgrade)
12856 50 : binary_upgrade_extension_member(q, &tyinfo->dobj,
12857 : "DOMAIN", qtypname,
12858 50 : tyinfo->dobj.namespace->dobj.name);
12859 :
12860 304 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
12861 304 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
12862 304 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
12863 : .namespace = tyinfo->dobj.namespace->dobj.name,
12864 : .owner = tyinfo->rolname,
12865 : .description = "DOMAIN",
12866 : .section = SECTION_PRE_DATA,
12867 : .createStmt = q->data,
12868 : .dropStmt = delq->data));
12869 :
12870 : /* Dump Domain Comments and Security Labels */
12871 304 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
12872 0 : dumpComment(fout, "DOMAIN", qtypname,
12873 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12874 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12875 :
12876 304 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
12877 0 : dumpSecLabel(fout, "DOMAIN", qtypname,
12878 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
12879 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
12880 :
12881 304 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
12882 64 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
12883 : qtypname, NULL,
12884 64 : tyinfo->dobj.namespace->dobj.name,
12885 64 : NULL, tyinfo->rolname, &tyinfo->dacl);
12886 :
12887 : /* Dump any per-constraint comments */
12888 518 : for (i = 0; i < tyinfo->nDomChecks; i++)
12889 : {
12890 214 : ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
12891 : PQExpBuffer conprefix;
12892 :
12893 : /* but only if the constraint itself was dumped here */
12894 214 : if (domcheck->separate)
12895 10 : continue;
12896 :
12897 204 : conprefix = createPQExpBuffer();
12898 204 : appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
12899 204 : fmtId(domcheck->dobj.name));
12900 :
12901 204 : if (domcheck->dobj.dump & DUMP_COMPONENT_COMMENT)
12902 64 : dumpComment(fout, conprefix->data, qtypname,
12903 64 : tyinfo->dobj.namespace->dobj.name,
12904 64 : tyinfo->rolname,
12905 64 : domcheck->dobj.catId, 0, tyinfo->dobj.dumpId);
12906 :
12907 204 : destroyPQExpBuffer(conprefix);
12908 : }
12909 :
12910 : /*
12911 : * And a comment on the not-null constraint, if there's one -- but only if
12912 : * the constraint itself was dumped here
12913 : */
12914 304 : if (tyinfo->notnull != NULL && !tyinfo->notnull->separate)
12915 : {
12916 94 : PQExpBuffer conprefix = createPQExpBuffer();
12917 :
12918 94 : appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
12919 94 : fmtId(tyinfo->notnull->dobj.name));
12920 :
12921 94 : if (tyinfo->notnull->dobj.dump & DUMP_COMPONENT_COMMENT)
12922 64 : dumpComment(fout, conprefix->data, qtypname,
12923 64 : tyinfo->dobj.namespace->dobj.name,
12924 64 : tyinfo->rolname,
12925 64 : tyinfo->notnull->dobj.catId, 0, tyinfo->dobj.dumpId);
12926 94 : destroyPQExpBuffer(conprefix);
12927 : }
12928 :
12929 304 : destroyPQExpBuffer(q);
12930 304 : destroyPQExpBuffer(delq);
12931 304 : destroyPQExpBuffer(query);
12932 304 : free(qtypname);
12933 304 : free(qualtypname);
12934 304 : }
12935 :
12936 : /*
12937 : * dumpCompositeType
12938 : * writes out to fout the queries to recreate a user-defined stand-alone
12939 : * composite type
12940 : */
12941 : static void
12942 260 : dumpCompositeType(Archive *fout, const TypeInfo *tyinfo)
12943 : {
12944 260 : DumpOptions *dopt = fout->dopt;
12945 260 : PQExpBuffer q = createPQExpBuffer();
12946 260 : PQExpBuffer dropped = createPQExpBuffer();
12947 260 : PQExpBuffer delq = createPQExpBuffer();
12948 260 : PQExpBuffer query = createPQExpBuffer();
12949 : PGresult *res;
12950 : char *qtypname;
12951 : char *qualtypname;
12952 : int ntups;
12953 : int i_attname;
12954 : int i_atttypdefn;
12955 : int i_attlen;
12956 : int i_attalign;
12957 : int i_attisdropped;
12958 : int i_attcollation;
12959 : int i;
12960 : int actual_atts;
12961 :
12962 260 : if (!fout->is_prepared[PREPQUERY_DUMPCOMPOSITETYPE])
12963 : {
12964 : /*
12965 : * Set up query for type-specific details.
12966 : *
12967 : * Since we only want to dump COLLATE clauses for attributes whose
12968 : * collation is different from their type's default, we use a CASE
12969 : * here to suppress uninteresting attcollations cheaply. atttypid
12970 : * will be 0 for dropped columns; collation does not matter for those.
12971 : */
12972 110 : appendPQExpBufferStr(query,
12973 : "PREPARE dumpCompositeType(pg_catalog.oid) AS\n"
12974 : "SELECT a.attname, a.attnum, "
12975 : "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
12976 : "a.attlen, a.attalign, a.attisdropped, "
12977 : "CASE WHEN a.attcollation <> at.typcollation "
12978 : "THEN a.attcollation ELSE 0 END AS attcollation "
12979 : "FROM pg_catalog.pg_type ct "
12980 : "JOIN pg_catalog.pg_attribute a ON a.attrelid = ct.typrelid "
12981 : "LEFT JOIN pg_catalog.pg_type at ON at.oid = a.atttypid "
12982 : "WHERE ct.oid = $1 "
12983 : "ORDER BY a.attnum");
12984 :
12985 110 : ExecuteSqlStatement(fout, query->data);
12986 :
12987 110 : fout->is_prepared[PREPQUERY_DUMPCOMPOSITETYPE] = true;
12988 : }
12989 :
12990 260 : printfPQExpBuffer(query,
12991 : "EXECUTE dumpCompositeType('%u')",
12992 260 : tyinfo->dobj.catId.oid);
12993 :
12994 260 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
12995 :
12996 260 : ntups = PQntuples(res);
12997 :
12998 260 : i_attname = PQfnumber(res, "attname");
12999 260 : i_atttypdefn = PQfnumber(res, "atttypdefn");
13000 260 : i_attlen = PQfnumber(res, "attlen");
13001 260 : i_attalign = PQfnumber(res, "attalign");
13002 260 : i_attisdropped = PQfnumber(res, "attisdropped");
13003 260 : i_attcollation = PQfnumber(res, "attcollation");
13004 :
13005 260 : if (dopt->binary_upgrade)
13006 : {
13007 36 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
13008 36 : tyinfo->dobj.catId.oid,
13009 : false, false);
13010 36 : binary_upgrade_set_pg_class_oids(fout, q, tyinfo->typrelid);
13011 : }
13012 :
13013 260 : qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
13014 260 : qualtypname = pg_strdup(fmtQualifiedDumpable(tyinfo));
13015 :
13016 260 : appendPQExpBuffer(q, "CREATE TYPE %s AS (",
13017 : qualtypname);
13018 :
13019 260 : actual_atts = 0;
13020 824 : for (i = 0; i < ntups; i++)
13021 : {
13022 : char *attname;
13023 : char *atttypdefn;
13024 : char *attlen;
13025 : char *attalign;
13026 : bool attisdropped;
13027 : Oid attcollation;
13028 :
13029 564 : attname = PQgetvalue(res, i, i_attname);
13030 564 : atttypdefn = PQgetvalue(res, i, i_atttypdefn);
13031 564 : attlen = PQgetvalue(res, i, i_attlen);
13032 564 : attalign = PQgetvalue(res, i, i_attalign);
13033 564 : attisdropped = (PQgetvalue(res, i, i_attisdropped)[0] == 't');
13034 564 : attcollation = atooid(PQgetvalue(res, i, i_attcollation));
13035 :
13036 564 : if (attisdropped && !dopt->binary_upgrade)
13037 16 : continue;
13038 :
13039 : /* Format properly if not first attr */
13040 548 : if (actual_atts++ > 0)
13041 288 : appendPQExpBufferChar(q, ',');
13042 548 : appendPQExpBufferStr(q, "\n\t");
13043 :
13044 548 : if (!attisdropped)
13045 : {
13046 544 : appendPQExpBuffer(q, "%s %s", fmtId(attname), atttypdefn);
13047 :
13048 : /* Add collation if not default for the column type */
13049 544 : if (OidIsValid(attcollation))
13050 : {
13051 : CollInfo *coll;
13052 :
13053 0 : coll = findCollationByOid(attcollation);
13054 0 : if (coll)
13055 0 : appendPQExpBuffer(q, " COLLATE %s",
13056 0 : fmtQualifiedDumpable(coll));
13057 : }
13058 : }
13059 : else
13060 : {
13061 : /*
13062 : * This is a dropped attribute and we're in binary_upgrade mode.
13063 : * Insert a placeholder for it in the CREATE TYPE command, and set
13064 : * length and alignment with direct UPDATE to the catalogs
13065 : * afterwards. See similar code in dumpTableSchema().
13066 : */
13067 4 : appendPQExpBuffer(q, "%s INTEGER /* dummy */", fmtId(attname));
13068 :
13069 : /* stash separately for insertion after the CREATE TYPE */
13070 4 : appendPQExpBufferStr(dropped,
13071 : "\n-- For binary upgrade, recreate dropped column.\n");
13072 4 : appendPQExpBuffer(dropped, "UPDATE pg_catalog.pg_attribute\n"
13073 : "SET attlen = %s, "
13074 : "attalign = '%s', attbyval = false\n"
13075 : "WHERE attname = ", attlen, attalign);
13076 4 : appendStringLiteralAH(dropped, attname, fout);
13077 4 : appendPQExpBufferStr(dropped, "\n AND attrelid = ");
13078 4 : appendStringLiteralAH(dropped, qualtypname, fout);
13079 4 : appendPQExpBufferStr(dropped, "::pg_catalog.regclass;\n");
13080 :
13081 4 : appendPQExpBuffer(dropped, "ALTER TYPE %s ",
13082 : qualtypname);
13083 4 : appendPQExpBuffer(dropped, "DROP ATTRIBUTE %s;\n",
13084 : fmtId(attname));
13085 : }
13086 : }
13087 260 : appendPQExpBufferStr(q, "\n);\n");
13088 260 : appendPQExpBufferStr(q, dropped->data);
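/*
 * In binary-upgrade mode a dropped column of a hypothetical composite
 * type public.pair is thus listed as a dummy INTEGER attribute inside
 * CREATE TYPE, after which the stashed "dropped" buffer patches
 * pg_attribute and drops it again, roughly:
 *
 *     -- For binary upgrade, recreate dropped column.
 *     UPDATE pg_catalog.pg_attribute
 *     SET attlen = 4, attalign = 'i', attbyval = false
 *     WHERE attname = '........pg.dropped.2........'
 *       AND attrelid = 'public.pair'::pg_catalog.regclass;
 *     ALTER TYPE public.pair DROP ATTRIBUTE "........pg.dropped.2........";
 *
 * (Type name, column name, and attribute values are placeholders.)
 */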
13089 :
13090 260 : appendPQExpBuffer(delq, "DROP TYPE %s;\n", qualtypname);
13091 :
13092 260 : if (dopt->binary_upgrade)
13093 36 : binary_upgrade_extension_member(q, &tyinfo->dobj,
13094 : "TYPE", qtypname,
13095 36 : tyinfo->dobj.namespace->dobj.name);
13096 :
13097 260 : if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13098 226 : ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
13099 226 : ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
13100 : .namespace = tyinfo->dobj.namespace->dobj.name,
13101 : .owner = tyinfo->rolname,
13102 : .description = "TYPE",
13103 : .section = SECTION_PRE_DATA,
13104 : .createStmt = q->data,
13105 : .dropStmt = delq->data));
13106 :
13107 :
13108 : /* Dump Type Comments and Security Labels */
13109 260 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13110 64 : dumpComment(fout, "TYPE", qtypname,
13111 64 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
13112 64 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
13113 :
13114 260 : if (tyinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
13115 0 : dumpSecLabel(fout, "TYPE", qtypname,
13116 0 : tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
13117 0 : tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
13118 :
13119 260 : if (tyinfo->dobj.dump & DUMP_COMPONENT_ACL)
13120 36 : dumpACL(fout, tyinfo->dobj.dumpId, InvalidDumpId, "TYPE",
13121 : qtypname, NULL,
13122 36 : tyinfo->dobj.namespace->dobj.name,
13123 36 : NULL, tyinfo->rolname, &tyinfo->dacl);
13124 :
13125 : /* Dump any per-column comments */
13126 260 : if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13127 64 : dumpCompositeTypeColComments(fout, tyinfo, res);
13128 :
13129 260 : PQclear(res);
13130 260 : destroyPQExpBuffer(q);
13131 260 : destroyPQExpBuffer(dropped);
13132 260 : destroyPQExpBuffer(delq);
13133 260 : destroyPQExpBuffer(query);
13134 260 : free(qtypname);
13135 260 : free(qualtypname);
13136 260 : }
13137 :
13138 : /*
13139 : * dumpCompositeTypeColComments
13140 : * writes out to fout the queries to recreate comments on the columns of
13141 : * a user-defined stand-alone composite type.
13142 : *
13143 : * The caller has already made a query to collect the names and attnums
13144 : * of the type's columns, so we just pass that result into here rather
13145 : * than reading them again.
13146 : */
13147 : static void
13148 64 : dumpCompositeTypeColComments(Archive *fout, const TypeInfo *tyinfo,
13149 : PGresult *res)
13150 : {
13151 : CommentItem *comments;
13152 : int ncomments;
13153 : PQExpBuffer query;
13154 : PQExpBuffer target;
13155 : int i;
13156 : int ntups;
13157 : int i_attname;
13158 : int i_attnum;
13159 : int i_attisdropped;
13160 :
13161 : /* do nothing if --no-comments is supplied */
13162 64 : if (fout->dopt->no_comments)
13163 0 : return;
13164 :
13165 : /* Search for comments associated with type's pg_class OID */
13166 64 : ncomments = findComments(RelationRelationId, tyinfo->typrelid,
13167 : &comments);
13168 :
13169 : /* If no comments exist, we're done */
13170 64 : if (ncomments <= 0)
13171 0 : return;
13172 :
13173 : /* Build COMMENT ON statements */
13174 64 : query = createPQExpBuffer();
13175 64 : target = createPQExpBuffer();
13176 :
13177 64 : ntups = PQntuples(res);
13178 64 : i_attnum = PQfnumber(res, "attnum");
13179 64 : i_attname = PQfnumber(res, "attname");
13180 64 : i_attisdropped = PQfnumber(res, "attisdropped");
13181 128 : while (ncomments > 0)
13182 : {
13183 : const char *attname;
13184 :
13185 64 : attname = NULL;
13186 64 : for (i = 0; i < ntups; i++)
13187 : {
13188 64 : if (atoi(PQgetvalue(res, i, i_attnum)) == comments->objsubid &&
13189 64 : PQgetvalue(res, i, i_attisdropped)[0] != 't')
13190 : {
13191 64 : attname = PQgetvalue(res, i, i_attname);
13192 64 : break;
13193 : }
13194 : }
13195 64 : if (attname) /* just in case we don't find it */
13196 : {
13197 64 : const char *descr = comments->descr;
13198 :
13199 64 : resetPQExpBuffer(target);
13200 64 : appendPQExpBuffer(target, "COLUMN %s.",
13201 64 : fmtId(tyinfo->dobj.name));
13202 64 : appendPQExpBufferStr(target, fmtId(attname));
13203 :
13204 64 : resetPQExpBuffer(query);
13205 64 : appendPQExpBuffer(query, "COMMENT ON COLUMN %s.",
13206 64 : fmtQualifiedDumpable(tyinfo));
13207 64 : appendPQExpBuffer(query, "%s IS ", fmtId(attname));
13208 64 : appendStringLiteralAH(query, descr, fout);
13209 64 : appendPQExpBufferStr(query, ";\n");
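/*
 * For a hypothetical composite type public.pair with a comment on its
 * column "a", the generated statement is simply
 *
 *     COMMENT ON COLUMN public.pair.a IS 'first member';
 */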
13210 :
13211 64 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
13212 64 : ARCHIVE_OPTS(.tag = target->data,
13213 : .namespace = tyinfo->dobj.namespace->dobj.name,
13214 : .owner = tyinfo->rolname,
13215 : .description = "COMMENT",
13216 : .section = SECTION_NONE,
13217 : .createStmt = query->data,
13218 : .deps = &(tyinfo->dobj.dumpId),
13219 : .nDeps = 1));
13220 : }
13221 :
13222 64 : comments++;
13223 64 : ncomments--;
13224 : }
13225 :
13226 64 : destroyPQExpBuffer(query);
13227 64 : destroyPQExpBuffer(target);
13228 : }
13229 :
13230 : /*
13231 : * dumpShellType
13232 : * writes out to fout the queries to create a shell type
13233 : *
13234 : * We dump a shell definition in advance of the I/O functions for the type.
13235 : */
13236 : static void
13237 146 : dumpShellType(Archive *fout, const ShellTypeInfo *stinfo)
13238 : {
13239 146 : DumpOptions *dopt = fout->dopt;
13240 : PQExpBuffer q;
13241 :
13242 : /* Do nothing if not dumping schema */
13243 146 : if (!dopt->dumpSchema)
13244 12 : return;
13245 :
13246 134 : q = createPQExpBuffer();
13247 :
13248 : /*
13249 : * Note the lack of a DROP command for the shell type; any required DROP
13250 : * is driven off the base type entry, instead. This interacts with
13251 : * _printTocEntry()'s use of the presence of a DROP command to decide
13252 : * whether an entry needs an ALTER OWNER command. We don't want to alter
13253 : * the shell type's owner immediately on creation; that should happen only
13254 : * after it's filled in, otherwise the backend complains.
13255 : */
13256 :
13257 134 : if (dopt->binary_upgrade)
13258 16 : binary_upgrade_set_type_oids_by_type_oid(fout, q,
13259 16 : stinfo->baseType->dobj.catId.oid,
13260 : false, false);
13261 :
13262 134 : appendPQExpBuffer(q, "CREATE TYPE %s;\n",
13263 134 : fmtQualifiedDumpable(stinfo));
13264 :
13265 134 : if (stinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13266 134 : ArchiveEntry(fout, stinfo->dobj.catId, stinfo->dobj.dumpId,
13267 134 : ARCHIVE_OPTS(.tag = stinfo->dobj.name,
13268 : .namespace = stinfo->dobj.namespace->dobj.name,
13269 : .owner = stinfo->baseType->rolname,
13270 : .description = "SHELL TYPE",
13271 : .section = SECTION_PRE_DATA,
13272 : .createStmt = q->data));
13273 :
13274 134 : destroyPQExpBuffer(q);
13275 : }
13276 :
13277 : /*
13278 : * dumpProcLang
13279 : * writes out to fout the queries to recreate a user-defined
13280 : * procedural language
13281 : */
13282 : static void
13283 164 : dumpProcLang(Archive *fout, const ProcLangInfo *plang)
13284 : {
13285 164 : DumpOptions *dopt = fout->dopt;
13286 : PQExpBuffer defqry;
13287 : PQExpBuffer delqry;
13288 : bool useParams;
13289 : char *qlanname;
13290 : FuncInfo *funcInfo;
13291 164 : FuncInfo *inlineInfo = NULL;
13292 164 : FuncInfo *validatorInfo = NULL;
13293 :
13294 : /* Do nothing if not dumping schema */
13295 164 : if (!dopt->dumpSchema)
13296 26 : return;
13297 :
13298 : /*
13299 : * Try to find the support function(s). It is not an error if we don't
13300 : * find them --- if the functions are in the pg_catalog schema, as is
13301 : * standard in 8.1 and up, then we won't have loaded them. (In this case
13302 : * we will emit a parameterless CREATE LANGUAGE command, which will
13303 : * require PL template knowledge in the backend to reload.)
13304 : */
13305 :
13306 138 : funcInfo = findFuncByOid(plang->lanplcallfoid);
13307 138 : if (funcInfo != NULL && !funcInfo->dobj.dump)
13308 4 : funcInfo = NULL; /* treat not-dumped same as not-found */
13309 :
13310 138 : if (OidIsValid(plang->laninline))
13311 : {
13312 76 : inlineInfo = findFuncByOid(plang->laninline);
13313 76 : if (inlineInfo != NULL && !inlineInfo->dobj.dump)
13314 2 : inlineInfo = NULL;
13315 : }
13316 :
13317 138 : if (OidIsValid(plang->lanvalidator))
13318 : {
13319 76 : validatorInfo = findFuncByOid(plang->lanvalidator);
13320 76 : if (validatorInfo != NULL && !validatorInfo->dobj.dump)
13321 2 : validatorInfo = NULL;
13322 : }
13323 :
13324 : /*
13325 : * If the functions are dumpable then emit a complete CREATE LANGUAGE with
13326 : * parameters. Otherwise, we'll write a parameterless command, which will
13327 : * be interpreted as CREATE EXTENSION.
13328 : */
13329 60 : useParams = (funcInfo != NULL &&
13330 258 : (inlineInfo != NULL || !OidIsValid(plang->laninline)) &&
13331 60 : (validatorInfo != NULL || !OidIsValid(plang->lanvalidator)));
13332 :
13333 138 : defqry = createPQExpBuffer();
13334 138 : delqry = createPQExpBuffer();
13335 :
13336 138 : qlanname = pg_strdup(fmtId(plang->dobj.name));
13337 :
13338 138 : appendPQExpBuffer(delqry, "DROP PROCEDURAL LANGUAGE %s;\n",
13339 : qlanname);
13340 :
13341 138 : if (useParams)
13342 : {
13343 60 : appendPQExpBuffer(defqry, "CREATE %sPROCEDURAL LANGUAGE %s",
13344 60 : plang->lanpltrusted ? "TRUSTED " : "",
13345 : qlanname);
13346 60 : appendPQExpBuffer(defqry, " HANDLER %s",
13347 60 : fmtQualifiedDumpable(funcInfo));
13348 60 : if (OidIsValid(plang->laninline))
13349 0 : appendPQExpBuffer(defqry, " INLINE %s",
13350 0 : fmtQualifiedDumpable(inlineInfo));
13351 60 : if (OidIsValid(plang->lanvalidator))
13352 0 : appendPQExpBuffer(defqry, " VALIDATOR %s",
13353 0 : fmtQualifiedDumpable(validatorInfo));
13354 : }
13355 : else
13356 : {
13357 : /*
13358 : * If not dumping parameters, then use CREATE OR REPLACE so that the
13359 : * command will not fail if the language is preinstalled in the target
13360 : * database.
13361 : *
13362 : * Modern servers will interpret this as CREATE EXTENSION IF NOT
13363 : * EXISTS; perhaps we should emit that instead? But it might just add
13364 : * confusion.
13365 : */
13366 78 : appendPQExpBuffer(defqry, "CREATE OR REPLACE PROCEDURAL LANGUAGE %s",
13367 : qlanname);
13368 : }
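/*
 * With dumpable support functions the command is fully parameterized,
 * e.g. for a hypothetical trusted language "plfoo":
 *
 *     CREATE TRUSTED PROCEDURAL LANGUAGE plfoo HANDLER public.plfoo_call_handler;
 *
 * When the support functions live in pg_catalog (as for plpgsql), the
 * fallback form is used instead:
 *
 *     CREATE OR REPLACE PROCEDURAL LANGUAGE plpgsql;
 */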
13369 138 : appendPQExpBufferStr(defqry, ";\n");
13370 :
13371 138 : if (dopt->binary_upgrade)
13372 4 : binary_upgrade_extension_member(defqry, &plang->dobj,
13373 : "LANGUAGE", qlanname, NULL);
13374 :
13375 138 : if (plang->dobj.dump & DUMP_COMPONENT_DEFINITION)
13376 62 : ArchiveEntry(fout, plang->dobj.catId, plang->dobj.dumpId,
13377 62 : ARCHIVE_OPTS(.tag = plang->dobj.name,
13378 : .owner = plang->lanowner,
13379 : .description = "PROCEDURAL LANGUAGE",
13380 : .section = SECTION_PRE_DATA,
13381 : .createStmt = defqry->data,
13382 : .dropStmt = delqry->data,
13383 : ));
13384 :
13385 : /* Dump Proc Lang Comments and Security Labels */
13386 138 : if (plang->dobj.dump & DUMP_COMPONENT_COMMENT)
13387 0 : dumpComment(fout, "LANGUAGE", qlanname,
13388 0 : NULL, plang->lanowner,
13389 0 : plang->dobj.catId, 0, plang->dobj.dumpId);
13390 :
13391 138 : if (plang->dobj.dump & DUMP_COMPONENT_SECLABEL)
13392 0 : dumpSecLabel(fout, "LANGUAGE", qlanname,
13393 0 : NULL, plang->lanowner,
13394 0 : plang->dobj.catId, 0, plang->dobj.dumpId);
13395 :
13396 138 : if (plang->lanpltrusted && plang->dobj.dump & DUMP_COMPONENT_ACL)
13397 76 : dumpACL(fout, plang->dobj.dumpId, InvalidDumpId, "LANGUAGE",
13398 : qlanname, NULL, NULL,
13399 76 : NULL, plang->lanowner, &plang->dacl);
13400 :
13401 138 : free(qlanname);
13402 :
13403 138 : destroyPQExpBuffer(defqry);
13404 138 : destroyPQExpBuffer(delqry);
13405 : }
13406 :
13407 : /*
13408 : * format_function_arguments: generate function name and argument list
13409 : *
13410 : * This is used when we can rely on pg_get_function_arguments to format
13411 : * the argument list. Note, however, that pg_get_function_arguments
13412 : * does not special-case zero-argument aggregates.
13413 : */
13414 : static char *
13415 8204 : format_function_arguments(const FuncInfo *finfo, const char *funcargs, bool is_agg)
13416 : {
13417 : PQExpBufferData fn;
13418 :
13419 8204 : initPQExpBuffer(&fn);
13420 8204 : appendPQExpBufferStr(&fn, fmtId(finfo->dobj.name));
13421 8204 : if (is_agg && finfo->nargs == 0)
13422 160 : appendPQExpBufferStr(&fn, "(*)");
13423 : else
13424 8044 : appendPQExpBuffer(&fn, "(%s)", funcargs);
13425 8204 : return fn.data;
13426 : }
13427 :
13428 : /*
13429 : * format_function_signature: generate function name and argument list
13430 : *
13431 : * Only a minimal list of input argument types is generated; this is
13432 : * sufficient to reference the function, but not to define it.
13433 : *
13434 : * If honor_quotes is false then the function name is never quoted.
13435 : * This is appropriate for use in TOC tags, but not in SQL commands.
13436 : */
13437 : static char *
13438 4318 : format_function_signature(Archive *fout, const FuncInfo *finfo, bool honor_quotes)
13439 : {
13440 : PQExpBufferData fn;
13441 : int j;
13442 :
13443 4318 : initPQExpBuffer(&fn);
13444 4318 : if (honor_quotes)
13445 786 : appendPQExpBuffer(&fn, "%s(", fmtId(finfo->dobj.name));
13446 : else
13447 3532 : appendPQExpBuffer(&fn, "%s(", finfo->dobj.name);
13448 7920 : for (j = 0; j < finfo->nargs; j++)
13449 : {
13450 3602 : if (j > 0)
13451 844 : appendPQExpBufferStr(&fn, ", ");
13452 :
13453 3602 : appendPQExpBufferStr(&fn,
13454 3602 : getFormattedTypeName(fout, finfo->argtypes[j],
13455 : zeroIsError));
13456 : }
13457 4318 : appendPQExpBufferChar(&fn, ')');
13458 4318 : return fn.data;
13459 : }
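/*
 * Contrasting the two helpers for a hypothetical function
 * add(a integer, b integer DEFAULT 0): format_function_arguments(),
 * fed the pg_get_function_arguments() output, yields roughly
 *
 *     add(a integer, b integer DEFAULT 0)
 *
 * while format_function_signature() produces only the minimal
 * reference form
 *
 *     add(integer, integer)
 *
 * which is sufficient to identify the function, e.g. in TOC tags.
 */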
13460 :
13461 :
13462 : /*
13463 : * dumpFunc:
13464 : * dump out one function
13465 : */
13466 : static void
13467 3656 : dumpFunc(Archive *fout, const FuncInfo *finfo)
13468 : {
13469 3656 : DumpOptions *dopt = fout->dopt;
13470 : PQExpBuffer query;
13471 : PQExpBuffer q;
13472 : PQExpBuffer delqry;
13473 : PQExpBuffer asPart;
13474 : PGresult *res;
13475 : char *funcsig; /* identity signature */
13476 3656 : char *funcfullsig = NULL; /* full signature */
13477 : char *funcsig_tag;
13478 : char *qual_funcsig;
13479 : char *proretset;
13480 : char *prosrc;
13481 : char *probin;
13482 : char *prosqlbody;
13483 : char *funcargs;
13484 : char *funciargs;
13485 : char *funcresult;
13486 : char *protrftypes;
13487 : char *prokind;
13488 : char *provolatile;
13489 : char *proisstrict;
13490 : char *prosecdef;
13491 : char *proleakproof;
13492 : char *proconfig;
13493 : char *procost;
13494 : char *prorows;
13495 : char *prosupport;
13496 : char *proparallel;
13497 : char *lanname;
13498 3656 : char **configitems = NULL;
13499 3656 : int nconfigitems = 0;
13500 : const char *keyword;
13501 :
13502 : /* Do nothing if not dumping schema */
13503 3656 : if (!dopt->dumpSchema)
13504 124 : return;
13505 :
13506 3532 : query = createPQExpBuffer();
13507 3532 : q = createPQExpBuffer();
13508 3532 : delqry = createPQExpBuffer();
13509 3532 : asPart = createPQExpBuffer();
13510 :
13511 3532 : if (!fout->is_prepared[PREPQUERY_DUMPFUNC])
13512 : {
13513 : /* Set up query for function-specific details */
13514 132 : appendPQExpBufferStr(query,
13515 : "PREPARE dumpFunc(pg_catalog.oid) AS\n");
13516 :
13517 132 : appendPQExpBufferStr(query,
13518 : "SELECT\n"
13519 : "proretset,\n"
13520 : "prosrc,\n"
13521 : "probin,\n"
13522 : "provolatile,\n"
13523 : "proisstrict,\n"
13524 : "prosecdef,\n"
13525 : "lanname,\n"
13526 : "proconfig,\n"
13527 : "procost,\n"
13528 : "prorows,\n"
13529 : "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs,\n"
13530 : "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs,\n"
13531 : "pg_catalog.pg_get_function_result(p.oid) AS funcresult,\n"
13532 : "proleakproof,\n");
13533 :
13534 132 : if (fout->remoteVersion >= 90500)
13535 132 : appendPQExpBufferStr(query,
13536 : "array_to_string(protrftypes, ' ') AS protrftypes,\n");
13537 : else
13538 0 : appendPQExpBufferStr(query,
13539 : "NULL AS protrftypes,\n");
13540 :
13541 132 : if (fout->remoteVersion >= 90600)
13542 132 : appendPQExpBufferStr(query,
13543 : "proparallel,\n");
13544 : else
13545 0 : appendPQExpBufferStr(query,
13546 : "'u' AS proparallel,\n");
13547 :
13548 132 : if (fout->remoteVersion >= 110000)
13549 132 : appendPQExpBufferStr(query,
13550 : "prokind,\n");
13551 : else
13552 0 : appendPQExpBufferStr(query,
13553 : "CASE WHEN proiswindow THEN 'w' ELSE 'f' END AS prokind,\n");
13554 :
13555 132 : if (fout->remoteVersion >= 120000)
13556 132 : appendPQExpBufferStr(query,
13557 : "prosupport,\n");
13558 : else
13559 0 : appendPQExpBufferStr(query,
13560 : "'-' AS prosupport,\n");
13561 :
13562 132 : if (fout->remoteVersion >= 140000)
13563 132 : appendPQExpBufferStr(query,
13564 : "pg_get_function_sqlbody(p.oid) AS prosqlbody\n");
13565 : else
13566 0 : appendPQExpBufferStr(query,
13567 : "NULL AS prosqlbody\n");
13568 :
13569 132 : appendPQExpBufferStr(query,
13570 : "FROM pg_catalog.pg_proc p, pg_catalog.pg_language l\n"
13571 : "WHERE p.oid = $1 "
13572 : "AND l.oid = p.prolang");
13573 :
13574 132 : ExecuteSqlStatement(fout, query->data);
13575 :
13576 132 : fout->is_prepared[PREPQUERY_DUMPFUNC] = true;
13577 : }
13578 :
13579 3532 : printfPQExpBuffer(query,
13580 : "EXECUTE dumpFunc('%u')",
13581 3532 : finfo->dobj.catId.oid);
13582 :
13583 3532 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
13584 :
13585 3532 : proretset = PQgetvalue(res, 0, PQfnumber(res, "proretset"));
13586 3532 : if (PQgetisnull(res, 0, PQfnumber(res, "prosqlbody")))
13587 : {
13588 3436 : prosrc = PQgetvalue(res, 0, PQfnumber(res, "prosrc"));
13589 3436 : probin = PQgetvalue(res, 0, PQfnumber(res, "probin"));
13590 3436 : prosqlbody = NULL;
13591 : }
13592 : else
13593 : {
13594 96 : prosrc = NULL;
13595 96 : probin = NULL;
13596 96 : prosqlbody = PQgetvalue(res, 0, PQfnumber(res, "prosqlbody"));
13597 : }
13598 3532 : funcargs = PQgetvalue(res, 0, PQfnumber(res, "funcargs"));
13599 3532 : funciargs = PQgetvalue(res, 0, PQfnumber(res, "funciargs"));
13600 3532 : funcresult = PQgetvalue(res, 0, PQfnumber(res, "funcresult"));
13601 3532 : protrftypes = PQgetvalue(res, 0, PQfnumber(res, "protrftypes"));
13602 3532 : prokind = PQgetvalue(res, 0, PQfnumber(res, "prokind"));
13603 3532 : provolatile = PQgetvalue(res, 0, PQfnumber(res, "provolatile"));
13604 3532 : proisstrict = PQgetvalue(res, 0, PQfnumber(res, "proisstrict"));
13605 3532 : prosecdef = PQgetvalue(res, 0, PQfnumber(res, "prosecdef"));
13606 3532 : proleakproof = PQgetvalue(res, 0, PQfnumber(res, "proleakproof"));
13607 3532 : proconfig = PQgetvalue(res, 0, PQfnumber(res, "proconfig"));
13608 3532 : procost = PQgetvalue(res, 0, PQfnumber(res, "procost"));
13609 3532 : prorows = PQgetvalue(res, 0, PQfnumber(res, "prorows"));
13610 3532 : prosupport = PQgetvalue(res, 0, PQfnumber(res, "prosupport"));
13611 3532 : proparallel = PQgetvalue(res, 0, PQfnumber(res, "proparallel"));
13612 3532 : lanname = PQgetvalue(res, 0, PQfnumber(res, "lanname"));
13613 :
13614 : /*
13615 : * See backend/commands/functioncmds.c for details of how the 'AS' clause
13616 : * is used.
13617 : */
13618 3532 : if (prosqlbody)
13619 : {
13620 96 : appendPQExpBufferStr(asPart, prosqlbody);
13621 : }
13622 3436 : else if (probin[0] != '\0')
13623 : {
13624 294 : appendPQExpBufferStr(asPart, "AS ");
13625 294 : appendStringLiteralAH(asPart, probin, fout);
13626 294 : if (prosrc[0] != '\0')
13627 : {
13628 294 : appendPQExpBufferStr(asPart, ", ");
13629 :
13630 : /*
13631 : * when probin is present, use dollar quoting only if allowed and
13632 : * prosrc contains a quote or backslash; else use regular quoting.
13633 : */
13634 294 : if (dopt->disable_dollar_quoting ||
13635 294 : (strchr(prosrc, '\'') == NULL && strchr(prosrc, '\\') == NULL))
13636 294 : appendStringLiteralAH(asPart, prosrc, fout);
13637 : else
13638 0 : appendStringLiteralDQ(asPart, prosrc, NULL);
13639 : }
13640 : }
13641 : else
13642 : {
13643 3142 : appendPQExpBufferStr(asPart, "AS ");
13644 : /* with no bin, dollar quote src unconditionally if allowed */
13645 3142 : if (dopt->disable_dollar_quoting)
13646 0 : appendStringLiteralAH(asPart, prosrc, fout);
13647 : else
13648 3142 : appendStringLiteralDQ(asPart, prosrc, NULL);
13649 : }
13650 :
13651 3532 : if (*proconfig)
13652 : {
13653 30 : if (!parsePGArray(proconfig, &configitems, &nconfigitems))
13654 0 : pg_fatal("could not parse %s array", "proconfig");
13655 : }
13656 : else
13657 : {
13658 3502 : configitems = NULL;
13659 3502 : nconfigitems = 0;
13660 : }
13661 :
13662 3532 : funcfullsig = format_function_arguments(finfo, funcargs, false);
13663 3532 : funcsig = format_function_arguments(finfo, funciargs, false);
13664 :
13665 3532 : funcsig_tag = format_function_signature(fout, finfo, false);
13666 :
13667 3532 : qual_funcsig = psprintf("%s.%s",
13668 3532 : fmtId(finfo->dobj.namespace->dobj.name),
13669 : funcsig);
13670 :
13671 3532 : if (prokind[0] == PROKIND_PROCEDURE)
13672 184 : keyword = "PROCEDURE";
13673 : else
13674 3348 : keyword = "FUNCTION"; /* works for window functions too */
13675 :
13676 3532 : appendPQExpBuffer(delqry, "DROP %s %s;\n",
13677 : keyword, qual_funcsig);
13678 :
13679 7064 : appendPQExpBuffer(q, "CREATE %s %s.%s",
13680 : keyword,
13681 3532 : fmtId(finfo->dobj.namespace->dobj.name),
13682 : funcfullsig ? funcfullsig :
13683 : funcsig);
13684 :
13685 3532 : if (prokind[0] == PROKIND_PROCEDURE)
13686 : /* no result type to output */ ;
13687 3348 : else if (funcresult)
13688 3348 : appendPQExpBuffer(q, " RETURNS %s", funcresult);
13689 : else
13690 0 : appendPQExpBuffer(q, " RETURNS %s%s",
13691 0 : (proretset[0] == 't') ? "SETOF " : "",
13692 0 : getFormattedTypeName(fout, finfo->prorettype,
13693 : zeroIsError));
13694 :
13695 3532 : appendPQExpBuffer(q, "\n LANGUAGE %s", fmtId(lanname));
13696 :
13697 3532 : if (*protrftypes)
13698 : {
13699 0 : Oid *typeids = pg_malloc(FUNC_MAX_ARGS * sizeof(Oid));
13700 : int i;
13701 :
13702 0 : appendPQExpBufferStr(q, " TRANSFORM ");
13703 0 : parseOidArray(protrftypes, typeids, FUNC_MAX_ARGS);
13704 0 : for (i = 0; typeids[i]; i++)
13705 : {
13706 0 : if (i != 0)
13707 0 : appendPQExpBufferStr(q, ", ");
13708 0 : appendPQExpBuffer(q, "FOR TYPE %s",
13709 0 : getFormattedTypeName(fout, typeids[i], zeroAsNone));
13710 : }
13711 :
13712 0 : free(typeids);
13713 : }
13714 :
13715 3532 : if (prokind[0] == PROKIND_WINDOW)
13716 10 : appendPQExpBufferStr(q, " WINDOW");
13717 :
13718 3532 : if (provolatile[0] != PROVOLATILE_VOLATILE)
13719 : {
13720 702 : if (provolatile[0] == PROVOLATILE_IMMUTABLE)
13721 660 : appendPQExpBufferStr(q, " IMMUTABLE");
13722 42 : else if (provolatile[0] == PROVOLATILE_STABLE)
13723 42 : appendPQExpBufferStr(q, " STABLE");
13724 0 : else if (provolatile[0] != PROVOLATILE_VOLATILE)
13725 0 : pg_fatal("unrecognized provolatile value for function \"%s\"",
13726 : finfo->dobj.name);
13727 : }
13728 :
13729 3532 : if (proisstrict[0] == 't')
13730 716 : appendPQExpBufferStr(q, " STRICT");
13731 :
13732 3532 : if (prosecdef[0] == 't')
13733 0 : appendPQExpBufferStr(q, " SECURITY DEFINER");
13734 :
13735 3532 : if (proleakproof[0] == 't')
13736 20 : appendPQExpBufferStr(q, " LEAKPROOF");
13737 :
13738 : /*
13739 : * COST and ROWS are emitted only if present and not default, so as not to
13740 : * break backwards-compatibility of the dump without need. Keep this code
13741 : * in sync with the defaults in functioncmds.c.
13742 : */
13743 3532 : if (strcmp(procost, "0") != 0)
13744 : {
13745 3532 : if (strcmp(lanname, "internal") == 0 || strcmp(lanname, "c") == 0)
13746 : {
13747 : /* default cost is 1 */
13748 752 : if (strcmp(procost, "1") != 0)
13749 0 : appendPQExpBuffer(q, " COST %s", procost);
13750 : }
13751 : else
13752 : {
13753 : /* default cost is 100 */
13754 2780 : if (strcmp(procost, "100") != 0)
13755 12 : appendPQExpBuffer(q, " COST %s", procost);
13756 : }
13757 : }
13758 3532 : if (proretset[0] == 't' &&
13759 374 : strcmp(prorows, "0") != 0 && strcmp(prorows, "1000") != 0)
13760 0 : appendPQExpBuffer(q, " ROWS %s", prorows);
13761 :
13762 3532 : if (strcmp(prosupport, "-") != 0)
13763 : {
13764 : /* We rely on regprocout to provide quoting and qualification */
13765 84 : appendPQExpBuffer(q, " SUPPORT %s", prosupport);
13766 : }
13767 :
13768 3532 : if (proparallel[0] != PROPARALLEL_UNSAFE)
13769 : {
13770 232 : if (proparallel[0] == PROPARALLEL_SAFE)
13771 222 : appendPQExpBufferStr(q, " PARALLEL SAFE");
13772 10 : else if (proparallel[0] == PROPARALLEL_RESTRICTED)
13773 10 : appendPQExpBufferStr(q, " PARALLEL RESTRICTED");
13774 0 : else if (proparallel[0] != PROPARALLEL_UNSAFE)
13775 0 : pg_fatal("unrecognized proparallel value for function \"%s\"",
13776 : finfo->dobj.name);
13777 : }
13778 :
13779 3612 : for (int i = 0; i < nconfigitems; i++)
13780 : {
13781 : /* we feel free to scribble on configitems[] here */
13782 80 : char *configitem = configitems[i];
13783 : char *pos;
13784 :
13785 80 : pos = strchr(configitem, '=');
13786 80 : if (pos == NULL)
13787 0 : continue;
13788 80 : *pos++ = '\0';
13789 80 : appendPQExpBuffer(q, "\n SET %s TO ", fmtId(configitem));
13790 :
13791 : /*
13792 : * Variables that are marked GUC_LIST_QUOTE were already fully quoted
13793 : * by flatten_set_variable_args() before they were put into the
13794 : * proconfig array. However, because the quoting rules used there
13795 : * aren't exactly like SQL's, we have to break the list value apart
13796 : * and then quote the elements as string literals. (The elements may
13797 : * be double-quoted as-is, but we can't just feed them to the SQL
13798 : * parser; it would do the wrong thing with elements that are
13799 : * zero-length or longer than NAMEDATALEN.) Also, we need a special
13800 : * case for empty lists.
13801 : *
13802 : * Variables that are not so marked should just be emitted as simple
13803 : * string literals. If the variable is not known to
13804 : * variable_is_guc_list_quote(), we'll do that; this makes it unsafe
13805 : * to use GUC_LIST_QUOTE for extension variables.
13806 : */
13807 80 : if (variable_is_guc_list_quote(configitem))
13808 : {
13809 : char **namelist;
13810 : char **nameptr;
13811 :
13812 : /* Parse string into list of identifiers */
13813 : /* this shouldn't fail really */
13814 30 : if (SplitGUCList(pos, ',', &namelist))
13815 : {
13816 : /* Special case: represent an empty list as NULL */
13817 30 : if (*namelist == NULL)
13818 10 : appendPQExpBufferStr(q, "NULL");
13819 80 : for (nameptr = namelist; *nameptr; nameptr++)
13820 : {
13821 50 : if (nameptr != namelist)
13822 30 : appendPQExpBufferStr(q, ", ");
13823 50 : appendStringLiteralAH(q, *nameptr, fout);
13824 : }
13825 : }
13826 30 : pg_free(namelist);
13827 : }
13828 : else
13829 50 : appendStringLiteralAH(q, pos, fout);
13830 : }
13831 :
13832 3532 : appendPQExpBuffer(q, "\n %s;\n", asPart->data);
13833 :
13834 3532 : append_depends_on_extension(fout, q, &finfo->dobj,
13835 : "pg_catalog.pg_proc", keyword,
13836 : qual_funcsig);
13837 :
13838 3532 : if (dopt->binary_upgrade)
13839 590 : binary_upgrade_extension_member(q, &finfo->dobj,
13840 : keyword, funcsig,
13841 590 : finfo->dobj.namespace->dobj.name);
13842 :
13843 3532 : if (finfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
13844 3340 : ArchiveEntry(fout, finfo->dobj.catId, finfo->dobj.dumpId,
13845 3340 : ARCHIVE_OPTS(.tag = funcsig_tag,
13846 : .namespace = finfo->dobj.namespace->dobj.name,
13847 : .owner = finfo->rolname,
13848 : .description = keyword,
13849 : .section = finfo->postponed_def ?
13850 : SECTION_POST_DATA : SECTION_PRE_DATA,
13851 : .createStmt = q->data,
13852 : .dropStmt = delqry->data));
13853 :
13854 : /* Dump Function Comments and Security Labels */
13855 3532 : if (finfo->dobj.dump & DUMP_COMPONENT_COMMENT)
13856 18 : dumpComment(fout, keyword, funcsig,
13857 18 : finfo->dobj.namespace->dobj.name, finfo->rolname,
13858 18 : finfo->dobj.catId, 0, finfo->dobj.dumpId);
13859 :
13860 3532 : if (finfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
13861 0 : dumpSecLabel(fout, keyword, funcsig,
13862 0 : finfo->dobj.namespace->dobj.name, finfo->rolname,
13863 0 : finfo->dobj.catId, 0, finfo->dobj.dumpId);
13864 :
13865 3532 : if (finfo->dobj.dump & DUMP_COMPONENT_ACL)
13866 200 : dumpACL(fout, finfo->dobj.dumpId, InvalidDumpId, keyword,
13867 : funcsig, NULL,
13868 200 : finfo->dobj.namespace->dobj.name,
13869 200 : NULL, finfo->rolname, &finfo->dacl);
13870 :
13871 3532 : PQclear(res);
13872 :
13873 3532 : destroyPQExpBuffer(query);
13874 3532 : destroyPQExpBuffer(q);
13875 3532 : destroyPQExpBuffer(delqry);
13876 3532 : destroyPQExpBuffer(asPart);
13877 3532 : free(funcsig);
13878 3532 : free(funcfullsig);
13879 3532 : free(funcsig_tag);
13880 3532 : free(qual_funcsig);
13881 3532 : free(configitems);
13882 : }
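
The SET-clause loop near the end of dumpFunc splits each proconfig entry at its first '=' and emits "SET name TO value". Below is a minimal standalone sketch of just that split, with a hypothetical entry; it omits the string-literal quoting and the GUC_LIST_QUOTE re-splitting done above.

#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* A proconfig entry has the form "name=value"; split at the first '='.
	 * (The real code additionally quotes the value, and re-splits values of
	 * GUC_LIST_QUOTE variables element by element.) */
	char		item[] = "search_path=public, pg_temp";
	char	   *pos = strchr(item, '=');

	if (pos != NULL)
	{
		*pos++ = '\0';
		printf("SET %s TO %s\n", item, pos);
		/* prints: SET search_path TO public, pg_temp */
	}
	return 0;
}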
13883 :
13884 :
13885 : /*
13886 : * Dump a user-defined cast
13887 : */
13888 : static void
13889 134 : dumpCast(Archive *fout, const CastInfo *cast)
13890 : {
13891 134 : DumpOptions *dopt = fout->dopt;
13892 : PQExpBuffer defqry;
13893 : PQExpBuffer delqry;
13894 : PQExpBuffer labelq;
13895 : PQExpBuffer castargs;
13896 134 : FuncInfo *funcInfo = NULL;
13897 : const char *sourceType;
13898 : const char *targetType;
13899 :
13900 : /* Do nothing if not dumping schema */
13901 134 : if (!dopt->dumpSchema)
13902 12 : return;
13903 :
13904 : /* Cannot dump if we don't have the cast function's info */
13905 122 : if (OidIsValid(cast->castfunc))
13906 : {
13907 72 : funcInfo = findFuncByOid(cast->castfunc);
13908 72 : if (funcInfo == NULL)
13909 0 : pg_fatal("could not find function definition for function with OID %u",
13910 : cast->castfunc);
13911 : }
13912 :
13913 122 : defqry = createPQExpBuffer();
13914 122 : delqry = createPQExpBuffer();
13915 122 : labelq = createPQExpBuffer();
13916 122 : castargs = createPQExpBuffer();
13917 :
13918 122 : sourceType = getFormattedTypeName(fout, cast->castsource, zeroAsNone);
13919 122 : targetType = getFormattedTypeName(fout, cast->casttarget, zeroAsNone);
13920 122 : appendPQExpBuffer(delqry, "DROP CAST (%s AS %s);\n",
13921 : sourceType, targetType);
13922 :
13923 122 : appendPQExpBuffer(defqry, "CREATE CAST (%s AS %s) ",
13924 : sourceType, targetType);
13925 :
13926 122 : switch (cast->castmethod)
13927 : {
13928 50 : case COERCION_METHOD_BINARY:
13929 50 : appendPQExpBufferStr(defqry, "WITHOUT FUNCTION");
13930 50 : break;
13931 0 : case COERCION_METHOD_INOUT:
13932 0 : appendPQExpBufferStr(defqry, "WITH INOUT");
13933 0 : break;
13934 72 : case COERCION_METHOD_FUNCTION:
13935 72 : if (funcInfo)
13936 : {
13937 72 : char *fsig = format_function_signature(fout, funcInfo, true);
13938 :
13939 : /*
13940 : * Always qualify the function name (format_function_signature
13941 : * won't qualify it).
13942 : */
13943 72 : appendPQExpBuffer(defqry, "WITH FUNCTION %s.%s",
13944 72 : fmtId(funcInfo->dobj.namespace->dobj.name), fsig);
13945 72 : free(fsig);
13946 : }
13947 : else
13948 0 : pg_log_warning("bogus value in pg_cast.castfunc or pg_cast.castmethod field");
13949 72 : break;
13950 0 : default:
13951 0 : pg_log_warning("bogus value in pg_cast.castmethod field");
13952 : }
13953 :
13954 122 : if (cast->castcontext == 'a')
13955 62 : appendPQExpBufferStr(defqry, " AS ASSIGNMENT");
13956 60 : else if (cast->castcontext == 'i')
13957 20 : appendPQExpBufferStr(defqry, " AS IMPLICIT");
13958 122 : appendPQExpBufferStr(defqry, ";\n");
13959 :
13960 122 : appendPQExpBuffer(labelq, "CAST (%s AS %s)",
13961 : sourceType, targetType);
13962 :
13963 122 : appendPQExpBuffer(castargs, "(%s AS %s)",
13964 : sourceType, targetType);
13965 :
13966 122 : if (dopt->binary_upgrade)
13967 14 : binary_upgrade_extension_member(defqry, &cast->dobj,
13968 14 : "CAST", castargs->data, NULL);
13969 :
13970 122 : if (cast->dobj.dump & DUMP_COMPONENT_DEFINITION)
13971 122 : ArchiveEntry(fout, cast->dobj.catId, cast->dobj.dumpId,
13972 122 : ARCHIVE_OPTS(.tag = labelq->data,
13973 : .description = "CAST",
13974 : .section = SECTION_PRE_DATA,
13975 : .createStmt = defqry->data,
13976 : .dropStmt = delqry->data));
13977 :
13978 : /* Dump Cast Comments */
13979 122 : if (cast->dobj.dump & DUMP_COMPONENT_COMMENT)
13980 0 : dumpComment(fout, "CAST", castargs->data,
13981 : NULL, "",
13982 0 : cast->dobj.catId, 0, cast->dobj.dumpId);
13983 :
13984 122 : destroyPQExpBuffer(defqry);
13985 122 : destroyPQExpBuffer(delqry);
13986 122 : destroyPQExpBuffer(labelq);
13987 122 : destroyPQExpBuffer(castargs);
13988 : }
13989 :
13990 : /*
13991 : * Dump a transform
13992 : */
13993 : static void
13994 84 : dumpTransform(Archive *fout, const TransformInfo *transform)
13995 : {
13996 84 : DumpOptions *dopt = fout->dopt;
13997 : PQExpBuffer defqry;
13998 : PQExpBuffer delqry;
13999 : PQExpBuffer labelq;
14000 : PQExpBuffer transformargs;
14001 84 : FuncInfo *fromsqlFuncInfo = NULL;
14002 84 : FuncInfo *tosqlFuncInfo = NULL;
14003 : char *lanname;
14004 : const char *transformType;
14005 :
14006 : /* Do nothing if not dumping schema */
14007 84 : if (!dopt->dumpSchema)
14008 12 : return;
14009 :
14010 : /* Cannot dump if we don't have the transform functions' info */
14011 72 : if (OidIsValid(transform->trffromsql))
14012 : {
14013 72 : fromsqlFuncInfo = findFuncByOid(transform->trffromsql);
14014 72 : if (fromsqlFuncInfo == NULL)
14015 0 : pg_fatal("could not find function definition for function with OID %u",
14016 : transform->trffromsql);
14017 : }
14018 72 : if (OidIsValid(transform->trftosql))
14019 : {
14020 72 : tosqlFuncInfo = findFuncByOid(transform->trftosql);
14021 72 : if (tosqlFuncInfo == NULL)
14022 0 : pg_fatal("could not find function definition for function with OID %u",
14023 : transform->trftosql);
14024 : }
14025 :
14026 72 : defqry = createPQExpBuffer();
14027 72 : delqry = createPQExpBuffer();
14028 72 : labelq = createPQExpBuffer();
14029 72 : transformargs = createPQExpBuffer();
14030 :
14031 72 : lanname = get_language_name(fout, transform->trflang);
14032 72 : transformType = getFormattedTypeName(fout, transform->trftype, zeroAsNone);
14033 :
14034 72 : appendPQExpBuffer(delqry, "DROP TRANSFORM FOR %s LANGUAGE %s;\n",
14035 : transformType, lanname);
14036 :
14037 72 : appendPQExpBuffer(defqry, "CREATE TRANSFORM FOR %s LANGUAGE %s (",
14038 : transformType, lanname);
14039 :
14040 72 : if (!transform->trffromsql && !transform->trftosql)
14041 0 : pg_log_warning("bogus transform definition, at least one of trffromsql and trftosql should be nonzero");
14042 :
14043 72 : if (transform->trffromsql)
14044 : {
14045 72 : if (fromsqlFuncInfo)
14046 : {
14047 72 : char *fsig = format_function_signature(fout, fromsqlFuncInfo, true);
14048 :
14049 : /*
14050 : * Always qualify the function name (format_function_signature
14051 : * won't qualify it).
14052 : */
14053 72 : appendPQExpBuffer(defqry, "FROM SQL WITH FUNCTION %s.%s",
14054 72 : fmtId(fromsqlFuncInfo->dobj.namespace->dobj.name), fsig);
14055 72 : free(fsig);
14056 : }
14057 : else
14058 0 : pg_log_warning("bogus value in pg_transform.trffromsql field");
14059 : }
14060 :
14061 72 : if (transform->trftosql)
14062 : {
14063 72 : if (transform->trffromsql)
14064 72 : appendPQExpBufferStr(defqry, ", ");
14065 :
14066 72 : if (tosqlFuncInfo)
14067 : {
14068 72 : char *fsig = format_function_signature(fout, tosqlFuncInfo, true);
14069 :
14070 : /*
14071 : * Always qualify the function name (format_function_signature
14072 : * won't qualify it).
14073 : */
14074 72 : appendPQExpBuffer(defqry, "TO SQL WITH FUNCTION %s.%s",
14075 72 : fmtId(tosqlFuncInfo->dobj.namespace->dobj.name), fsig);
14076 72 : free(fsig);
14077 : }
14078 : else
14079 0 : pg_log_warning("bogus value in pg_transform.trftosql field");
14080 : }
14081 :
14082 72 : appendPQExpBufferStr(defqry, ");\n");
14083 :
14084 72 : appendPQExpBuffer(labelq, "TRANSFORM FOR %s LANGUAGE %s",
14085 : transformType, lanname);
14086 :
14087 72 : appendPQExpBuffer(transformargs, "FOR %s LANGUAGE %s",
14088 : transformType, lanname);
14089 :
14090 72 : if (dopt->binary_upgrade)
14091 4 : binary_upgrade_extension_member(defqry, &transform->dobj,
14092 4 : "TRANSFORM", transformargs->data, NULL);
14093 :
14094 72 : if (transform->dobj.dump & DUMP_COMPONENT_DEFINITION)
14095 72 : ArchiveEntry(fout, transform->dobj.catId, transform->dobj.dumpId,
14096 72 : ARCHIVE_OPTS(.tag = labelq->data,
14097 : .description = "TRANSFORM",
14098 : .section = SECTION_PRE_DATA,
14099 : .createStmt = defqry->data,
14100 : .dropStmt = delqry->data,
14101 : .deps = transform->dobj.dependencies,
14102 : .nDeps = transform->dobj.nDeps));
14103 :
14104 : /* Dump Transform Comments */
14105 72 : if (transform->dobj.dump & DUMP_COMPONENT_COMMENT)
14106 0 : dumpComment(fout, "TRANSFORM", transformargs->data,
14107 : NULL, "",
14108 0 : transform->dobj.catId, 0, transform->dobj.dumpId);
14109 :
14110 72 : free(lanname);
14111 72 : destroyPQExpBuffer(defqry);
14112 72 : destroyPQExpBuffer(delqry);
14113 72 : destroyPQExpBuffer(labelq);
14114 72 : destroyPQExpBuffer(transformargs);
14115 : }
14116 :
14117 :
14118 : /*
14119 : * dumpOpr
14120 : * write out a single operator definition
14121 : */
14122 : static void
14123 5044 : dumpOpr(Archive *fout, const OprInfo *oprinfo)
14124 : {
14125 5044 : DumpOptions *dopt = fout->dopt;
14126 : PQExpBuffer query;
14127 : PQExpBuffer q;
14128 : PQExpBuffer delq;
14129 : PQExpBuffer oprid;
14130 : PQExpBuffer details;
14131 : PGresult *res;
14132 : int i_oprkind;
14133 : int i_oprcode;
14134 : int i_oprleft;
14135 : int i_oprright;
14136 : int i_oprcom;
14137 : int i_oprnegate;
14138 : int i_oprrest;
14139 : int i_oprjoin;
14140 : int i_oprcanmerge;
14141 : int i_oprcanhash;
14142 : char *oprkind;
14143 : char *oprcode;
14144 : char *oprleft;
14145 : char *oprright;
14146 : char *oprcom;
14147 : char *oprnegate;
14148 : char *oprrest;
14149 : char *oprjoin;
14150 : char *oprcanmerge;
14151 : char *oprcanhash;
14152 : char *oprregproc;
14153 : char *oprref;
14154 :
14155 : /* Do nothing if not dumping schema */
14156 5044 : if (!dopt->dumpSchema)
14157 12 : return;
14158 :
14159 : /*
14160 : * some operators are invalid because they were the result of a user
14161 : * defining operators before their commutators existed
14162 : */
14163 5032 : if (!OidIsValid(oprinfo->oprcode))
14164 28 : return;
14165 :
14166 5004 : query = createPQExpBuffer();
14167 5004 : q = createPQExpBuffer();
14168 5004 : delq = createPQExpBuffer();
14169 5004 : oprid = createPQExpBuffer();
14170 5004 : details = createPQExpBuffer();
14171 :
14172 5004 : if (!fout->is_prepared[PREPQUERY_DUMPOPR])
14173 : {
14174 : /* Set up query for operator-specific details */
14175 80 : appendPQExpBufferStr(query,
14176 : "PREPARE dumpOpr(pg_catalog.oid) AS\n"
14177 : "SELECT oprkind, "
14178 : "oprcode::pg_catalog.regprocedure, "
14179 : "oprleft::pg_catalog.regtype, "
14180 : "oprright::pg_catalog.regtype, "
14181 : "oprcom, "
14182 : "oprnegate, "
14183 : "oprrest::pg_catalog.regprocedure, "
14184 : "oprjoin::pg_catalog.regprocedure, "
14185 : "oprcanmerge, oprcanhash "
14186 : "FROM pg_catalog.pg_operator "
14187 : "WHERE oid = $1");
14188 :
14189 80 : ExecuteSqlStatement(fout, query->data);
14190 :
14191 80 : fout->is_prepared[PREPQUERY_DUMPOPR] = true;
14192 : }
14193 :
14194 5004 : printfPQExpBuffer(query,
14195 : "EXECUTE dumpOpr('%u')",
14196 5004 : oprinfo->dobj.catId.oid);
14197 :
14198 5004 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
14199 :
14200 5004 : i_oprkind = PQfnumber(res, "oprkind");
14201 5004 : i_oprcode = PQfnumber(res, "oprcode");
14202 5004 : i_oprleft = PQfnumber(res, "oprleft");
14203 5004 : i_oprright = PQfnumber(res, "oprright");
14204 5004 : i_oprcom = PQfnumber(res, "oprcom");
14205 5004 : i_oprnegate = PQfnumber(res, "oprnegate");
14206 5004 : i_oprrest = PQfnumber(res, "oprrest");
14207 5004 : i_oprjoin = PQfnumber(res, "oprjoin");
14208 5004 : i_oprcanmerge = PQfnumber(res, "oprcanmerge");
14209 5004 : i_oprcanhash = PQfnumber(res, "oprcanhash");
14210 :
14211 5004 : oprkind = PQgetvalue(res, 0, i_oprkind);
14212 5004 : oprcode = PQgetvalue(res, 0, i_oprcode);
14213 5004 : oprleft = PQgetvalue(res, 0, i_oprleft);
14214 5004 : oprright = PQgetvalue(res, 0, i_oprright);
14215 5004 : oprcom = PQgetvalue(res, 0, i_oprcom);
14216 5004 : oprnegate = PQgetvalue(res, 0, i_oprnegate);
14217 5004 : oprrest = PQgetvalue(res, 0, i_oprrest);
14218 5004 : oprjoin = PQgetvalue(res, 0, i_oprjoin);
14219 5004 : oprcanmerge = PQgetvalue(res, 0, i_oprcanmerge);
14220 5004 : oprcanhash = PQgetvalue(res, 0, i_oprcanhash);
14221 :
14222 : /* Postfix operators are no longer supported as of PG14. */
14223 5004 : if (strcmp(oprkind, "r") == 0)
14224 0 : pg_log_warning("postfix operators are not supported anymore (operator \"%s\")",
14225 : oprcode);
14226 :
14227 5004 : oprregproc = convertRegProcReference(oprcode);
14228 5004 : if (oprregproc)
14229 : {
14230 5004 : appendPQExpBuffer(details, " FUNCTION = %s", oprregproc);
14231 5004 : free(oprregproc);
14232 : }
14233 :
14234 5004 : appendPQExpBuffer(oprid, "%s (",
14235 5004 : oprinfo->dobj.name);
14236 :
14237 : /*
14238 : * right unary means there's a left arg and left unary means there's a
14239 : * right arg. (Although the "r" case is dead code for PG14 and later,
14240 : * continue to support it in case we're dumping from an old server.)
14241 : */
14242 5004 : if (strcmp(oprkind, "r") == 0 ||
14243 5004 : strcmp(oprkind, "b") == 0)
14244 : {
14245 4718 : appendPQExpBuffer(details, ",\n LEFTARG = %s", oprleft);
14246 4718 : appendPQExpBufferStr(oprid, oprleft);
14247 : }
14248 : else
14249 286 : appendPQExpBufferStr(oprid, "NONE");
14250 :
14251 5004 : if (strcmp(oprkind, "l") == 0 ||
14252 4718 : strcmp(oprkind, "b") == 0)
14253 : {
14254 5004 : appendPQExpBuffer(details, ",\n RIGHTARG = %s", oprright);
14255 5004 : appendPQExpBuffer(oprid, ", %s)", oprright);
14256 : }
14257 : else
14258 0 : appendPQExpBufferStr(oprid, ", NONE)");
14259 :
14260 5004 : oprref = getFormattedOperatorName(oprcom);
14261 5004 : if (oprref)
14262 : {
14263 3358 : appendPQExpBuffer(details, ",\n COMMUTATOR = %s", oprref);
14264 3358 : free(oprref);
14265 : }
14266 :
14267 5004 : oprref = getFormattedOperatorName(oprnegate);
14268 5004 : if (oprref)
14269 : {
14270 2362 : appendPQExpBuffer(details, ",\n NEGATOR = %s", oprref);
14271 2362 : free(oprref);
14272 : }
14273 :
14274 5004 : if (strcmp(oprcanmerge, "t") == 0)
14275 376 : appendPQExpBufferStr(details, ",\n MERGES");
14276 :
14277 5004 : if (strcmp(oprcanhash, "t") == 0)
14278 282 : appendPQExpBufferStr(details, ",\n HASHES");
14279 :
14280 5004 : oprregproc = convertRegProcReference(oprrest);
14281 5004 : if (oprregproc)
14282 : {
14283 3064 : appendPQExpBuffer(details, ",\n RESTRICT = %s", oprregproc);
14284 3064 : free(oprregproc);
14285 : }
14286 :
14287 5004 : oprregproc = convertRegProcReference(oprjoin);
14288 5004 : if (oprregproc)
14289 : {
14290 3064 : appendPQExpBuffer(details, ",\n JOIN = %s", oprregproc);
14291 3064 : free(oprregproc);
14292 : }
14293 :
14294 5004 : appendPQExpBuffer(delq, "DROP OPERATOR %s.%s;\n",
14295 5004 : fmtId(oprinfo->dobj.namespace->dobj.name),
14296 : oprid->data);
14297 :
14298 5004 : appendPQExpBuffer(q, "CREATE OPERATOR %s.%s (\n%s\n);\n",
14299 5004 : fmtId(oprinfo->dobj.namespace->dobj.name),
14300 5004 : oprinfo->dobj.name, details->data);
14301 :
14302 5004 : if (dopt->binary_upgrade)
14303 24 : binary_upgrade_extension_member(q, &oprinfo->dobj,
14304 24 : "OPERATOR", oprid->data,
14305 24 : oprinfo->dobj.namespace->dobj.name);
14306 :
14307 5004 : if (oprinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14308 5004 : ArchiveEntry(fout, oprinfo->dobj.catId, oprinfo->dobj.dumpId,
14309 5004 : ARCHIVE_OPTS(.tag = oprinfo->dobj.name,
14310 : .namespace = oprinfo->dobj.namespace->dobj.name,
14311 : .owner = oprinfo->rolname,
14312 : .description = "OPERATOR",
14313 : .section = SECTION_PRE_DATA,
14314 : .createStmt = q->data,
14315 : .dropStmt = delq->data));
14316 :
14317 : /* Dump Operator Comments */
14318 5004 : if (oprinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14319 4830 : dumpComment(fout, "OPERATOR", oprid->data,
14320 4830 : oprinfo->dobj.namespace->dobj.name, oprinfo->rolname,
14321 4830 : oprinfo->dobj.catId, 0, oprinfo->dobj.dumpId);
14322 :
14323 5004 : PQclear(res);
14324 :
14325 5004 : destroyPQExpBuffer(query);
14326 5004 : destroyPQExpBuffer(q);
14327 5004 : destroyPQExpBuffer(delq);
14328 5004 : destroyPQExpBuffer(oprid);
14329 5004 : destroyPQExpBuffer(details);
14330 : }
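
A minimal standalone sketch of how the operator identity ("name (lefttype, righttype)", with NONE for a missing left argument) is spelled for the DROP OPERATOR command above. The names and types are hypothetical, and the obsolete "r" (postfix) kind is ignored here for brevity.

#include <stdio.h>
#include <string.h>

/* Hypothetical sketch of how the operator identity ("oprid") is spelled. */
static void
print_oprid(const char *name, const char *oprkind,
			const char *oprleft, const char *oprright)
{
	printf("%s (", name);
	if (strcmp(oprkind, "b") == 0)	/* binary: left argument present */
		printf("%s", oprleft);
	else							/* prefix ("l"): no left argument */
		printf("NONE");
	printf(", %s)\n", oprright);
}

int
main(void)
{
	print_oprid("+", "b", "integer", "integer");	/* + (integer, integer) */
	print_oprid("-", "l", NULL, "integer");			/* - (NONE, integer) */
	return 0;
}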
14331 :
14332 : /*
14333 : * Convert a function reference obtained from pg_operator
14334 : *
14335 : * Returns an allocated string of what to print, or NULL if the function
14336 : * reference is InvalidOid. The returned string must be freed by the caller.
14337 : *
14338 : * The input is a REGPROCEDURE display; we have to strip the argument-types
14339 : * part.
14340 : */
14341 : static char *
14342 15012 : convertRegProcReference(const char *proc)
14343 : {
14344 : char *name;
14345 : char *paren;
14346 : bool inquote;
14347 :
14348 : /* In all cases "-" means a null reference */
14349 15012 : if (strcmp(proc, "-") == 0)
14350 3880 : return NULL;
14351 :
14352 11132 : name = pg_strdup(proc);
14353 : /* find non-double-quoted left paren */
14354 11132 : inquote = false;
14355 133992 : for (paren = name; *paren; paren++)
14356 : {
14357 133992 : if (*paren == '(' && !inquote)
14358 : {
14359 11132 : *paren = '\0';
14360 11132 : break;
14361 : }
14362 122860 : if (*paren == '"')
14363 100 : inquote = !inquote;
14364 : }
14365 11132 : return name;
14366 : }
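
A standalone demonstration of the quote-aware scan above, using a hypothetical regprocedure display: the argument list is stripped at the first '(' that is not inside double quotes, so parentheses inside a quoted name are preserved.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	/* A regprocedure display; strip the argument list at the first '('
	 * that is not inside double quotes (names may contain parentheses). */
	const char *proc = "\"odd(name\".myfunc(integer, text)";
	char	   *name = strdup(proc);
	bool		inquote = false;

	for (char *p = name; *p; p++)
	{
		if (*p == '(' && !inquote)
		{
			*p = '\0';
			break;
		}
		if (*p == '"')
			inquote = !inquote;
	}
	printf("%s\n", name);		/* prints: "odd(name".myfunc */
	free(name);
	return 0;
}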
14367 :
14368 : /*
14369 : * getFormattedOperatorName - retrieve the operator name for the
14370 : * given operator OID (presented in string form).
14371 : *
14372 : * Returns an allocated string, or NULL if the given OID is invalid.
14373 : * Caller is responsible for free'ing result string.
14374 : *
14375 : * What we produce has the format "OPERATOR(schema.oprname)". This is only
14376 : * useful in commands where the operator's argument types can be inferred from
14377 : * context. We always schema-qualify the name, though. The predecessor to
14378 : * this code tried to skip the schema qualification if possible, but that led
14379 : * to wrong results in corner cases, such as if an operator and its negator
14380 : * are in different schemas.
14381 : */
14382 : static char *
14383 10578 : getFormattedOperatorName(const char *oproid)
14384 : {
14385 : OprInfo *oprInfo;
14386 :
14387 : /* In all cases "0" means a null reference */
14388 10578 : if (strcmp(oproid, "0") == 0)
14389 4858 : return NULL;
14390 :
14391 5720 : oprInfo = findOprByOid(atooid(oproid));
14392 5720 : if (oprInfo == NULL)
14393 : {
14394 0 : pg_log_warning("could not find operator with OID %s",
14395 : oproid);
14396 0 : return NULL;
14397 : }
14398 :
14399 5720 : return psprintf("OPERATOR(%s.%s)",
14400 5720 : fmtId(oprInfo->dobj.namespace->dobj.name),
14401 : oprInfo->dobj.name);
14402 : }
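
A minimal standalone sketch of the "OPERATOR(schema.oprname)" spelling returned above; the schema and operator name are hypothetical, and the identifier quoting done by fmtId() is omitted.

#include <stdio.h>

int
main(void)
{
	/* Hypothetical schema and operator name; the real code looks them up by
	 * OID via findOprByOid() and quotes the schema name with fmtId(). */
	const char *nspname = "pg_catalog";
	const char *oprname = "=";
	char		buf[128];

	snprintf(buf, sizeof(buf), "OPERATOR(%s.%s)", nspname, oprname);
	printf("%s\n", buf);		/* prints: OPERATOR(pg_catalog.=) */
	return 0;
}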
14403 :
14404 : /*
14405 : * Convert a function OID obtained from pg_ts_parser or pg_ts_template
14406 : *
14407 : * It is sufficient to use REGPROC rather than REGPROCEDURE, since the
14408 : * argument lists of these functions are predetermined. Note that the
14409 : * caller should ensure we are in the proper schema, because the results
14410 : * are search path dependent!
14411 : */
14412 : static char *
14413 410 : convertTSFunction(Archive *fout, Oid funcOid)
14414 : {
14415 : char *result;
14416 : char query[128];
14417 : PGresult *res;
14418 :
14419 410 : snprintf(query, sizeof(query),
14420 : "SELECT '%u'::pg_catalog.regproc", funcOid);
14421 410 : res = ExecuteSqlQueryForSingleRow(fout, query);
14422 :
14423 410 : result = pg_strdup(PQgetvalue(res, 0, 0));
14424 :
14425 410 : PQclear(res);
14426 :
14427 410 : return result;
14428 : }
14429 :
14430 : /*
14431 : * dumpAccessMethod
14432 : * write out a single access method definition
14433 : */
14434 : static void
14435 160 : dumpAccessMethod(Archive *fout, const AccessMethodInfo *aminfo)
14436 : {
14437 160 : DumpOptions *dopt = fout->dopt;
14438 : PQExpBuffer q;
14439 : PQExpBuffer delq;
14440 : char *qamname;
14441 :
14442 : /* Do nothing if not dumping schema */
14443 160 : if (!dopt->dumpSchema)
14444 24 : return;
14445 :
14446 136 : q = createPQExpBuffer();
14447 136 : delq = createPQExpBuffer();
14448 :
14449 136 : qamname = pg_strdup(fmtId(aminfo->dobj.name));
14450 :
14451 136 : appendPQExpBuffer(q, "CREATE ACCESS METHOD %s ", qamname);
14452 :
14453 136 : switch (aminfo->amtype)
14454 : {
14455 64 : case AMTYPE_INDEX:
14456 64 : appendPQExpBufferStr(q, "TYPE INDEX ");
14457 64 : break;
14458 72 : case AMTYPE_TABLE:
14459 72 : appendPQExpBufferStr(q, "TYPE TABLE ");
14460 72 : break;
14461 0 : default:
14462 0 : pg_log_warning("invalid type \"%c\" of access method \"%s\"",
14463 : aminfo->amtype, qamname);
14464 0 : destroyPQExpBuffer(q);
14465 0 : destroyPQExpBuffer(delq);
14466 0 : free(qamname);
14467 0 : return;
14468 : }
14469 :
14470 136 : appendPQExpBuffer(q, "HANDLER %s;\n", aminfo->amhandler);
14471 :
14472 136 : appendPQExpBuffer(delq, "DROP ACCESS METHOD %s;\n",
14473 : qamname);
14474 :
14475 136 : if (dopt->binary_upgrade)
14476 8 : binary_upgrade_extension_member(q, &aminfo->dobj,
14477 : "ACCESS METHOD", qamname, NULL);
14478 :
14479 136 : if (aminfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14480 136 : ArchiveEntry(fout, aminfo->dobj.catId, aminfo->dobj.dumpId,
14481 136 : ARCHIVE_OPTS(.tag = aminfo->dobj.name,
14482 : .description = "ACCESS METHOD",
14483 : .section = SECTION_PRE_DATA,
14484 : .createStmt = q->data,
14485 : .dropStmt = delq->data));
14486 :
14487 : /* Dump Access Method Comments */
14488 136 : if (aminfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14489 0 : dumpComment(fout, "ACCESS METHOD", qamname,
14490 : NULL, "",
14491 0 : aminfo->dobj.catId, 0, aminfo->dobj.dumpId);
14492 :
14493 136 : destroyPQExpBuffer(q);
14494 136 : destroyPQExpBuffer(delq);
14495 136 : free(qamname);
14496 : }
14497 :
14498 : /*
14499 : * dumpOpclass
14500 : * write out a single operator class definition
14501 : */
14502 : static void
14503 1332 : dumpOpclass(Archive *fout, const OpclassInfo *opcinfo)
14504 : {
14505 1332 : DumpOptions *dopt = fout->dopt;
14506 : PQExpBuffer query;
14507 : PQExpBuffer q;
14508 : PQExpBuffer delq;
14509 : PQExpBuffer nameusing;
14510 : PGresult *res;
14511 : int ntups;
14512 : int i_opcintype;
14513 : int i_opckeytype;
14514 : int i_opcdefault;
14515 : int i_opcfamily;
14516 : int i_opcfamilyname;
14517 : int i_opcfamilynsp;
14518 : int i_amname;
14519 : int i_amopstrategy;
14520 : int i_amopopr;
14521 : int i_sortfamily;
14522 : int i_sortfamilynsp;
14523 : int i_amprocnum;
14524 : int i_amproc;
14525 : int i_amproclefttype;
14526 : int i_amprocrighttype;
14527 : char *opcintype;
14528 : char *opckeytype;
14529 : char *opcdefault;
14530 : char *opcfamily;
14531 : char *opcfamilyname;
14532 : char *opcfamilynsp;
14533 : char *amname;
14534 : char *amopstrategy;
14535 : char *amopopr;
14536 : char *sortfamily;
14537 : char *sortfamilynsp;
14538 : char *amprocnum;
14539 : char *amproc;
14540 : char *amproclefttype;
14541 : char *amprocrighttype;
14542 : bool needComma;
14543 : int i;
14544 :
14545 : /* Do nothing if not dumping schema */
14546 1332 : if (!dopt->dumpSchema)
14547 36 : return;
14548 :
14549 1296 : query = createPQExpBuffer();
14550 1296 : q = createPQExpBuffer();
14551 1296 : delq = createPQExpBuffer();
14552 1296 : nameusing = createPQExpBuffer();
14553 :
14554 : /* Get additional fields from the pg_opclass row */
14555 1296 : appendPQExpBuffer(query, "SELECT opcintype::pg_catalog.regtype, "
14556 : "opckeytype::pg_catalog.regtype, "
14557 : "opcdefault, opcfamily, "
14558 : "opfname AS opcfamilyname, "
14559 : "nspname AS opcfamilynsp, "
14560 : "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcmethod) AS amname "
14561 : "FROM pg_catalog.pg_opclass c "
14562 : "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = opcfamily "
14563 : "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14564 : "WHERE c.oid = '%u'::pg_catalog.oid",
14565 1296 : opcinfo->dobj.catId.oid);
14566 :
14567 1296 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
14568 :
14569 1296 : i_opcintype = PQfnumber(res, "opcintype");
14570 1296 : i_opckeytype = PQfnumber(res, "opckeytype");
14571 1296 : i_opcdefault = PQfnumber(res, "opcdefault");
14572 1296 : i_opcfamily = PQfnumber(res, "opcfamily");
14573 1296 : i_opcfamilyname = PQfnumber(res, "opcfamilyname");
14574 1296 : i_opcfamilynsp = PQfnumber(res, "opcfamilynsp");
14575 1296 : i_amname = PQfnumber(res, "amname");
14576 :
14577 : /* opcintype may still be needed after we PQclear res */
14578 1296 : opcintype = pg_strdup(PQgetvalue(res, 0, i_opcintype));
14579 1296 : opckeytype = PQgetvalue(res, 0, i_opckeytype);
14580 1296 : opcdefault = PQgetvalue(res, 0, i_opcdefault);
14581 : /* opcfamily will still be needed after we PQclear res */
14582 1296 : opcfamily = pg_strdup(PQgetvalue(res, 0, i_opcfamily));
14583 1296 : opcfamilyname = PQgetvalue(res, 0, i_opcfamilyname);
14584 1296 : opcfamilynsp = PQgetvalue(res, 0, i_opcfamilynsp);
14585 : /* amname will still be needed after we PQclear res */
14586 1296 : amname = pg_strdup(PQgetvalue(res, 0, i_amname));
14587 :
14588 1296 : appendPQExpBuffer(delq, "DROP OPERATOR CLASS %s",
14589 1296 : fmtQualifiedDumpable(opcinfo));
14590 1296 : appendPQExpBuffer(delq, " USING %s;\n",
14591 : fmtId(amname));
14592 :
14593 : /* Build the fixed portion of the CREATE command */
14594 1296 : appendPQExpBuffer(q, "CREATE OPERATOR CLASS %s\n ",
14595 1296 : fmtQualifiedDumpable(opcinfo));
14596 1296 : if (strcmp(opcdefault, "t") == 0)
14597 732 : appendPQExpBufferStr(q, "DEFAULT ");
14598 1296 : appendPQExpBuffer(q, "FOR TYPE %s USING %s",
14599 : opcintype,
14600 : fmtId(amname));
14601 1296 : if (strlen(opcfamilyname) > 0)
14602 : {
14603 1296 : appendPQExpBufferStr(q, " FAMILY ");
14604 1296 : appendPQExpBuffer(q, "%s.", fmtId(opcfamilynsp));
14605 1296 : appendPQExpBufferStr(q, fmtId(opcfamilyname));
14606 : }
14607 1296 : appendPQExpBufferStr(q, " AS\n ");
14608 :
14609 1296 : needComma = false;
14610 :
14611 1296 : if (strcmp(opckeytype, "-") != 0)
14612 : {
14613 504 : appendPQExpBuffer(q, "STORAGE %s",
14614 : opckeytype);
14615 504 : needComma = true;
14616 : }
14617 :
14618 1296 : PQclear(res);
14619 :
14620 : /*
14621 : * Now fetch and print the OPERATOR entries (pg_amop rows).
14622 : *
14623 : * Print only those opfamily members that are tied to the opclass by
14624 : * pg_depend entries.
14625 : */
14626 1296 : resetPQExpBuffer(query);
14627 1296 : appendPQExpBuffer(query, "SELECT amopstrategy, "
14628 : "amopopr::pg_catalog.regoperator, "
14629 : "opfname AS sortfamily, "
14630 : "nspname AS sortfamilynsp "
14631 : "FROM pg_catalog.pg_amop ao JOIN pg_catalog.pg_depend ON "
14632 : "(classid = 'pg_catalog.pg_amop'::pg_catalog.regclass AND objid = ao.oid) "
14633 : "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = amopsortfamily "
14634 : "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14635 : "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
14636 : "AND refobjid = '%u'::pg_catalog.oid "
14637 : "AND amopfamily = '%s'::pg_catalog.oid "
14638 : "ORDER BY amopstrategy",
14639 1296 : opcinfo->dobj.catId.oid,
14640 : opcfamily);
14641 :
14642 1296 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14643 :
14644 1296 : ntups = PQntuples(res);
14645 :
14646 1296 : i_amopstrategy = PQfnumber(res, "amopstrategy");
14647 1296 : i_amopopr = PQfnumber(res, "amopopr");
14648 1296 : i_sortfamily = PQfnumber(res, "sortfamily");
14649 1296 : i_sortfamilynsp = PQfnumber(res, "sortfamilynsp");
14650 :
14651 1700 : for (i = 0; i < ntups; i++)
14652 : {
14653 404 : amopstrategy = PQgetvalue(res, i, i_amopstrategy);
14654 404 : amopopr = PQgetvalue(res, i, i_amopopr);
14655 404 : sortfamily = PQgetvalue(res, i, i_sortfamily);
14656 404 : sortfamilynsp = PQgetvalue(res, i, i_sortfamilynsp);
14657 :
14658 404 : if (needComma)
14659 256 : appendPQExpBufferStr(q, " ,\n ");
14660 :
14661 404 : appendPQExpBuffer(q, "OPERATOR %s %s",
14662 : amopstrategy, amopopr);
14663 :
14664 404 : if (strlen(sortfamily) > 0)
14665 : {
14666 0 : appendPQExpBufferStr(q, " FOR ORDER BY ");
14667 0 : appendPQExpBuffer(q, "%s.", fmtId(sortfamilynsp));
14668 0 : appendPQExpBufferStr(q, fmtId(sortfamily));
14669 : }
14670 :
14671 404 : needComma = true;
14672 : }
14673 :
14674 1296 : PQclear(res);
14675 :
14676 : /*
14677 : * Now fetch and print the FUNCTION entries (pg_amproc rows).
14678 : *
14679 : * Print only those opfamily members that are tied to the opclass by
14680 : * pg_depend entries.
14681 : *
14682 : * We print the amproclefttype/amprocrighttype even though in most cases
14683 : * the backend could deduce the right values, because of the corner case
14684 : * of a btree sort support function for a cross-type comparison.
14685 : */
14686 1296 : resetPQExpBuffer(query);
14687 :
14688 1296 : appendPQExpBuffer(query, "SELECT amprocnum, "
14689 : "amproc::pg_catalog.regprocedure, "
14690 : "amproclefttype::pg_catalog.regtype, "
14691 : "amprocrighttype::pg_catalog.regtype "
14692 : "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
14693 : "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
14694 : "AND refobjid = '%u'::pg_catalog.oid "
14695 : "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
14696 : "AND objid = ap.oid "
14697 : "ORDER BY amprocnum",
14698 1296 : opcinfo->dobj.catId.oid);
14699 :
14700 1296 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14701 :
14702 1296 : ntups = PQntuples(res);
14703 :
14704 1296 : i_amprocnum = PQfnumber(res, "amprocnum");
14705 1296 : i_amproc = PQfnumber(res, "amproc");
14706 1296 : i_amproclefttype = PQfnumber(res, "amproclefttype");
14707 1296 : i_amprocrighttype = PQfnumber(res, "amprocrighttype");
14708 :
14709 1360 : for (i = 0; i < ntups; i++)
14710 : {
14711 64 : amprocnum = PQgetvalue(res, i, i_amprocnum);
14712 64 : amproc = PQgetvalue(res, i, i_amproc);
14713 64 : amproclefttype = PQgetvalue(res, i, i_amproclefttype);
14714 64 : amprocrighttype = PQgetvalue(res, i, i_amprocrighttype);
14715 :
14716 64 : if (needComma)
14717 64 : appendPQExpBufferStr(q, " ,\n ");
14718 :
14719 64 : appendPQExpBuffer(q, "FUNCTION %s", amprocnum);
14720 :
14721 64 : if (*amproclefttype && *amprocrighttype)
14722 64 : appendPQExpBuffer(q, " (%s, %s)", amproclefttype, amprocrighttype);
14723 :
14724 64 : appendPQExpBuffer(q, " %s", amproc);
14725 :
14726 64 : needComma = true;
14727 : }
14728 :
14729 1296 : PQclear(res);
14730 :
14731 : /*
14732 : * If needComma is still false, it means we haven't added anything after
14733 : * the AS keyword. To avoid printing broken SQL, append a dummy STORAGE
14734 : * clause with the same datatype. This isn't sanctioned by the
14735 : * documentation, but DefineOpClass will in fact treat it as a no-op.
14736 : */
14737 1296 : if (!needComma)
14738 644 : appendPQExpBuffer(q, "STORAGE %s", opcintype);
14739 :
14740 1296 : appendPQExpBufferStr(q, ";\n");
14741 :
14742 1296 : appendPQExpBufferStr(nameusing, fmtId(opcinfo->dobj.name));
14743 1296 : appendPQExpBuffer(nameusing, " USING %s",
14744 : fmtId(amname));
14745 :
14746 1296 : if (dopt->binary_upgrade)
14747 12 : binary_upgrade_extension_member(q, &opcinfo->dobj,
14748 12 : "OPERATOR CLASS", nameusing->data,
14749 12 : opcinfo->dobj.namespace->dobj.name);
14750 :
14751 1296 : if (opcinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14752 1296 : ArchiveEntry(fout, opcinfo->dobj.catId, opcinfo->dobj.dumpId,
14753 1296 : ARCHIVE_OPTS(.tag = opcinfo->dobj.name,
14754 : .namespace = opcinfo->dobj.namespace->dobj.name,
14755 : .owner = opcinfo->rolname,
14756 : .description = "OPERATOR CLASS",
14757 : .section = SECTION_PRE_DATA,
14758 : .createStmt = q->data,
14759 : .dropStmt = delq->data));
14760 :
14761 : /* Dump Operator Class Comments */
14762 1296 : if (opcinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14763 0 : dumpComment(fout, "OPERATOR CLASS", nameusing->data,
14764 0 : opcinfo->dobj.namespace->dobj.name, opcinfo->rolname,
14765 0 : opcinfo->dobj.catId, 0, opcinfo->dobj.dumpId);
14766 :
14767 1296 : free(opcintype);
14768 1296 : free(opcfamily);
14769 1296 : free(amname);
14770 1296 : destroyPQExpBuffer(query);
14771 1296 : destroyPQExpBuffer(q);
14772 1296 : destroyPQExpBuffer(delq);
14773 1296 : destroyPQExpBuffer(nameusing);
14774 : }
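
Both dumpOpclass and dumpOpfamily assemble their member lists with the needComma pattern seen above. A minimal standalone sketch with hypothetical members shows the resulting layout; the real member text comes from pg_amop and pg_amproc rows tied to the object by pg_depend entries.

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical opclass members. */
	const char *members[] = {
		"STORAGE integer",
		"OPERATOR 1 <(integer,integer)",
		"FUNCTION 1 btint4cmp(integer,integer)"
	};
	bool		needComma = false;

	printf("CREATE OPERATOR CLASS demo_ops\n"
		   "    FOR TYPE integer USING btree AS\n    ");
	for (int i = 0; i < 3; i++)
	{
		if (needComma)
			printf(" ,\n    ");
		printf("%s", members[i]);
		needComma = true;
	}
	printf(";\n");
	return 0;
}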
14775 :
14776 : /*
14777 : * dumpOpfamily
14778 : * write out a single operator family definition
14779 : *
14780 : * Note: this also dumps any "loose" operator and function members that
14781 : * aren't bound to a specific opclass within the opfamily.
14782 : */
14783 : static void
14784 1110 : dumpOpfamily(Archive *fout, const OpfamilyInfo *opfinfo)
14785 : {
14786 1110 : DumpOptions *dopt = fout->dopt;
14787 : PQExpBuffer query;
14788 : PQExpBuffer q;
14789 : PQExpBuffer delq;
14790 : PQExpBuffer nameusing;
14791 : PGresult *res;
14792 : PGresult *res_ops;
14793 : PGresult *res_procs;
14794 : int ntups;
14795 : int i_amname;
14796 : int i_amopstrategy;
14797 : int i_amopopr;
14798 : int i_sortfamily;
14799 : int i_sortfamilynsp;
14800 : int i_amprocnum;
14801 : int i_amproc;
14802 : int i_amproclefttype;
14803 : int i_amprocrighttype;
14804 : char *amname;
14805 : char *amopstrategy;
14806 : char *amopopr;
14807 : char *sortfamily;
14808 : char *sortfamilynsp;
14809 : char *amprocnum;
14810 : char *amproc;
14811 : char *amproclefttype;
14812 : char *amprocrighttype;
14813 : bool needComma;
14814 : int i;
14815 :
14816 : /* Do nothing if not dumping schema */
14817 1110 : if (!dopt->dumpSchema)
14818 24 : return;
14819 :
14820 1086 : query = createPQExpBuffer();
14821 1086 : q = createPQExpBuffer();
14822 1086 : delq = createPQExpBuffer();
14823 1086 : nameusing = createPQExpBuffer();
14824 :
14825 : /*
14826 : * Fetch only those opfamily members that are tied directly to the
14827 : * opfamily by pg_depend entries.
14828 : */
14829 1086 : appendPQExpBuffer(query, "SELECT amopstrategy, "
14830 : "amopopr::pg_catalog.regoperator, "
14831 : "opfname AS sortfamily, "
14832 : "nspname AS sortfamilynsp "
14833 : "FROM pg_catalog.pg_amop ao JOIN pg_catalog.pg_depend ON "
14834 : "(classid = 'pg_catalog.pg_amop'::pg_catalog.regclass AND objid = ao.oid) "
14835 : "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = amopsortfamily "
14836 : "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
14837 : "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
14838 : "AND refobjid = '%u'::pg_catalog.oid "
14839 : "AND amopfamily = '%u'::pg_catalog.oid "
14840 : "ORDER BY amopstrategy",
14841 1086 : opfinfo->dobj.catId.oid,
14842 1086 : opfinfo->dobj.catId.oid);
14843 :
14844 1086 : res_ops = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14845 :
14846 1086 : resetPQExpBuffer(query);
14847 :
14848 1086 : appendPQExpBuffer(query, "SELECT amprocnum, "
14849 : "amproc::pg_catalog.regprocedure, "
14850 : "amproclefttype::pg_catalog.regtype, "
14851 : "amprocrighttype::pg_catalog.regtype "
14852 : "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
14853 : "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
14854 : "AND refobjid = '%u'::pg_catalog.oid "
14855 : "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
14856 : "AND objid = ap.oid "
14857 : "ORDER BY amprocnum",
14858 1086 : opfinfo->dobj.catId.oid);
14859 :
14860 1086 : res_procs = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
14861 :
14862 : /* Get additional fields from the pg_opfamily row */
14863 1086 : resetPQExpBuffer(query);
14864 :
14865 1086 : appendPQExpBuffer(query, "SELECT "
14866 : "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opfmethod) AS amname "
14867 : "FROM pg_catalog.pg_opfamily "
14868 : "WHERE oid = '%u'::pg_catalog.oid",
14869 1086 : opfinfo->dobj.catId.oid);
14870 :
14871 1086 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
14872 :
14873 1086 : i_amname = PQfnumber(res, "amname");
14874 :
14875 : /* amname will still be needed after we PQclear res */
14876 1086 : amname = pg_strdup(PQgetvalue(res, 0, i_amname));
14877 :
14878 1086 : appendPQExpBuffer(delq, "DROP OPERATOR FAMILY %s",
14879 1086 : fmtQualifiedDumpable(opfinfo));
14880 1086 : appendPQExpBuffer(delq, " USING %s;\n",
14881 : fmtId(amname));
14882 :
14883 : /* Build the fixed portion of the CREATE command */
14884 1086 : appendPQExpBuffer(q, "CREATE OPERATOR FAMILY %s",
14885 1086 : fmtQualifiedDumpable(opfinfo));
14886 1086 : appendPQExpBuffer(q, " USING %s;\n",
14887 : fmtId(amname));
14888 :
14889 1086 : PQclear(res);
14890 :
14891 : /* Do we need an ALTER to add loose members? */
14892 1086 : if (PQntuples(res_ops) > 0 || PQntuples(res_procs) > 0)
14893 : {
14894 94 : appendPQExpBuffer(q, "ALTER OPERATOR FAMILY %s",
14895 94 : fmtQualifiedDumpable(opfinfo));
14896 94 : appendPQExpBuffer(q, " USING %s ADD\n ",
14897 : fmtId(amname));
14898 :
14899 94 : needComma = false;
14900 :
14901 : /*
14902 : * Now fetch and print the OPERATOR entries (pg_amop rows).
14903 : */
14904 94 : ntups = PQntuples(res_ops);
14905 :
14906 94 : i_amopstrategy = PQfnumber(res_ops, "amopstrategy");
14907 94 : i_amopopr = PQfnumber(res_ops, "amopopr");
14908 94 : i_sortfamily = PQfnumber(res_ops, "sortfamily");
14909 94 : i_sortfamilynsp = PQfnumber(res_ops, "sortfamilynsp");
14910 :
14911 414 : for (i = 0; i < ntups; i++)
14912 : {
14913 320 : amopstrategy = PQgetvalue(res_ops, i, i_amopstrategy);
14914 320 : amopopr = PQgetvalue(res_ops, i, i_amopopr);
14915 320 : sortfamily = PQgetvalue(res_ops, i, i_sortfamily);
14916 320 : sortfamilynsp = PQgetvalue(res_ops, i, i_sortfamilynsp);
14917 :
14918 320 : if (needComma)
14919 256 : appendPQExpBufferStr(q, " ,\n ");
14920 :
14921 320 : appendPQExpBuffer(q, "OPERATOR %s %s",
14922 : amopstrategy, amopopr);
14923 :
14924 320 : if (strlen(sortfamily) > 0)
14925 : {
14926 0 : appendPQExpBufferStr(q, " FOR ORDER BY ");
14927 0 : appendPQExpBuffer(q, "%s.", fmtId(sortfamilynsp));
14928 0 : appendPQExpBufferStr(q, fmtId(sortfamily));
14929 : }
14930 :
14931 320 : needComma = true;
14932 : }
14933 :
14934 : /*
14935 : * Now fetch and print the FUNCTION entries (pg_amproc rows).
14936 : */
14937 94 : ntups = PQntuples(res_procs);
14938 :
14939 94 : i_amprocnum = PQfnumber(res_procs, "amprocnum");
14940 94 : i_amproc = PQfnumber(res_procs, "amproc");
14941 94 : i_amproclefttype = PQfnumber(res_procs, "amproclefttype");
14942 94 : i_amprocrighttype = PQfnumber(res_procs, "amprocrighttype");
14943 :
14944 444 : for (i = 0; i < ntups; i++)
14945 : {
14946 350 : amprocnum = PQgetvalue(res_procs, i, i_amprocnum);
14947 350 : amproc = PQgetvalue(res_procs, i, i_amproc);
14948 350 : amproclefttype = PQgetvalue(res_procs, i, i_amproclefttype);
14949 350 : amprocrighttype = PQgetvalue(res_procs, i, i_amprocrighttype);
14950 :
14951 350 : if (needComma)
14952 320 : appendPQExpBufferStr(q, " ,\n ");
14953 :
14954 350 : appendPQExpBuffer(q, "FUNCTION %s (%s, %s) %s",
14955 : amprocnum, amproclefttype, amprocrighttype,
14956 : amproc);
14957 :
14958 350 : needComma = true;
14959 : }
14960 :
14961 94 : appendPQExpBufferStr(q, ";\n");
14962 : }
14963 :
14964 1086 : appendPQExpBufferStr(nameusing, fmtId(opfinfo->dobj.name));
14965 1086 : appendPQExpBuffer(nameusing, " USING %s",
14966 : fmtId(amname));
14967 :
14968 1086 : if (dopt->binary_upgrade)
14969 18 : binary_upgrade_extension_member(q, &opfinfo->dobj,
14970 18 : "OPERATOR FAMILY", nameusing->data,
14971 18 : opfinfo->dobj.namespace->dobj.name);
14972 :
14973 1086 : if (opfinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
14974 1086 : ArchiveEntry(fout, opfinfo->dobj.catId, opfinfo->dobj.dumpId,
14975 1086 : ARCHIVE_OPTS(.tag = opfinfo->dobj.name,
14976 : .namespace = opfinfo->dobj.namespace->dobj.name,
14977 : .owner = opfinfo->rolname,
14978 : .description = "OPERATOR FAMILY",
14979 : .section = SECTION_PRE_DATA,
14980 : .createStmt = q->data,
14981 : .dropStmt = delq->data));
14982 :
14983 : /* Dump Operator Family Comments */
14984 1086 : if (opfinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
14985 0 : dumpComment(fout, "OPERATOR FAMILY", nameusing->data,
14986 0 : opfinfo->dobj.namespace->dobj.name, opfinfo->rolname,
14987 0 : opfinfo->dobj.catId, 0, opfinfo->dobj.dumpId);
14988 :
14989 1086 : free(amname);
14990 1086 : PQclear(res_ops);
14991 1086 : PQclear(res_procs);
14992 1086 : destroyPQExpBuffer(query);
14993 1086 : destroyPQExpBuffer(q);
14994 1086 : destroyPQExpBuffer(delq);
14995 1086 : destroyPQExpBuffer(nameusing);
14996 : }
14997 :
14998 : /*
14999 : * dumpCollation
15000 : * write out a single collation definition
15001 : */
15002 : static void
15003 5074 : dumpCollation(Archive *fout, const CollInfo *collinfo)
15004 : {
15005 5074 : DumpOptions *dopt = fout->dopt;
15006 : PQExpBuffer query;
15007 : PQExpBuffer q;
15008 : PQExpBuffer delq;
15009 : char *qcollname;
15010 : PGresult *res;
15011 : int i_collprovider;
15012 : int i_collisdeterministic;
15013 : int i_collcollate;
15014 : int i_collctype;
15015 : int i_colllocale;
15016 : int i_collicurules;
15017 : const char *collprovider;
15018 : const char *collcollate;
15019 : const char *collctype;
15020 : const char *colllocale;
15021 : const char *collicurules;
15022 :
15023 : /* Do nothing if not dumping schema */
15024 5074 : if (!dopt->dumpSchema)
15025 24 : return;
15026 :
15027 5050 : query = createPQExpBuffer();
15028 5050 : q = createPQExpBuffer();
15029 5050 : delq = createPQExpBuffer();
15030 :
15031 5050 : qcollname = pg_strdup(fmtId(collinfo->dobj.name));
15032 :
15033 : /* Get collation-specific details */
15034 5050 : appendPQExpBufferStr(query, "SELECT ");
15035 :
15036 5050 : if (fout->remoteVersion >= 100000)
15037 5050 : appendPQExpBufferStr(query,
15038 : "collprovider, "
15039 : "collversion, ");
15040 : else
15041 0 : appendPQExpBufferStr(query,
15042 : "'c' AS collprovider, "
15043 : "NULL AS collversion, ");
15044 :
15045 5050 : if (fout->remoteVersion >= 120000)
15046 5050 : appendPQExpBufferStr(query,
15047 : "collisdeterministic, ");
15048 : else
15049 0 : appendPQExpBufferStr(query,
15050 : "true AS collisdeterministic, ");
15051 :
15052 5050 : if (fout->remoteVersion >= 170000)
15053 5050 : appendPQExpBufferStr(query,
15054 : "colllocale, ");
15055 0 : else if (fout->remoteVersion >= 150000)
15056 0 : appendPQExpBufferStr(query,
15057 : "colliculocale AS colllocale, ");
15058 : else
15059 0 : appendPQExpBufferStr(query,
15060 : "NULL AS colllocale, ");
15061 :
15062 5050 : if (fout->remoteVersion >= 160000)
15063 5050 : appendPQExpBufferStr(query,
15064 : "collicurules, ");
15065 : else
15066 0 : appendPQExpBufferStr(query,
15067 : "NULL AS collicurules, ");
15068 :
15069 5050 : appendPQExpBuffer(query,
15070 : "collcollate, "
15071 : "collctype "
15072 : "FROM pg_catalog.pg_collation c "
15073 : "WHERE c.oid = '%u'::pg_catalog.oid",
15074 5050 : collinfo->dobj.catId.oid);
15075 :
15076 5050 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
15077 :
15078 5050 : i_collprovider = PQfnumber(res, "collprovider");
15079 5050 : i_collisdeterministic = PQfnumber(res, "collisdeterministic");
15080 5050 : i_collcollate = PQfnumber(res, "collcollate");
15081 5050 : i_collctype = PQfnumber(res, "collctype");
15082 5050 : i_colllocale = PQfnumber(res, "colllocale");
15083 5050 : i_collicurules = PQfnumber(res, "collicurules");
15084 :
15085 5050 : collprovider = PQgetvalue(res, 0, i_collprovider);
15086 :
15087 5050 : if (!PQgetisnull(res, 0, i_collcollate))
15088 92 : collcollate = PQgetvalue(res, 0, i_collcollate);
15089 : else
15090 4958 : collcollate = NULL;
15091 :
15092 5050 : if (!PQgetisnull(res, 0, i_collctype))
15093 92 : collctype = PQgetvalue(res, 0, i_collctype);
15094 : else
15095 4958 : collctype = NULL;
15096 :
15097 : /*
15098 : * Before version 15, collcollate and collctype were of type NAME and
15099 : * non-nullable. Treat empty strings as NULL for consistency.
15100 : */
15101 5050 : if (fout->remoteVersion < 150000)
15102 : {
15103 0 : if (collcollate[0] == '\0')
15104 0 : collcollate = NULL;
15105 0 : if (collctype[0] == '\0')
15106 0 : collctype = NULL;
15107 : }
15108 :
15109 5050 : if (!PQgetisnull(res, 0, i_colllocale))
15110 4952 : colllocale = PQgetvalue(res, 0, i_colllocale);
15111 : else
15112 98 : colllocale = NULL;
15113 :
15114 5050 : if (!PQgetisnull(res, 0, i_collicurules))
15115 0 : collicurules = PQgetvalue(res, 0, i_collicurules);
15116 : else
15117 5050 : collicurules = NULL;
15118 :
15119 5050 : appendPQExpBuffer(delq, "DROP COLLATION %s;\n",
15120 5050 : fmtQualifiedDumpable(collinfo));
15121 :
15122 5050 : appendPQExpBuffer(q, "CREATE COLLATION %s (",
15123 5050 : fmtQualifiedDumpable(collinfo));
15124 :
15125 5050 : appendPQExpBufferStr(q, "provider = ");
15126 5050 : if (collprovider[0] == 'b')
15127 38 : appendPQExpBufferStr(q, "builtin");
15128 5012 : else if (collprovider[0] == 'c')
15129 92 : appendPQExpBufferStr(q, "libc");
15130 4920 : else if (collprovider[0] == 'i')
15131 4914 : appendPQExpBufferStr(q, "icu");
15132 6 : else if (collprovider[0] == 'd')
15133 : /* to allow dumping pg_catalog; not accepted on input */
15134 6 : appendPQExpBufferStr(q, "default");
15135 : else
15136 0 : pg_fatal("unrecognized collation provider: %s",
15137 : collprovider);
15138 :
15139 5050 : if (strcmp(PQgetvalue(res, 0, i_collisdeterministic), "f") == 0)
15140 0 : appendPQExpBufferStr(q, ", deterministic = false");
15141 :
15142 5050 : if (collprovider[0] == 'd')
15143 : {
15144 6 : if (collcollate || collctype || colllocale || collicurules)
15145 0 : pg_log_warning("invalid collation \"%s\"", qcollname);
15146 :
15147 : /* no locale -- the default collation cannot be reloaded anyway */
15148 : }
15149 5044 : else if (collprovider[0] == 'b')
15150 : {
15151 38 : if (collcollate || collctype || !colllocale || collicurules)
15152 0 : pg_log_warning("invalid collation \"%s\"", qcollname);
15153 :
15154 38 : appendPQExpBufferStr(q, ", locale = ");
15155 38 : appendStringLiteralAH(q, colllocale ? colllocale : "",
15156 : fout);
15157 : }
15158 5006 : else if (collprovider[0] == 'i')
15159 : {
15160 4914 : if (fout->remoteVersion >= 150000)
15161 : {
15162 4914 : if (collcollate || collctype || !colllocale)
15163 0 : pg_log_warning("invalid collation \"%s\"", qcollname);
15164 :
15165 4914 : appendPQExpBufferStr(q, ", locale = ");
15166 4914 : appendStringLiteralAH(q, colllocale ? colllocale : "",
15167 : fout);
15168 : }
15169 : else
15170 : {
15171 0 : if (!collcollate || !collctype || colllocale ||
15172 0 : strcmp(collcollate, collctype) != 0)
15173 0 : pg_log_warning("invalid collation \"%s\"", qcollname);
15174 :
15175 0 : appendPQExpBufferStr(q, ", locale = ");
15176 0 : appendStringLiteralAH(q, collcollate ? collcollate : "", fout);
15177 : }
15178 :
15179 4914 : if (collicurules)
15180 : {
15181 0 : appendPQExpBufferStr(q, ", rules = ");
15182 0 : appendStringLiteralAH(q, collicurules ? collicurules : "", fout);
15183 : }
15184 : }
15185 92 : else if (collprovider[0] == 'c')
15186 : {
15187 92 : if (colllocale || collicurules || !collcollate || !collctype)
15188 0 : pg_log_warning("invalid collation \"%s\"", qcollname);
15189 :
15190 92 : if (collcollate && collctype && strcmp(collcollate, collctype) == 0)
15191 : {
15192 92 : appendPQExpBufferStr(q, ", locale = ");
15193 92 : appendStringLiteralAH(q, collcollate ? collcollate : "", fout);
15194 : }
15195 : else
15196 : {
15197 0 : appendPQExpBufferStr(q, ", lc_collate = ");
15198 0 : appendStringLiteralAH(q, collcollate ? collcollate : "", fout);
15199 0 : appendPQExpBufferStr(q, ", lc_ctype = ");
15200 0 : appendStringLiteralAH(q, collctype ? collctype : "", fout);
15201 : }
15202 : }
15203 : else
15204 0 : pg_fatal("unrecognized collation provider: %s", collprovider);
15205 :
15206 : /*
15207 : * For binary upgrade, carry over the collation version. For normal
15208 : * dump/restore, omit the version, so that it is computed upon restore.
15209 : */
15210 5050 : if (dopt->binary_upgrade)
15211 : {
15212 : int i_collversion;
15213 :
15214 10 : i_collversion = PQfnumber(res, "collversion");
15215 10 : if (!PQgetisnull(res, 0, i_collversion))
15216 : {
15217 8 : appendPQExpBufferStr(q, ", version = ");
15218 8 : appendStringLiteralAH(q,
15219 : PQgetvalue(res, 0, i_collversion),
15220 : fout);
15221 : }
15222 : }
15223 :
15224 5050 : appendPQExpBufferStr(q, ");\n");
15225 :
15226 5050 : if (dopt->binary_upgrade)
15227 10 : binary_upgrade_extension_member(q, &collinfo->dobj,
15228 : "COLLATION", qcollname,
15229 10 : collinfo->dobj.namespace->dobj.name);
15230 :
15231 5050 : if (collinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15232 5050 : ArchiveEntry(fout, collinfo->dobj.catId, collinfo->dobj.dumpId,
15233 5050 : ARCHIVE_OPTS(.tag = collinfo->dobj.name,
15234 : .namespace = collinfo->dobj.namespace->dobj.name,
15235 : .owner = collinfo->rolname,
15236 : .description = "COLLATION",
15237 : .section = SECTION_PRE_DATA,
15238 : .createStmt = q->data,
15239 : .dropStmt = delq->data));
15240 :
15241 : /* Dump Collation Comments */
15242 5050 : if (collinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15243 4862 : dumpComment(fout, "COLLATION", qcollname,
15244 4862 : collinfo->dobj.namespace->dobj.name, collinfo->rolname,
15245 4862 : collinfo->dobj.catId, 0, collinfo->dobj.dumpId);
15246 :
15247 5050 : PQclear(res);
15248 :
15249 5050 : destroyPQExpBuffer(query);
15250 5050 : destroyPQExpBuffer(q);
15251 5050 : destroyPQExpBuffer(delq);
15252 5050 : free(qcollname);
15253 : }
15254 :
15255 : /*
15256 : * dumpConversion
15257 : * write out a single conversion definition
15258 : */
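                   :  /*
                   :   * Example (illustrative; the conversion name is a placeholder, the
                   :   * function shown is a built-in conversion routine) of the emitted form:
                   :   *
                   :   *   CREATE DEFAULT CONVERSION public.latin1_to_utf8
                   :   *       FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
                   :   */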
15259 : static void
15260 844 : dumpConversion(Archive *fout, const ConvInfo *convinfo)
15261 : {
15262 844 : DumpOptions *dopt = fout->dopt;
15263 : PQExpBuffer query;
15264 : PQExpBuffer q;
15265 : PQExpBuffer delq;
15266 : char *qconvname;
15267 : PGresult *res;
15268 : int i_conforencoding;
15269 : int i_contoencoding;
15270 : int i_conproc;
15271 : int i_condefault;
15272 : const char *conforencoding;
15273 : const char *contoencoding;
15274 : const char *conproc;
15275 : bool condefault;
15276 :
15277 : /* Do nothing if not dumping schema */
15278 844 : if (!dopt->dumpSchema)
15279 12 : return;
15280 :
15281 832 : query = createPQExpBuffer();
15282 832 : q = createPQExpBuffer();
15283 832 : delq = createPQExpBuffer();
15284 :
15285 832 : qconvname = pg_strdup(fmtId(convinfo->dobj.name));
15286 :
15287 : /* Get conversion-specific details */
15288 832 : appendPQExpBuffer(query, "SELECT "
15289 : "pg_catalog.pg_encoding_to_char(conforencoding) AS conforencoding, "
15290 : "pg_catalog.pg_encoding_to_char(contoencoding) AS contoencoding, "
15291 : "conproc, condefault "
15292 : "FROM pg_catalog.pg_conversion c "
15293 : "WHERE c.oid = '%u'::pg_catalog.oid",
15294 832 : convinfo->dobj.catId.oid);
15295 :
15296 832 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
15297 :
15298 832 : i_conforencoding = PQfnumber(res, "conforencoding");
15299 832 : i_contoencoding = PQfnumber(res, "contoencoding");
15300 832 : i_conproc = PQfnumber(res, "conproc");
15301 832 : i_condefault = PQfnumber(res, "condefault");
15302 :
15303 832 : conforencoding = PQgetvalue(res, 0, i_conforencoding);
15304 832 : contoencoding = PQgetvalue(res, 0, i_contoencoding);
15305 832 : conproc = PQgetvalue(res, 0, i_conproc);
15306 832 : condefault = (PQgetvalue(res, 0, i_condefault)[0] == 't');
15307 :
15308 832 : appendPQExpBuffer(delq, "DROP CONVERSION %s;\n",
15309 832 : fmtQualifiedDumpable(convinfo));
15310 :
15311 832 : appendPQExpBuffer(q, "CREATE %sCONVERSION %s FOR ",
15312 : (condefault) ? "DEFAULT " : "",
15313 832 : fmtQualifiedDumpable(convinfo));
15314 832 : appendStringLiteralAH(q, conforencoding, fout);
15315 832 : appendPQExpBufferStr(q, " TO ");
15316 832 : appendStringLiteralAH(q, contoencoding, fout);
15317 : /* regproc output is already sufficiently quoted */
15318 832 : appendPQExpBuffer(q, " FROM %s;\n", conproc);
15319 :
15320 832 : if (dopt->binary_upgrade)
15321 2 : binary_upgrade_extension_member(q, &convinfo->dobj,
15322 : "CONVERSION", qconvname,
15323 2 : convinfo->dobj.namespace->dobj.name);
15324 :
15325 832 : if (convinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15326 832 : ArchiveEntry(fout, convinfo->dobj.catId, convinfo->dobj.dumpId,
15327 832 : ARCHIVE_OPTS(.tag = convinfo->dobj.name,
15328 : .namespace = convinfo->dobj.namespace->dobj.name,
15329 : .owner = convinfo->rolname,
15330 : .description = "CONVERSION",
15331 : .section = SECTION_PRE_DATA,
15332 : .createStmt = q->data,
15333 : .dropStmt = delq->data));
15334 :
15335 : /* Dump Conversion Comments */
15336 832 : if (convinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15337 832 : dumpComment(fout, "CONVERSION", qconvname,
15338 832 : convinfo->dobj.namespace->dobj.name, convinfo->rolname,
15339 832 : convinfo->dobj.catId, 0, convinfo->dobj.dumpId);
15340 :
15341 832 : PQclear(res);
15342 :
15343 832 : destroyPQExpBuffer(query);
15344 832 : destroyPQExpBuffer(q);
15345 832 : destroyPQExpBuffer(delq);
15346 832 : free(qconvname);
15347 : }
15348 :
15349 : /*
15350 : * format_aggregate_signature: generate aggregate name and argument list
15351 : *
15352 : * The argument type names are qualified if needed. The aggregate name
15353 : * is never qualified.
15354 : */
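                   :  /*
                   :   * For instance, an aggregate taking one integer argument is formatted as
                   :   * "my_agg(integer)", while a zero-argument aggregate comes out as
                   :   * "my_agg(*)" (the names here are just placeholders).
                   :   */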
15355 : static char *
15356 570 : format_aggregate_signature(const AggInfo *agginfo, Archive *fout, bool honor_quotes)
15357 : {
15358 : PQExpBufferData buf;
15359 : int j;
15360 :
15361 570 : initPQExpBuffer(&buf);
15362 570 : if (honor_quotes)
15363 0 : appendPQExpBufferStr(&buf, fmtId(agginfo->aggfn.dobj.name));
15364 : else
15365 570 : appendPQExpBufferStr(&buf, agginfo->aggfn.dobj.name);
15366 :
15367 570 : if (agginfo->aggfn.nargs == 0)
15368 80 : appendPQExpBufferStr(&buf, "(*)");
15369 : else
15370 : {
15371 490 : appendPQExpBufferChar(&buf, '(');
15372 1070 : for (j = 0; j < agginfo->aggfn.nargs; j++)
15373 580 : appendPQExpBuffer(&buf, "%s%s",
15374 : (j > 0) ? ", " : "",
15375 : getFormattedTypeName(fout,
15376 580 : agginfo->aggfn.argtypes[j],
15377 : zeroIsError));
15378 490 : appendPQExpBufferChar(&buf, ')');
15379 : }
15380 570 : return buf.data;
15381 : }
15382 :
15383 : /*
15384 : * dumpAgg
15385 : * write out a single aggregate definition
15386 : */
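                   :  /*
                   :   * Example (illustrative; the aggregate name is a placeholder, int4pl is
                   :   * simply a convenient built-in transition function) of the command shape
                   :   * assembled below:
                   :   *
                   :   *   CREATE AGGREGATE public.my_sum(integer) (
                   :   *       SFUNC = int4pl,
                   :   *       STYPE = integer,
                   :   *       INITCOND = '0'
                   :   *   );
                   :   */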
15387 : static void
15388 584 : dumpAgg(Archive *fout, const AggInfo *agginfo)
15389 : {
15390 584 : DumpOptions *dopt = fout->dopt;
15391 : PQExpBuffer query;
15392 : PQExpBuffer q;
15393 : PQExpBuffer delq;
15394 : PQExpBuffer details;
15395 : char *aggsig; /* identity signature */
15396 584 : char *aggfullsig = NULL; /* full signature */
15397 : char *aggsig_tag;
15398 : PGresult *res;
15399 : int i_agginitval;
15400 : int i_aggminitval;
15401 : const char *aggtransfn;
15402 : const char *aggfinalfn;
15403 : const char *aggcombinefn;
15404 : const char *aggserialfn;
15405 : const char *aggdeserialfn;
15406 : const char *aggmtransfn;
15407 : const char *aggminvtransfn;
15408 : const char *aggmfinalfn;
15409 : bool aggfinalextra;
15410 : bool aggmfinalextra;
15411 : char aggfinalmodify;
15412 : char aggmfinalmodify;
15413 : const char *aggsortop;
15414 : char *aggsortconvop;
15415 : char aggkind;
15416 : const char *aggtranstype;
15417 : const char *aggtransspace;
15418 : const char *aggmtranstype;
15419 : const char *aggmtransspace;
15420 : const char *agginitval;
15421 : const char *aggminitval;
15422 : const char *proparallel;
15423 : char defaultfinalmodify;
15424 :
15425 : /* Do nothing if not dumping schema */
15426 584 : if (!dopt->dumpSchema)
15427 14 : return;
15428 :
15429 570 : query = createPQExpBuffer();
15430 570 : q = createPQExpBuffer();
15431 570 : delq = createPQExpBuffer();
15432 570 : details = createPQExpBuffer();
15433 :
15434 570 : if (!fout->is_prepared[PREPQUERY_DUMPAGG])
15435 : {
15436 : /* Set up query for aggregate-specific details */
15437 110 : appendPQExpBufferStr(query,
15438 : "PREPARE dumpAgg(pg_catalog.oid) AS\n");
15439 :
15440 110 : appendPQExpBufferStr(query,
15441 : "SELECT "
15442 : "aggtransfn,\n"
15443 : "aggfinalfn,\n"
15444 : "aggtranstype::pg_catalog.regtype,\n"
15445 : "agginitval,\n"
15446 : "aggsortop,\n"
15447 : "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs,\n"
15448 : "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs,\n");
15449 :
15450 110 : if (fout->remoteVersion >= 90400)
15451 110 : appendPQExpBufferStr(query,
15452 : "aggkind,\n"
15453 : "aggmtransfn,\n"
15454 : "aggminvtransfn,\n"
15455 : "aggmfinalfn,\n"
15456 : "aggmtranstype::pg_catalog.regtype,\n"
15457 : "aggfinalextra,\n"
15458 : "aggmfinalextra,\n"
15459 : "aggtransspace,\n"
15460 : "aggmtransspace,\n"
15461 : "aggminitval,\n");
15462 : else
15463 0 : appendPQExpBufferStr(query,
15464 : "'n' AS aggkind,\n"
15465 : "'-' AS aggmtransfn,\n"
15466 : "'-' AS aggminvtransfn,\n"
15467 : "'-' AS aggmfinalfn,\n"
15468 : "0 AS aggmtranstype,\n"
15469 : "false AS aggfinalextra,\n"
15470 : "false AS aggmfinalextra,\n"
15471 : "0 AS aggtransspace,\n"
15472 : "0 AS aggmtransspace,\n"
15473 : "NULL AS aggminitval,\n");
15474 :
15475 110 : if (fout->remoteVersion >= 90600)
15476 110 : appendPQExpBufferStr(query,
15477 : "aggcombinefn,\n"
15478 : "aggserialfn,\n"
15479 : "aggdeserialfn,\n"
15480 : "proparallel,\n");
15481 : else
15482 0 : appendPQExpBufferStr(query,
15483 : "'-' AS aggcombinefn,\n"
15484 : "'-' AS aggserialfn,\n"
15485 : "'-' AS aggdeserialfn,\n"
15486 : "'u' AS proparallel,\n");
15487 :
15488 110 : if (fout->remoteVersion >= 110000)
15489 110 : appendPQExpBufferStr(query,
15490 : "aggfinalmodify,\n"
15491 : "aggmfinalmodify\n");
15492 : else
15493 0 : appendPQExpBufferStr(query,
15494 : "'0' AS aggfinalmodify,\n"
15495 : "'0' AS aggmfinalmodify\n");
15496 :
15497 110 : appendPQExpBufferStr(query,
15498 : "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
15499 : "WHERE a.aggfnoid = p.oid "
15500 : "AND p.oid = $1");
15501 :
15502 110 : ExecuteSqlStatement(fout, query->data);
15503 :
15504 110 : fout->is_prepared[PREPQUERY_DUMPAGG] = true;
15505 : }
15506 :
15507 570 : printfPQExpBuffer(query,
15508 : "EXECUTE dumpAgg('%u')",
15509 570 : agginfo->aggfn.dobj.catId.oid);
15510 :
15511 570 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
15512 :
15513 570 : i_agginitval = PQfnumber(res, "agginitval");
15514 570 : i_aggminitval = PQfnumber(res, "aggminitval");
15515 :
15516 570 : aggtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggtransfn"));
15517 570 : aggfinalfn = PQgetvalue(res, 0, PQfnumber(res, "aggfinalfn"));
15518 570 : aggcombinefn = PQgetvalue(res, 0, PQfnumber(res, "aggcombinefn"));
15519 570 : aggserialfn = PQgetvalue(res, 0, PQfnumber(res, "aggserialfn"));
15520 570 : aggdeserialfn = PQgetvalue(res, 0, PQfnumber(res, "aggdeserialfn"));
15521 570 : aggmtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggmtransfn"));
15522 570 : aggminvtransfn = PQgetvalue(res, 0, PQfnumber(res, "aggminvtransfn"));
15523 570 : aggmfinalfn = PQgetvalue(res, 0, PQfnumber(res, "aggmfinalfn"));
15524 570 : aggfinalextra = (PQgetvalue(res, 0, PQfnumber(res, "aggfinalextra"))[0] == 't');
15525 570 : aggmfinalextra = (PQgetvalue(res, 0, PQfnumber(res, "aggmfinalextra"))[0] == 't');
15526 570 : aggfinalmodify = PQgetvalue(res, 0, PQfnumber(res, "aggfinalmodify"))[0];
15527 570 : aggmfinalmodify = PQgetvalue(res, 0, PQfnumber(res, "aggmfinalmodify"))[0];
15528 570 : aggsortop = PQgetvalue(res, 0, PQfnumber(res, "aggsortop"));
15529 570 : aggkind = PQgetvalue(res, 0, PQfnumber(res, "aggkind"))[0];
15530 570 : aggtranstype = PQgetvalue(res, 0, PQfnumber(res, "aggtranstype"));
15531 570 : aggtransspace = PQgetvalue(res, 0, PQfnumber(res, "aggtransspace"));
15532 570 : aggmtranstype = PQgetvalue(res, 0, PQfnumber(res, "aggmtranstype"));
15533 570 : aggmtransspace = PQgetvalue(res, 0, PQfnumber(res, "aggmtransspace"));
15534 570 : agginitval = PQgetvalue(res, 0, i_agginitval);
15535 570 : aggminitval = PQgetvalue(res, 0, i_aggminitval);
15536 570 : proparallel = PQgetvalue(res, 0, PQfnumber(res, "proparallel"));
15537 :
15538 : {
15539 : char *funcargs;
15540 : char *funciargs;
15541 :
15542 570 : funcargs = PQgetvalue(res, 0, PQfnumber(res, "funcargs"));
15543 570 : funciargs = PQgetvalue(res, 0, PQfnumber(res, "funciargs"));
15544 570 : aggfullsig = format_function_arguments(&agginfo->aggfn, funcargs, true);
15545 570 : aggsig = format_function_arguments(&agginfo->aggfn, funciargs, true);
15546 : }
15547 :
15548 570 : aggsig_tag = format_aggregate_signature(agginfo, fout, false);
15549 :
15550 : /* identify default modify flag for aggkind (must match DefineAggregate) */
15551 570 : defaultfinalmodify = (aggkind == AGGKIND_NORMAL) ? AGGMODIFY_READ_ONLY : AGGMODIFY_READ_WRITE;
15552 : /* replace omitted flags for old versions */
15553 570 : if (aggfinalmodify == '0')
15554 0 : aggfinalmodify = defaultfinalmodify;
15555 570 : if (aggmfinalmodify == '0')
15556 0 : aggmfinalmodify = defaultfinalmodify;
15557 :
15558 : /* regproc and regtype output is already sufficiently quoted */
15559 570 : appendPQExpBuffer(details, " SFUNC = %s,\n STYPE = %s",
15560 : aggtransfn, aggtranstype);
15561 :
15562 570 : if (strcmp(aggtransspace, "0") != 0)
15563 : {
15564 10 : appendPQExpBuffer(details, ",\n SSPACE = %s",
15565 : aggtransspace);
15566 : }
15567 :
15568 570 : if (!PQgetisnull(res, 0, i_agginitval))
15569 : {
15570 414 : appendPQExpBufferStr(details, ",\n INITCOND = ");
15571 414 : appendStringLiteralAH(details, agginitval, fout);
15572 : }
15573 :
15574 570 : if (strcmp(aggfinalfn, "-") != 0)
15575 : {
15576 264 : appendPQExpBuffer(details, ",\n FINALFUNC = %s",
15577 : aggfinalfn);
15578 264 : if (aggfinalextra)
15579 20 : appendPQExpBufferStr(details, ",\n FINALFUNC_EXTRA");
15580 264 : if (aggfinalmodify != defaultfinalmodify)
15581 : {
15582 64 : switch (aggfinalmodify)
15583 : {
15584 0 : case AGGMODIFY_READ_ONLY:
15585 0 : appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = READ_ONLY");
15586 0 : break;
15587 64 : case AGGMODIFY_SHAREABLE:
15588 64 : appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = SHAREABLE");
15589 64 : break;
15590 0 : case AGGMODIFY_READ_WRITE:
15591 0 : appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = READ_WRITE");
15592 0 : break;
15593 0 : default:
15594 0 : pg_fatal("unrecognized aggfinalmodify value for aggregate \"%s\"",
15595 : agginfo->aggfn.dobj.name);
15596 : break;
15597 : }
15598 : }
15599 : }
15600 :
15601 570 : if (strcmp(aggcombinefn, "-") != 0)
15602 0 : appendPQExpBuffer(details, ",\n COMBINEFUNC = %s", aggcombinefn);
15603 :
15604 570 : if (strcmp(aggserialfn, "-") != 0)
15605 0 : appendPQExpBuffer(details, ",\n SERIALFUNC = %s", aggserialfn);
15606 :
15607 570 : if (strcmp(aggdeserialfn, "-") != 0)
15608 0 : appendPQExpBuffer(details, ",\n DESERIALFUNC = %s", aggdeserialfn);
15609 :
15610 570 : if (strcmp(aggmtransfn, "-") != 0)
15611 : {
15612 60 : appendPQExpBuffer(details, ",\n MSFUNC = %s,\n MINVFUNC = %s,\n MSTYPE = %s",
15613 : aggmtransfn,
15614 : aggminvtransfn,
15615 : aggmtranstype);
15616 : }
15617 :
15618 570 : if (strcmp(aggmtransspace, "0") != 0)
15619 : {
15620 0 : appendPQExpBuffer(details, ",\n MSSPACE = %s",
15621 : aggmtransspace);
15622 : }
15623 :
15624 570 : if (!PQgetisnull(res, 0, i_aggminitval))
15625 : {
15626 20 : appendPQExpBufferStr(details, ",\n MINITCOND = ");
15627 20 : appendStringLiteralAH(details, aggminitval, fout);
15628 : }
15629 :
15630 570 : if (strcmp(aggmfinalfn, "-") != 0)
15631 : {
15632 0 : appendPQExpBuffer(details, ",\n MFINALFUNC = %s",
15633 : aggmfinalfn);
15634 0 : if (aggmfinalextra)
15635 0 : appendPQExpBufferStr(details, ",\n MFINALFUNC_EXTRA");
15636 0 : if (aggmfinalmodify != defaultfinalmodify)
15637 : {
15638 0 : switch (aggmfinalmodify)
15639 : {
15640 0 : case AGGMODIFY_READ_ONLY:
15641 0 : appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = READ_ONLY");
15642 0 : break;
15643 0 : case AGGMODIFY_SHAREABLE:
15644 0 : appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = SHAREABLE");
15645 0 : break;
15646 0 : case AGGMODIFY_READ_WRITE:
15647 0 : appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = READ_WRITE");
15648 0 : break;
15649 0 : default:
15650 0 : pg_fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"",
15651 : agginfo->aggfn.dobj.name);
15652 : break;
15653 : }
15654 : }
15655 : }
15656 :
15657 570 : aggsortconvop = getFormattedOperatorName(aggsortop);
15658 570 : if (aggsortconvop)
15659 : {
15660 0 : appendPQExpBuffer(details, ",\n SORTOP = %s",
15661 : aggsortconvop);
15662 0 : free(aggsortconvop);
15663 : }
15664 :
15665 570 : if (aggkind == AGGKIND_HYPOTHETICAL)
15666 10 : appendPQExpBufferStr(details, ",\n HYPOTHETICAL");
15667 :
15668 570 : if (proparallel[0] != PROPARALLEL_UNSAFE)
15669 : {
15670 10 : if (proparallel[0] == PROPARALLEL_SAFE)
15671 10 : appendPQExpBufferStr(details, ",\n PARALLEL = safe");
15672 0 : else if (proparallel[0] == PROPARALLEL_RESTRICTED)
15673 0 : appendPQExpBufferStr(details, ",\n PARALLEL = restricted");
15674 0 : else if (proparallel[0] != PROPARALLEL_UNSAFE)
15675 0 : pg_fatal("unrecognized proparallel value for function \"%s\"",
15676 : agginfo->aggfn.dobj.name);
15677 : }
15678 :
15679 570 : appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n",
15680 570 : fmtId(agginfo->aggfn.dobj.namespace->dobj.name),
15681 : aggsig);
15682 :
15683 1140 : appendPQExpBuffer(q, "CREATE AGGREGATE %s.%s (\n%s\n);\n",
15684 570 : fmtId(agginfo->aggfn.dobj.namespace->dobj.name),
15685 : aggfullsig ? aggfullsig : aggsig, details->data);
15686 :
15687 570 : if (dopt->binary_upgrade)
15688 98 : binary_upgrade_extension_member(q, &agginfo->aggfn.dobj,
15689 : "AGGREGATE", aggsig,
15690 98 : agginfo->aggfn.dobj.namespace->dobj.name);
15691 :
15692 570 : if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_DEFINITION)
15693 536 : ArchiveEntry(fout, agginfo->aggfn.dobj.catId,
15694 536 : agginfo->aggfn.dobj.dumpId,
15695 536 : ARCHIVE_OPTS(.tag = aggsig_tag,
15696 : .namespace = agginfo->aggfn.dobj.namespace->dobj.name,
15697 : .owner = agginfo->aggfn.rolname,
15698 : .description = "AGGREGATE",
15699 : .section = SECTION_PRE_DATA,
15700 : .createStmt = q->data,
15701 : .dropStmt = delq->data));
15702 :
15703 : /* Dump Aggregate Comments */
15704 570 : if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_COMMENT)
15705 20 : dumpComment(fout, "AGGREGATE", aggsig,
15706 20 : agginfo->aggfn.dobj.namespace->dobj.name,
15707 20 : agginfo->aggfn.rolname,
15708 20 : agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
15709 :
15710 570 : if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_SECLABEL)
15711 0 : dumpSecLabel(fout, "AGGREGATE", aggsig,
15712 0 : agginfo->aggfn.dobj.namespace->dobj.name,
15713 0 : agginfo->aggfn.rolname,
15714 0 : agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
15715 :
15716 : /*
15717 : * Since there is no GRANT ON AGGREGATE syntax, we have to make the ACL
15718 : * command look like a function's GRANT; in particular this affects the
15719 : * syntax for zero-argument aggregates and ordered-set aggregates.
15720 : */
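                   :     /*
                   :      * So, for an illustrative aggregate (names are placeholders), the ACL
                   :      * entry ends up using function syntax, e.g.:
                   :      *
                   :      *   GRANT ALL ON FUNCTION public.my_sum(integer) TO some_role;
                   :      */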
15721 570 : free(aggsig);
15722 :
15723 570 : aggsig = format_function_signature(fout, &agginfo->aggfn, true);
15724 :
15725 570 : if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_ACL)
15726 36 : dumpACL(fout, agginfo->aggfn.dobj.dumpId, InvalidDumpId,
15727 : "FUNCTION", aggsig, NULL,
15728 36 : agginfo->aggfn.dobj.namespace->dobj.name,
15729 36 : NULL, agginfo->aggfn.rolname, &agginfo->aggfn.dacl);
15730 :
15731 570 : free(aggsig);
15732 570 : free(aggfullsig);
15733 570 : free(aggsig_tag);
15734 :
15735 570 : PQclear(res);
15736 :
15737 570 : destroyPQExpBuffer(query);
15738 570 : destroyPQExpBuffer(q);
15739 570 : destroyPQExpBuffer(delq);
15740 570 : destroyPQExpBuffer(details);
15741 : }
15742 :
15743 : /*
15744 : * dumpTSParser
15745 : * write out a single text search parser
15746 : */
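                   :  /*
                   :   * Example (illustrative; the parser name is a placeholder, the support
                   :   * functions shown are those of the built-in default parser) of the
                   :   * emitted form:
                   :   *
                   :   *   CREATE TEXT SEARCH PARSER public.my_parser (
                   :   *       START = prsd_start,
                   :   *       GETTOKEN = prsd_nexttoken,
                   :   *       END = prsd_end,
                   :   *       LEXTYPES = prsd_lextype );
                   :   */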
15747 : static void
15748 82 : dumpTSParser(Archive *fout, const TSParserInfo *prsinfo)
15749 : {
15750 82 : DumpOptions *dopt = fout->dopt;
15751 : PQExpBuffer q;
15752 : PQExpBuffer delq;
15753 : char *qprsname;
15754 :
15755 : /* Do nothing if not dumping schema */
15756 82 : if (!dopt->dumpSchema)
15757 12 : return;
15758 :
15759 70 : q = createPQExpBuffer();
15760 70 : delq = createPQExpBuffer();
15761 :
15762 70 : qprsname = pg_strdup(fmtId(prsinfo->dobj.name));
15763 :
15764 70 : appendPQExpBuffer(q, "CREATE TEXT SEARCH PARSER %s (\n",
15765 70 : fmtQualifiedDumpable(prsinfo));
15766 :
15767 70 : appendPQExpBuffer(q, " START = %s,\n",
15768 70 : convertTSFunction(fout, prsinfo->prsstart));
15769 70 : appendPQExpBuffer(q, " GETTOKEN = %s,\n",
15770 70 : convertTSFunction(fout, prsinfo->prstoken));
15771 70 : appendPQExpBuffer(q, " END = %s,\n",
15772 70 : convertTSFunction(fout, prsinfo->prsend));
15773 70 : if (prsinfo->prsheadline != InvalidOid)
15774 6 : appendPQExpBuffer(q, " HEADLINE = %s,\n",
15775 6 : convertTSFunction(fout, prsinfo->prsheadline));
15776 70 : appendPQExpBuffer(q, " LEXTYPES = %s );\n",
15777 70 : convertTSFunction(fout, prsinfo->prslextype));
15778 :
15779 70 : appendPQExpBuffer(delq, "DROP TEXT SEARCH PARSER %s;\n",
15780 70 : fmtQualifiedDumpable(prsinfo));
15781 :
15782 70 : if (dopt->binary_upgrade)
15783 2 : binary_upgrade_extension_member(q, &prsinfo->dobj,
15784 : "TEXT SEARCH PARSER", qprsname,
15785 2 : prsinfo->dobj.namespace->dobj.name);
15786 :
15787 70 : if (prsinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15788 70 : ArchiveEntry(fout, prsinfo->dobj.catId, prsinfo->dobj.dumpId,
15789 70 : ARCHIVE_OPTS(.tag = prsinfo->dobj.name,
15790 : .namespace = prsinfo->dobj.namespace->dobj.name,
15791 : .description = "TEXT SEARCH PARSER",
15792 : .section = SECTION_PRE_DATA,
15793 : .createStmt = q->data,
15794 : .dropStmt = delq->data));
15795 :
15796 : /* Dump Parser Comments */
15797 70 : if (prsinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15798 70 : dumpComment(fout, "TEXT SEARCH PARSER", qprsname,
15799 70 : prsinfo->dobj.namespace->dobj.name, "",
15800 70 : prsinfo->dobj.catId, 0, prsinfo->dobj.dumpId);
15801 :
15802 70 : destroyPQExpBuffer(q);
15803 70 : destroyPQExpBuffer(delq);
15804 70 : free(qprsname);
15805 : }
15806 :
15807 : /*
15808 : * dumpTSDictionary
15809 : * write out a single text search dictionary
15810 : */
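                   :  /*
                   :   * Example (illustrative; the dictionary name and option values are
                   :   * placeholders) of the emitted form:
                   :   *
                   :   *   CREATE TEXT SEARCH DICTIONARY public.my_dict (
                   :   *       TEMPLATE = pg_catalog.snowball,
                   :   *       language = 'english', stopwords = 'english' );
                   :   */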
15811 : static void
15812 358 : dumpTSDictionary(Archive *fout, const TSDictInfo *dictinfo)
15813 : {
15814 358 : DumpOptions *dopt = fout->dopt;
15815 : PQExpBuffer q;
15816 : PQExpBuffer delq;
15817 : PQExpBuffer query;
15818 : char *qdictname;
15819 : PGresult *res;
15820 : char *nspname;
15821 : char *tmplname;
15822 :
15823 : /* Do nothing if not dumping schema */
15824 358 : if (!dopt->dumpSchema)
15825 12 : return;
15826 :
15827 346 : q = createPQExpBuffer();
15828 346 : delq = createPQExpBuffer();
15829 346 : query = createPQExpBuffer();
15830 :
15831 346 : qdictname = pg_strdup(fmtId(dictinfo->dobj.name));
15832 :
15833 : /* Fetch name and namespace of the dictionary's template */
15834 346 : appendPQExpBuffer(query, "SELECT nspname, tmplname "
15835 : "FROM pg_ts_template p, pg_namespace n "
15836 : "WHERE p.oid = '%u' AND n.oid = tmplnamespace",
15837 346 : dictinfo->dicttemplate);
15838 346 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
15839 346 : nspname = PQgetvalue(res, 0, 0);
15840 346 : tmplname = PQgetvalue(res, 0, 1);
15841 :
15842 346 : appendPQExpBuffer(q, "CREATE TEXT SEARCH DICTIONARY %s (\n",
15843 346 : fmtQualifiedDumpable(dictinfo));
15844 :
15845 346 : appendPQExpBufferStr(q, " TEMPLATE = ");
15846 346 : appendPQExpBuffer(q, "%s.", fmtId(nspname));
15847 346 : appendPQExpBufferStr(q, fmtId(tmplname));
15848 :
15849 346 : PQclear(res);
15850 :
15851 : /* the dictinitoption can be dumped straight into the command */
15852 346 : if (dictinfo->dictinitoption)
15853 276 : appendPQExpBuffer(q, ",\n %s", dictinfo->dictinitoption);
15854 :
15855 346 : appendPQExpBufferStr(q, " );\n");
15856 :
15857 346 : appendPQExpBuffer(delq, "DROP TEXT SEARCH DICTIONARY %s;\n",
15858 346 : fmtQualifiedDumpable(dictinfo));
15859 :
15860 346 : if (dopt->binary_upgrade)
15861 20 : binary_upgrade_extension_member(q, &dictinfo->dobj,
15862 : "TEXT SEARCH DICTIONARY", qdictname,
15863 20 : dictinfo->dobj.namespace->dobj.name);
15864 :
15865 346 : if (dictinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15866 346 : ArchiveEntry(fout, dictinfo->dobj.catId, dictinfo->dobj.dumpId,
15867 346 : ARCHIVE_OPTS(.tag = dictinfo->dobj.name,
15868 : .namespace = dictinfo->dobj.namespace->dobj.name,
15869 : .owner = dictinfo->rolname,
15870 : .description = "TEXT SEARCH DICTIONARY",
15871 : .section = SECTION_PRE_DATA,
15872 : .createStmt = q->data,
15873 : .dropStmt = delq->data));
15874 :
15875 : /* Dump Dictionary Comments */
15876 346 : if (dictinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15877 256 : dumpComment(fout, "TEXT SEARCH DICTIONARY", qdictname,
15878 256 : dictinfo->dobj.namespace->dobj.name, dictinfo->rolname,
15879 256 : dictinfo->dobj.catId, 0, dictinfo->dobj.dumpId);
15880 :
15881 346 : destroyPQExpBuffer(q);
15882 346 : destroyPQExpBuffer(delq);
15883 346 : destroyPQExpBuffer(query);
15884 346 : free(qdictname);
15885 : }
15886 :
15887 : /*
15888 : * dumpTSTemplate
15889 : * write out a single text search template
15890 : */
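                   :  /*
                   :   * Example (illustrative; the template name is a placeholder, the
                   :   * functions shown belong to the built-in "simple" dictionary) of the
                   :   * emitted form:
                   :   *
                   :   *   CREATE TEXT SEARCH TEMPLATE public.my_template (
                   :   *       INIT = dsimple_init,
                   :   *       LEXIZE = dsimple_lexize );
                   :   */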
15891 : static void
15892 106 : dumpTSTemplate(Archive *fout, const TSTemplateInfo *tmplinfo)
15893 : {
15894 106 : DumpOptions *dopt = fout->dopt;
15895 : PQExpBuffer q;
15896 : PQExpBuffer delq;
15897 : char *qtmplname;
15898 :
15899 : /* Do nothing if not dumping schema */
15900 106 : if (!dopt->dumpSchema)
15901 12 : return;
15902 :
15903 94 : q = createPQExpBuffer();
15904 94 : delq = createPQExpBuffer();
15905 :
15906 94 : qtmplname = pg_strdup(fmtId(tmplinfo->dobj.name));
15907 :
15908 94 : appendPQExpBuffer(q, "CREATE TEXT SEARCH TEMPLATE %s (\n",
15909 94 : fmtQualifiedDumpable(tmplinfo));
15910 :
15911 94 : if (tmplinfo->tmplinit != InvalidOid)
15912 30 : appendPQExpBuffer(q, " INIT = %s,\n",
15913 30 : convertTSFunction(fout, tmplinfo->tmplinit));
15914 94 : appendPQExpBuffer(q, " LEXIZE = %s );\n",
15915 94 : convertTSFunction(fout, tmplinfo->tmpllexize));
15916 :
15917 94 : appendPQExpBuffer(delq, "DROP TEXT SEARCH TEMPLATE %s;\n",
15918 94 : fmtQualifiedDumpable(tmplinfo));
15919 :
15920 94 : if (dopt->binary_upgrade)
15921 2 : binary_upgrade_extension_member(q, &tmplinfo->dobj,
15922 : "TEXT SEARCH TEMPLATE", qtmplname,
15923 2 : tmplinfo->dobj.namespace->dobj.name);
15924 :
15925 94 : if (tmplinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
15926 94 : ArchiveEntry(fout, tmplinfo->dobj.catId, tmplinfo->dobj.dumpId,
15927 94 : ARCHIVE_OPTS(.tag = tmplinfo->dobj.name,
15928 : .namespace = tmplinfo->dobj.namespace->dobj.name,
15929 : .description = "TEXT SEARCH TEMPLATE",
15930 : .section = SECTION_PRE_DATA,
15931 : .createStmt = q->data,
15932 : .dropStmt = delq->data));
15933 :
15934 : /* Dump Template Comments */
15935 94 : if (tmplinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
15936 94 : dumpComment(fout, "TEXT SEARCH TEMPLATE", qtmplname,
15937 94 : tmplinfo->dobj.namespace->dobj.name, "",
15938 94 : tmplinfo->dobj.catId, 0, tmplinfo->dobj.dumpId);
15939 :
15940 94 : destroyPQExpBuffer(q);
15941 94 : destroyPQExpBuffer(delq);
15942 94 : free(qtmplname);
15943 : }
15944 :
15945 : /*
15946 : * dumpTSConfig
15947 : * write out a single text search configuration
15948 : */
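                   :  /*
                   :   * Example (illustrative; configuration and dictionary names are
                   :   * placeholders) of the commands built below:
                   :   *
                   :   *   CREATE TEXT SEARCH CONFIGURATION public.my_config (
                   :   *       PARSER = pg_catalog."default" );
                   :   *
                   :   *   ALTER TEXT SEARCH CONFIGURATION public.my_config
                   :   *       ADD MAPPING FOR asciiword WITH english_stem;
                   :   */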
15949 : static void
15950 308 : dumpTSConfig(Archive *fout, const TSConfigInfo *cfginfo)
15951 : {
15952 308 : DumpOptions *dopt = fout->dopt;
15953 : PQExpBuffer q;
15954 : PQExpBuffer delq;
15955 : PQExpBuffer query;
15956 : char *qcfgname;
15957 : PGresult *res;
15958 : char *nspname;
15959 : char *prsname;
15960 : int ntups,
15961 : i;
15962 : int i_tokenname;
15963 : int i_dictname;
15964 :
15965 : /* Do nothing if not dumping schema */
15966 308 : if (!dopt->dumpSchema)
15967 12 : return;
15968 :
15969 296 : q = createPQExpBuffer();
15970 296 : delq = createPQExpBuffer();
15971 296 : query = createPQExpBuffer();
15972 :
15973 296 : qcfgname = pg_strdup(fmtId(cfginfo->dobj.name));
15974 :
15975 : /* Fetch name and namespace of the config's parser */
15976 296 : appendPQExpBuffer(query, "SELECT nspname, prsname "
15977 : "FROM pg_ts_parser p, pg_namespace n "
15978 : "WHERE p.oid = '%u' AND n.oid = prsnamespace",
15979 296 : cfginfo->cfgparser);
15980 296 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
15981 296 : nspname = PQgetvalue(res, 0, 0);
15982 296 : prsname = PQgetvalue(res, 0, 1);
15983 :
15984 296 : appendPQExpBuffer(q, "CREATE TEXT SEARCH CONFIGURATION %s (\n",
15985 296 : fmtQualifiedDumpable(cfginfo));
15986 :
15987 296 : appendPQExpBuffer(q, " PARSER = %s.", fmtId(nspname));
15988 296 : appendPQExpBuffer(q, "%s );\n", fmtId(prsname));
15989 :
15990 296 : PQclear(res);
15991 :
15992 296 : resetPQExpBuffer(query);
15993 296 : appendPQExpBuffer(query,
15994 : "SELECT\n"
15995 : " ( SELECT alias FROM pg_catalog.ts_token_type('%u'::pg_catalog.oid) AS t\n"
15996 : " WHERE t.tokid = m.maptokentype ) AS tokenname,\n"
15997 : " m.mapdict::pg_catalog.regdictionary AS dictname\n"
15998 : "FROM pg_catalog.pg_ts_config_map AS m\n"
15999 : "WHERE m.mapcfg = '%u'\n"
16000 : "ORDER BY m.mapcfg, m.maptokentype, m.mapseqno",
16001 296 : cfginfo->cfgparser, cfginfo->dobj.catId.oid);
16002 :
16003 296 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16004 296 : ntups = PQntuples(res);
16005 :
16006 296 : i_tokenname = PQfnumber(res, "tokenname");
16007 296 : i_dictname = PQfnumber(res, "dictname");
16008 :
16009 6190 : for (i = 0; i < ntups; i++)
16010 : {
16011 5894 : char *tokenname = PQgetvalue(res, i, i_tokenname);
16012 5894 : char *dictname = PQgetvalue(res, i, i_dictname);
16013 :
16014 5894 : if (i == 0 ||
16015 5598 : strcmp(tokenname, PQgetvalue(res, i - 1, i_tokenname)) != 0)
16016 : {
16017 : /* starting a new token type, so start a new command */
16018 5624 : if (i > 0)
16019 5328 : appendPQExpBufferStr(q, ";\n");
16020 5624 : appendPQExpBuffer(q, "\nALTER TEXT SEARCH CONFIGURATION %s\n",
16021 5624 : fmtQualifiedDumpable(cfginfo));
16022 : /* tokenname needs quoting, dictname does NOT */
16023 5624 : appendPQExpBuffer(q, " ADD MAPPING FOR %s WITH %s",
16024 : fmtId(tokenname), dictname);
16025 : }
16026 : else
16027 270 : appendPQExpBuffer(q, ", %s", dictname);
16028 : }
16029 :
16030 296 : if (ntups > 0)
16031 296 : appendPQExpBufferStr(q, ";\n");
16032 :
16033 296 : PQclear(res);
16034 :
16035 296 : appendPQExpBuffer(delq, "DROP TEXT SEARCH CONFIGURATION %s;\n",
16036 296 : fmtQualifiedDumpable(cfginfo));
16037 :
16038 296 : if (dopt->binary_upgrade)
16039 10 : binary_upgrade_extension_member(q, &cfginfo->dobj,
16040 : "TEXT SEARCH CONFIGURATION", qcfgname,
16041 10 : cfginfo->dobj.namespace->dobj.name);
16042 :
16043 296 : if (cfginfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16044 296 : ArchiveEntry(fout, cfginfo->dobj.catId, cfginfo->dobj.dumpId,
16045 296 : ARCHIVE_OPTS(.tag = cfginfo->dobj.name,
16046 : .namespace = cfginfo->dobj.namespace->dobj.name,
16047 : .owner = cfginfo->rolname,
16048 : .description = "TEXT SEARCH CONFIGURATION",
16049 : .section = SECTION_PRE_DATA,
16050 : .createStmt = q->data,
16051 : .dropStmt = delq->data));
16052 :
16053 : /* Dump Configuration Comments */
16054 296 : if (cfginfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16055 256 : dumpComment(fout, "TEXT SEARCH CONFIGURATION", qcfgname,
16056 256 : cfginfo->dobj.namespace->dobj.name, cfginfo->rolname,
16057 256 : cfginfo->dobj.catId, 0, cfginfo->dobj.dumpId);
16058 :
16059 296 : destroyPQExpBuffer(q);
16060 296 : destroyPQExpBuffer(delq);
16061 296 : destroyPQExpBuffer(query);
16062 296 : free(qcfgname);
16063 : }
16064 :
16065 : /*
16066 : * dumpForeignDataWrapper
16067 : * write out a single foreign-data wrapper definition
16068 : */
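                   :  /*
                   :   * Example (illustrative; the wrapper and its handler/validator names are
                   :   * hypothetical) of the emitted form:
                   :   *
                   :   *   CREATE FOREIGN DATA WRAPPER my_fdw
                   :   *       HANDLER my_fdw_handler VALIDATOR my_fdw_validator;
                   :   */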
16069 : static void
16070 104 : dumpForeignDataWrapper(Archive *fout, const FdwInfo *fdwinfo)
16071 : {
16072 104 : DumpOptions *dopt = fout->dopt;
16073 : PQExpBuffer q;
16074 : PQExpBuffer delq;
16075 : char *qfdwname;
16076 :
16077 : /* Do nothing if not dumping schema */
16078 104 : if (!dopt->dumpSchema)
16079 14 : return;
16080 :
16081 90 : q = createPQExpBuffer();
16082 90 : delq = createPQExpBuffer();
16083 :
16084 90 : qfdwname = pg_strdup(fmtId(fdwinfo->dobj.name));
16085 :
16086 90 : appendPQExpBuffer(q, "CREATE FOREIGN DATA WRAPPER %s",
16087 : qfdwname);
16088 :
16089 90 : if (strcmp(fdwinfo->fdwhandler, "-") != 0)
16090 0 : appendPQExpBuffer(q, " HANDLER %s", fdwinfo->fdwhandler);
16091 :
16092 90 : if (strcmp(fdwinfo->fdwvalidator, "-") != 0)
16093 0 : appendPQExpBuffer(q, " VALIDATOR %s", fdwinfo->fdwvalidator);
16094 :
16095 90 : if (strlen(fdwinfo->fdwoptions) > 0)
16096 0 : appendPQExpBuffer(q, " OPTIONS (\n %s\n)", fdwinfo->fdwoptions);
16097 :
16098 90 : appendPQExpBufferStr(q, ";\n");
16099 :
16100 90 : appendPQExpBuffer(delq, "DROP FOREIGN DATA WRAPPER %s;\n",
16101 : qfdwname);
16102 :
16103 90 : if (dopt->binary_upgrade)
16104 4 : binary_upgrade_extension_member(q, &fdwinfo->dobj,
16105 : "FOREIGN DATA WRAPPER", qfdwname,
16106 : NULL);
16107 :
16108 90 : if (fdwinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16109 90 : ArchiveEntry(fout, fdwinfo->dobj.catId, fdwinfo->dobj.dumpId,
16110 90 : ARCHIVE_OPTS(.tag = fdwinfo->dobj.name,
16111 : .owner = fdwinfo->rolname,
16112 : .description = "FOREIGN DATA WRAPPER",
16113 : .section = SECTION_PRE_DATA,
16114 : .createStmt = q->data,
16115 : .dropStmt = delq->data));
16116 :
16117 : /* Dump Foreign Data Wrapper Comments */
16118 90 : if (fdwinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16119 0 : dumpComment(fout, "FOREIGN DATA WRAPPER", qfdwname,
16120 0 : NULL, fdwinfo->rolname,
16121 0 : fdwinfo->dobj.catId, 0, fdwinfo->dobj.dumpId);
16122 :
16123 : /* Handle the ACL */
16124 90 : if (fdwinfo->dobj.dump & DUMP_COMPONENT_ACL)
16125 62 : dumpACL(fout, fdwinfo->dobj.dumpId, InvalidDumpId,
16126 : "FOREIGN DATA WRAPPER", qfdwname, NULL, NULL,
16127 62 : NULL, fdwinfo->rolname, &fdwinfo->dacl);
16128 :
16129 90 : free(qfdwname);
16130 :
16131 90 : destroyPQExpBuffer(q);
16132 90 : destroyPQExpBuffer(delq);
16133 : }
16134 :
16135 : /*
16136 : * dumpForeignServer
16137 : * write out a foreign server definition
16138 : */
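                   :  /*
                   :   * Example (illustrative; server name, wrapper, and option values are
                   :   * placeholders) of the emitted form:
                   :   *
                   :   *   CREATE SERVER my_server FOREIGN DATA WRAPPER postgres_fdw OPTIONS (
                   :   *       host 'db.example.com', dbname 'sales', port '5432'
                   :   *   );
                   :   */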
16139 : static void
16140 112 : dumpForeignServer(Archive *fout, const ForeignServerInfo *srvinfo)
16141 : {
16142 112 : DumpOptions *dopt = fout->dopt;
16143 : PQExpBuffer q;
16144 : PQExpBuffer delq;
16145 : PQExpBuffer query;
16146 : PGresult *res;
16147 : char *qsrvname;
16148 : char *fdwname;
16149 :
16150 : /* Do nothing if not dumping schema */
16151 112 : if (!dopt->dumpSchema)
16152 18 : return;
16153 :
16154 94 : q = createPQExpBuffer();
16155 94 : delq = createPQExpBuffer();
16156 94 : query = createPQExpBuffer();
16157 :
16158 94 : qsrvname = pg_strdup(fmtId(srvinfo->dobj.name));
16159 :
16160 : /* look up the foreign-data wrapper */
16161 94 : appendPQExpBuffer(query, "SELECT fdwname "
16162 : "FROM pg_foreign_data_wrapper w "
16163 : "WHERE w.oid = '%u'",
16164 94 : srvinfo->srvfdw);
16165 94 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
16166 94 : fdwname = PQgetvalue(res, 0, 0);
16167 :
16168 94 : appendPQExpBuffer(q, "CREATE SERVER %s", qsrvname);
16169 94 : if (srvinfo->srvtype && strlen(srvinfo->srvtype) > 0)
16170 : {
16171 0 : appendPQExpBufferStr(q, " TYPE ");
16172 0 : appendStringLiteralAH(q, srvinfo->srvtype, fout);
16173 : }
16174 94 : if (srvinfo->srvversion && strlen(srvinfo->srvversion) > 0)
16175 : {
16176 0 : appendPQExpBufferStr(q, " VERSION ");
16177 0 : appendStringLiteralAH(q, srvinfo->srvversion, fout);
16178 : }
16179 :
16180 94 : appendPQExpBufferStr(q, " FOREIGN DATA WRAPPER ");
16181 94 : appendPQExpBufferStr(q, fmtId(fdwname));
16182 :
16183 94 : if (srvinfo->srvoptions && strlen(srvinfo->srvoptions) > 0)
16184 0 : appendPQExpBuffer(q, " OPTIONS (\n %s\n)", srvinfo->srvoptions);
16185 :
16186 94 : appendPQExpBufferStr(q, ";\n");
16187 :
16188 94 : appendPQExpBuffer(delq, "DROP SERVER %s;\n",
16189 : qsrvname);
16190 :
16191 94 : if (dopt->binary_upgrade)
16192 4 : binary_upgrade_extension_member(q, &srvinfo->dobj,
16193 : "SERVER", qsrvname, NULL);
16194 :
16195 94 : if (srvinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16196 94 : ArchiveEntry(fout, srvinfo->dobj.catId, srvinfo->dobj.dumpId,
16197 94 : ARCHIVE_OPTS(.tag = srvinfo->dobj.name,
16198 : .owner = srvinfo->rolname,
16199 : .description = "SERVER",
16200 : .section = SECTION_PRE_DATA,
16201 : .createStmt = q->data,
16202 : .dropStmt = delq->data));
16203 :
16204 : /* Dump Foreign Server Comments */
16205 94 : if (srvinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
16206 0 : dumpComment(fout, "SERVER", qsrvname,
16207 0 : NULL, srvinfo->rolname,
16208 0 : srvinfo->dobj.catId, 0, srvinfo->dobj.dumpId);
16209 :
16210 : /* Handle the ACL */
16211 94 : if (srvinfo->dobj.dump & DUMP_COMPONENT_ACL)
16212 62 : dumpACL(fout, srvinfo->dobj.dumpId, InvalidDumpId,
16213 : "FOREIGN SERVER", qsrvname, NULL, NULL,
16214 62 : NULL, srvinfo->rolname, &srvinfo->dacl);
16215 :
16216 : /* Dump user mappings */
16217 94 : if (srvinfo->dobj.dump & DUMP_COMPONENT_USERMAP)
16218 94 : dumpUserMappings(fout,
16219 94 : srvinfo->dobj.name, NULL,
16220 94 : srvinfo->rolname,
16221 94 : srvinfo->dobj.catId, srvinfo->dobj.dumpId);
16222 :
16223 94 : PQclear(res);
16224 :
16225 94 : free(qsrvname);
16226 :
16227 94 : destroyPQExpBuffer(q);
16228 94 : destroyPQExpBuffer(delq);
16229 94 : destroyPQExpBuffer(query);
16230 : }
16231 :
16232 : /*
16233 : * dumpUserMappings
16234 : *
16235              :  * This routine dumps any user mappings associated with the given
16236              :  * server.  It should be called after ArchiveEntry() has been invoked
16237              :  * for the server.
16238 : */
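                   :  /*
                   :   * Example (illustrative; role, server, and option values are
                   :   * placeholders) of the per-mapping command emitted in the loop below:
                   :   *
                   :   *   CREATE USER MAPPING FOR alice SERVER my_server OPTIONS (
                   :   *       "user" 'alice', password 'secret'
                   :   *   );
                   :   */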
16239 : static void
16240 94 : dumpUserMappings(Archive *fout,
16241 : const char *servername, const char *namespace,
16242 : const char *owner,
16243 : CatalogId catalogId, DumpId dumpId)
16244 : {
16245 : PQExpBuffer q;
16246 : PQExpBuffer delq;
16247 : PQExpBuffer query;
16248 : PQExpBuffer tag;
16249 : PGresult *res;
16250 : int ntups;
16251 : int i_usename;
16252 : int i_umoptions;
16253 : int i;
16254 :
16255 94 : q = createPQExpBuffer();
16256 94 : tag = createPQExpBuffer();
16257 94 : delq = createPQExpBuffer();
16258 94 : query = createPQExpBuffer();
16259 :
16260 : /*
16261 : * We read from the publicly accessible view pg_user_mappings, so as not
16262 : * to fail if run by a non-superuser. Note that the view will show
16263              :      * umoptions as null if the user lacks privileges on the associated
16264 : * server; this means that pg_dump will dump such a mapping, but with no
16265 : * OPTIONS clause. A possible alternative is to skip such mappings
16266 : * altogether, but it's not clear that that's an improvement.
16267 : */
16268 94 : appendPQExpBuffer(query,
16269 : "SELECT usename, "
16270 : "array_to_string(ARRAY("
16271 : "SELECT quote_ident(option_name) || ' ' || "
16272 : "quote_literal(option_value) "
16273 : "FROM pg_options_to_table(umoptions) "
16274 : "ORDER BY option_name"
16275 : "), E',\n ') AS umoptions "
16276 : "FROM pg_user_mappings "
16277 : "WHERE srvid = '%u' "
16278 : "ORDER BY usename",
16279 : catalogId.oid);
16280 :
16281 94 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16282 :
16283 94 : ntups = PQntuples(res);
16284 94 : i_usename = PQfnumber(res, "usename");
16285 94 : i_umoptions = PQfnumber(res, "umoptions");
16286 :
16287 156 : for (i = 0; i < ntups; i++)
16288 : {
16289 : char *usename;
16290 : char *umoptions;
16291 :
16292 62 : usename = PQgetvalue(res, i, i_usename);
16293 62 : umoptions = PQgetvalue(res, i, i_umoptions);
16294 :
16295 62 : resetPQExpBuffer(q);
16296 62 : appendPQExpBuffer(q, "CREATE USER MAPPING FOR %s", fmtId(usename));
16297 62 : appendPQExpBuffer(q, " SERVER %s", fmtId(servername));
16298 :
16299 62 : if (umoptions && strlen(umoptions) > 0)
16300 0 : appendPQExpBuffer(q, " OPTIONS (\n %s\n)", umoptions);
16301 :
16302 62 : appendPQExpBufferStr(q, ";\n");
16303 :
16304 62 : resetPQExpBuffer(delq);
16305 62 : appendPQExpBuffer(delq, "DROP USER MAPPING FOR %s", fmtId(usename));
16306 62 : appendPQExpBuffer(delq, " SERVER %s;\n", fmtId(servername));
16307 :
16308 62 : resetPQExpBuffer(tag);
16309 62 : appendPQExpBuffer(tag, "USER MAPPING %s SERVER %s",
16310 : usename, servername);
16311 :
16312 62 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
16313 62 : ARCHIVE_OPTS(.tag = tag->data,
16314 : .namespace = namespace,
16315 : .owner = owner,
16316 : .description = "USER MAPPING",
16317 : .section = SECTION_PRE_DATA,
16318 : .createStmt = q->data,
16319 : .dropStmt = delq->data));
16320 : }
16321 :
16322 94 : PQclear(res);
16323 :
16324 94 : destroyPQExpBuffer(query);
16325 94 : destroyPQExpBuffer(delq);
16326 94 : destroyPQExpBuffer(tag);
16327 94 : destroyPQExpBuffer(q);
16328 94 : }
16329 :
16330 : /*
16331 : * Write out default privileges information
16332 : */
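                   :  /*
                   :   * Example (illustrative; role, schema, and grantee names are
                   :   * placeholders) of the kind of command produced via
                   :   * buildDefaultACLCommands():
                   :   *
                   :   *   ALTER DEFAULT PRIVILEGES FOR ROLE alice IN SCHEMA public
                   :   *       GRANT SELECT ON TABLES TO readers;
                   :   */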
16333 : static void
16334 320 : dumpDefaultACL(Archive *fout, const DefaultACLInfo *daclinfo)
16335 : {
16336 320 : DumpOptions *dopt = fout->dopt;
16337 : PQExpBuffer q;
16338 : PQExpBuffer tag;
16339 : const char *type;
16340 :
16341 : /* Do nothing if not dumping schema, or if we're skipping ACLs */
16342 320 : if (!dopt->dumpSchema || dopt->aclsSkip)
16343 56 : return;
16344 :
16345 264 : q = createPQExpBuffer();
16346 264 : tag = createPQExpBuffer();
16347 :
16348 264 : switch (daclinfo->defaclobjtype)
16349 : {
16350 122 : case DEFACLOBJ_RELATION:
16351 122 : type = "TABLES";
16352 122 : break;
16353 0 : case DEFACLOBJ_SEQUENCE:
16354 0 : type = "SEQUENCES";
16355 0 : break;
16356 122 : case DEFACLOBJ_FUNCTION:
16357 122 : type = "FUNCTIONS";
16358 122 : break;
16359 20 : case DEFACLOBJ_TYPE:
16360 20 : type = "TYPES";
16361 20 : break;
16362 0 : case DEFACLOBJ_NAMESPACE:
16363 0 : type = "SCHEMAS";
16364 0 : break;
16365 0 : case DEFACLOBJ_LARGEOBJECT:
16366 0 : type = "LARGE OBJECTS";
16367 0 : break;
16368 0 : default:
16369 : /* shouldn't get here */
16370 0 : pg_fatal("unrecognized object type in default privileges: %d",
16371 : (int) daclinfo->defaclobjtype);
16372 : type = ""; /* keep compiler quiet */
16373 : }
16374 :
16375 264 : appendPQExpBuffer(tag, "DEFAULT PRIVILEGES FOR %s", type);
16376 :
16377 : /* build the actual command(s) for this tuple */
16378 264 : if (!buildDefaultACLCommands(type,
16379 264 : daclinfo->dobj.namespace != NULL ?
16380 124 : daclinfo->dobj.namespace->dobj.name : NULL,
16381 264 : daclinfo->dacl.acl,
16382 264 : daclinfo->dacl.acldefault,
16383 264 : daclinfo->defaclrole,
16384 : fout->remoteVersion,
16385 : q))
16386 0 : pg_fatal("could not parse default ACL list (%s)",
16387 : daclinfo->dacl.acl);
16388 :
16389 264 : if (daclinfo->dobj.dump & DUMP_COMPONENT_ACL)
16390 264 : ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId,
16391 264 : ARCHIVE_OPTS(.tag = tag->data,
16392 : .namespace = daclinfo->dobj.namespace ?
16393 : daclinfo->dobj.namespace->dobj.name : NULL,
16394 : .owner = daclinfo->defaclrole,
16395 : .description = "DEFAULT ACL",
16396 : .section = SECTION_POST_DATA,
16397 : .createStmt = q->data));
16398 :
16399 264 : destroyPQExpBuffer(tag);
16400 264 : destroyPQExpBuffer(q);
16401 : }
16402 :
16403 : /*----------
16404 : * Write out grant/revoke information
16405 : *
16406 : * 'objDumpId' is the dump ID of the underlying object.
16407 : * 'altDumpId' can be a second dumpId that the ACL entry must also depend on,
16408 : * or InvalidDumpId if there is no need for a second dependency.
16409 : * 'type' must be one of
16410 : * TABLE, SEQUENCE, FUNCTION, LANGUAGE, SCHEMA, DATABASE, TABLESPACE,
16411 : * FOREIGN DATA WRAPPER, SERVER, or LARGE OBJECT.
16412 : * 'name' is the formatted name of the object. Must be quoted etc. already.
16413 : * 'subname' is the formatted name of the sub-object, if any. Must be quoted.
16414 : * (Currently we assume that subname is only provided for table columns.)
16415 : * 'nspname' is the namespace the object is in (NULL if none).
16416 : * 'tag' is the tag to use for the ACL TOC entry; typically, this is NULL
16417 : * to use the default for the object type.
16418 : * 'owner' is the owner, NULL if there is no owner (for languages).
16419 : * 'dacl' is the DumpableAcl struct for the object.
16420 : *
16421 : * Returns the dump ID assigned to the ACL TocEntry, or InvalidDumpId if
16422 : * no ACL entry was created.
16423 : *----------
16424 : */
16425 : static DumpId
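                   :  /*
                   :   * Example (illustrative; object and role names are placeholders) of a
                   :   * resulting ACL TOC entry's create statement:
                   :   *
                   :   *   REVOKE ALL ON TABLE public.orders FROM PUBLIC;
                   :   *   GRANT SELECT ON TABLE public.orders TO readers;
                   :   */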
16426 58024 : dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId,
16427 : const char *type, const char *name, const char *subname,
16428 : const char *nspname, const char *tag, const char *owner,
16429 : const DumpableAcl *dacl)
16430 : {
16431 58024 : DumpId aclDumpId = InvalidDumpId;
16432 58024 : DumpOptions *dopt = fout->dopt;
16433 58024 : const char *acls = dacl->acl;
16434 58024 : const char *acldefault = dacl->acldefault;
16435 58024 : char privtype = dacl->privtype;
16436 58024 : const char *initprivs = dacl->initprivs;
16437 : const char *baseacls;
16438 : PQExpBuffer sql;
16439 :
16440 : /* Do nothing if ACL dump is not enabled */
16441 58024 : if (dopt->aclsSkip)
16442 652 : return InvalidDumpId;
16443 :
16444 : /* --data-only skips ACLs *except* large object ACLs */
16445 57372 : if (!dopt->dumpSchema && strcmp(type, "LARGE OBJECT") != 0)
16446 0 : return InvalidDumpId;
16447 :
16448 57372 : sql = createPQExpBuffer();
16449 :
16450 : /*
16451 : * In binary upgrade mode, we don't run an extension's script but instead
16452 : * dump out the objects independently and then recreate them. To preserve
16453 : * any initial privileges which were set on extension objects, we need to
16454 : * compute the set of GRANT and REVOKE commands necessary to get from the
16455 : * default privileges of an object to its initial privileges as recorded
16456 : * in pg_init_privs.
16457 : *
16458 : * At restore time, we apply these commands after having called
16459 : * binary_upgrade_set_record_init_privs(true). That tells the backend to
16460 : * copy the results into pg_init_privs. This is how we preserve the
16461 : * contents of that catalog across binary upgrades.
16462 : */
16463 57372 : if (dopt->binary_upgrade && privtype == 'e' &&
16464 26 : initprivs && *initprivs != '\0')
16465 : {
16466 26 : appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\n");
16467 26 : if (!buildACLCommands(name, subname, nspname, type,
16468 : initprivs, acldefault, owner,
16469 : "", fout->remoteVersion, sql))
16470 0 : pg_fatal("could not parse initial ACL list (%s) or default (%s) for object \"%s\" (%s)",
16471 : initprivs, acldefault, name, type);
16472 26 : appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
16473 : }
16474 :
16475 : /*
16476 : * Now figure the GRANT and REVOKE commands needed to get to the object's
16477 : * actual current ACL, starting from the initprivs if given, else from the
16478 : * object-type-specific default. Also, while buildACLCommands will assume
16479 : * that a NULL/empty acls string means it needn't do anything, what that
16480 : * actually represents is the object-type-specific default; so we need to
16481 : * substitute the acldefault string to get the right results in that case.
16482 : */
16483 57372 : if (initprivs && *initprivs != '\0')
16484 : {
16485 53916 : baseacls = initprivs;
16486 53916 : if (acls == NULL || *acls == '\0')
16487 34 : acls = acldefault;
16488 : }
16489 : else
16490 3456 : baseacls = acldefault;
16491 :
16492 57372 : if (!buildACLCommands(name, subname, nspname, type,
16493 : acls, baseacls, owner,
16494 : "", fout->remoteVersion, sql))
16495 0 : pg_fatal("could not parse ACL list (%s) or default (%s) for object \"%s\" (%s)",
16496 : acls, baseacls, name, type);
16497 :
16498 57372 : if (sql->len > 0)
16499 : {
16500 3570 : PQExpBuffer tagbuf = createPQExpBuffer();
16501 : DumpId aclDeps[2];
16502 3570 : int nDeps = 0;
16503 :
16504 3570 : if (tag)
16505 0 : appendPQExpBufferStr(tagbuf, tag);
16506 3570 : else if (subname)
16507 2094 : appendPQExpBuffer(tagbuf, "COLUMN %s.%s", name, subname);
16508 : else
16509 1476 : appendPQExpBuffer(tagbuf, "%s %s", type, name);
16510 :
16511 3570 : aclDeps[nDeps++] = objDumpId;
16512 3570 : if (altDumpId != InvalidDumpId)
16513 1926 : aclDeps[nDeps++] = altDumpId;
16514 :
16515 3570 : aclDumpId = createDumpId();
16516 :
16517 3570 : ArchiveEntry(fout, nilCatalogId, aclDumpId,
16518 3570 : ARCHIVE_OPTS(.tag = tagbuf->data,
16519 : .namespace = nspname,
16520 : .owner = owner,
16521 : .description = "ACL",
16522 : .section = SECTION_NONE,
16523 : .createStmt = sql->data,
16524 : .deps = aclDeps,
16525 : .nDeps = nDeps));
16526 :
16527 3570 : destroyPQExpBuffer(tagbuf);
16528 : }
16529 :
16530 57372 : destroyPQExpBuffer(sql);
16531 :
16532 57372 : return aclDumpId;
16533 : }
16534 :
16535 : /*
16536 : * dumpSecLabel
16537 : *
16538 : * This routine is used to dump any security labels associated with the
16539 : * object handed to this routine. The routine takes the object type
16540 : * and object name (ready to print, except for schema decoration), plus
16541 : * the namespace and owner of the object (for labeling the ArchiveEntry),
16542 : * plus catalog ID and subid which are the lookup key for pg_seclabel,
16543 : * plus the dump ID for the object (for setting a dependency).
16544 : * If a matching pg_seclabel entry is found, it is dumped.
16545 : *
16546 : * Note: although this routine takes a dumpId for dependency purposes,
16547 : * that purpose is just to mark the dependency in the emitted dump file
16548 : * for possible future use by pg_restore. We do NOT use it for determining
16549 : * ordering of the label in the dump file, because this routine is called
16550 : * after dependency sorting occurs. This routine should be called just after
16551 : * calling ArchiveEntry() for the specified object.
16552 : */
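                   :  /*
                   :   * Example (illustrative; the provider, object OID, and label string are
                   :   * all placeholders) of the statement form emitted here:
                   :   *
                   :   *   SECURITY LABEL FOR my_provider ON LARGE OBJECT 16392 IS 'classified';
                   :   */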
16553 : static void
16554 20 : dumpSecLabel(Archive *fout, const char *type, const char *name,
16555 : const char *namespace, const char *owner,
16556 : CatalogId catalogId, int subid, DumpId dumpId)
16557 : {
16558 20 : DumpOptions *dopt = fout->dopt;
16559 : SecLabelItem *labels;
16560 : int nlabels;
16561 : int i;
16562 : PQExpBuffer query;
16563 :
16564              :     /* do nothing if --no-security-labels is supplied */
16565 20 : if (dopt->no_security_labels)
16566 0 : return;
16567 :
16568              :     /*
16569              :      * Security labels are schema, not data ... except that large object
16570              :      * labels are data.
16571              :      */
16572 20 : if (strcmp(type, "LARGE OBJECT") != 0)
16573 : {
16574 0 : if (!dopt->dumpSchema)
16575 0 : return;
16576 : }
16577 : else
16578 : {
16579 : /* We do dump large object security labels in binary-upgrade mode */
16580 20 : if (!dopt->dumpData && !dopt->binary_upgrade)
16581 0 : return;
16582 : }
16583 :
16584 : /* Search for security labels associated with catalogId, using table */
16585 20 : nlabels = findSecLabels(catalogId.tableoid, catalogId.oid, &labels);
16586 :
16587 20 : query = createPQExpBuffer();
16588 :
16589 30 : for (i = 0; i < nlabels; i++)
16590 : {
16591 : /*
16592 : * Ignore label entries for which the subid doesn't match.
16593 : */
16594 10 : if (labels[i].objsubid != subid)
16595 0 : continue;
16596 :
16597 10 : appendPQExpBuffer(query,
16598 : "SECURITY LABEL FOR %s ON %s ",
16599 10 : fmtId(labels[i].provider), type);
16600 10 : if (namespace && *namespace)
16601 0 : appendPQExpBuffer(query, "%s.", fmtId(namespace));
16602 10 : appendPQExpBuffer(query, "%s IS ", name);
16603 10 : appendStringLiteralAH(query, labels[i].label, fout);
16604 10 : appendPQExpBufferStr(query, ";\n");
16605 : }
16606 :
16607 20 : if (query->len > 0)
16608 : {
16609 10 : PQExpBuffer tag = createPQExpBuffer();
16610 :
16611 10 : appendPQExpBuffer(tag, "%s %s", type, name);
16612 10 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
16613 10 : ARCHIVE_OPTS(.tag = tag->data,
16614 : .namespace = namespace,
16615 : .owner = owner,
16616 : .description = "SECURITY LABEL",
16617 : .section = SECTION_NONE,
16618 : .createStmt = query->data,
16619 : .deps = &dumpId,
16620 : .nDeps = 1));
16621 10 : destroyPQExpBuffer(tag);
16622 : }
16623 :
16624 20 : destroyPQExpBuffer(query);
16625 : }
16626 :
16627 : /*
16628 : * dumpTableSecLabel
16629 : *
16630 : * As above, but dump security label for both the specified table (or view)
16631 : * and its columns.
16632 : */
16633 : static void
16634 0 : dumpTableSecLabel(Archive *fout, const TableInfo *tbinfo, const char *reltypename)
16635 : {
16636 0 : DumpOptions *dopt = fout->dopt;
16637 : SecLabelItem *labels;
16638 : int nlabels;
16639 : int i;
16640 : PQExpBuffer query;
16641 : PQExpBuffer target;
16642 :
 16643 : /* Do nothing if --no-security-labels is supplied */
16644 0 : if (dopt->no_security_labels)
16645 0 : return;
16646 :
 16647 : /* Security labels are SCHEMA not data */
16648 0 : if (!dopt->dumpSchema)
16649 0 : return;
16650 :
 16651 : /* Search for security labels associated with relation, using table */
16652 0 : nlabels = findSecLabels(tbinfo->dobj.catId.tableoid,
16653 0 : tbinfo->dobj.catId.oid,
16654 : &labels);
16655 :
16656 : /* If security labels exist, build SECURITY LABEL statements */
16657 0 : if (nlabels <= 0)
16658 0 : return;
16659 :
16660 0 : query = createPQExpBuffer();
16661 0 : target = createPQExpBuffer();
16662 :
16663 0 : for (i = 0; i < nlabels; i++)
16664 : {
16665 : const char *colname;
16666 0 : const char *provider = labels[i].provider;
16667 0 : const char *label = labels[i].label;
16668 0 : int objsubid = labels[i].objsubid;
16669 :
16670 0 : resetPQExpBuffer(target);
16671 0 : if (objsubid == 0)
16672 : {
16673 0 : appendPQExpBuffer(target, "%s %s", reltypename,
16674 0 : fmtQualifiedDumpable(tbinfo));
16675 : }
16676 : else
16677 : {
16678 0 : colname = getAttrName(objsubid, tbinfo);
16679 : /* first fmtXXX result must be consumed before calling again */
16680 0 : appendPQExpBuffer(target, "COLUMN %s",
16681 0 : fmtQualifiedDumpable(tbinfo));
16682 0 : appendPQExpBuffer(target, ".%s", fmtId(colname));
16683 : }
16684 0 : appendPQExpBuffer(query, "SECURITY LABEL FOR %s ON %s IS ",
16685 : fmtId(provider), target->data);
16686 0 : appendStringLiteralAH(query, label, fout);
16687 0 : appendPQExpBufferStr(query, ";\n");
16688 : }
16689 0 : if (query->len > 0)
16690 : {
16691 0 : resetPQExpBuffer(target);
16692 0 : appendPQExpBuffer(target, "%s %s", reltypename,
16693 0 : fmtId(tbinfo->dobj.name));
16694 0 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
16695 0 : ARCHIVE_OPTS(.tag = target->data,
16696 : .namespace = tbinfo->dobj.namespace->dobj.name,
16697 : .owner = tbinfo->rolname,
16698 : .description = "SECURITY LABEL",
16699 : .section = SECTION_NONE,
16700 : .createStmt = query->data,
16701 : .deps = &(tbinfo->dobj.dumpId),
16702 : .nDeps = 1));
16703 : }
16704 0 : destroyPQExpBuffer(query);
16705 0 : destroyPQExpBuffer(target);
16706 : }
16707 :
16708 : /*
16709 : * findSecLabels
16710 : *
16711 : * Find the security label(s), if any, associated with the given object.
16712 : * All the objsubid values associated with the given classoid/objoid are
16713 : * found with one search.
16714 : */
16715 : static int
16716 20 : findSecLabels(Oid classoid, Oid objoid, SecLabelItem **items)
16717 : {
16718 20 : SecLabelItem *middle = NULL;
16719 : SecLabelItem *low;
16720 : SecLabelItem *high;
16721 : int nmatch;
16722 :
16723 20 : if (nseclabels <= 0) /* no labels, so no match is possible */
16724 : {
16725 0 : *items = NULL;
16726 0 : return 0;
16727 : }
16728 :
16729 : /*
16730 : * Do binary search to find some item matching the object.
16731 : */
16732 20 : low = &seclabels[0];
16733 20 : high = &seclabels[nseclabels - 1];
16734 30 : while (low <= high)
16735 : {
16736 20 : middle = low + (high - low) / 2;
16737 :
16738 20 : if (classoid < middle->classoid)
16739 0 : high = middle - 1;
16740 20 : else if (classoid > middle->classoid)
16741 0 : low = middle + 1;
16742 20 : else if (objoid < middle->objoid)
16743 10 : high = middle - 1;
16744 10 : else if (objoid > middle->objoid)
16745 0 : low = middle + 1;
16746 : else
16747 10 : break; /* found a match */
16748 : }
16749 :
16750 20 : if (low > high) /* no matches */
16751 : {
16752 10 : *items = NULL;
16753 10 : return 0;
16754 : }
16755 :
16756 : /*
16757 : * Now determine how many items match the object. The search loop
16758 : * invariant still holds: only items between low and high inclusive could
16759 : * match.
16760 : */
16761 10 : nmatch = 1;
16762 10 : while (middle > low)
16763 : {
16764 0 : if (classoid != middle[-1].classoid ||
16765 0 : objoid != middle[-1].objoid)
16766 : break;
16767 0 : middle--;
16768 0 : nmatch++;
16769 : }
16770 :
16771 10 : *items = middle;
16772 :
16773 10 : middle += nmatch;
16774 10 : while (middle <= high)
16775 : {
16776 0 : if (classoid != middle->classoid ||
16777 0 : objoid != middle->objoid)
16778 : break;
16779 0 : middle++;
16780 0 : nmatch++;
16781 : }
16782 :
16783 10 : return nmatch;
16784 : }
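 : /*
 :  * Worked example with hypothetical data: if seclabels[] contains, in
 :  * sorted order,
 :  *   (classoid 1259, objoid 16385, objsubid 0)
 :  *   (classoid 1259, objoid 16390, objsubid 0)
 :  *   (classoid 1259, objoid 16390, objsubid 2)
 :  * then findSecLabels(1259, 16390, &items) binary-searches to one of the
 :  * two matching entries, widens in both directions, points *items at the
 :  * first match, and returns nmatch = 2.
 :  */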
16785 :
16786 : /*
16787 : * collectSecLabels
16788 : *
16789 : * Construct a table of all security labels available for database objects;
16790 : * also set the has-seclabel component flag for each relevant object.
16791 : *
16792 : * The table is sorted by classoid/objid/objsubid for speed in lookup.
16793 : */
16794 : static void
16795 376 : collectSecLabels(Archive *fout)
16796 : {
16797 : PGresult *res;
16798 : PQExpBuffer query;
16799 : int i_label;
16800 : int i_provider;
16801 : int i_classoid;
16802 : int i_objoid;
16803 : int i_objsubid;
16804 : int ntups;
16805 : int i;
16806 : DumpableObject *dobj;
16807 :
16808 376 : query = createPQExpBuffer();
16809 :
16810 376 : appendPQExpBufferStr(query,
16811 : "SELECT label, provider, classoid, objoid, objsubid "
16812 : "FROM pg_catalog.pg_seclabels "
16813 : "ORDER BY classoid, objoid, objsubid");
16814 :
16815 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16816 :
16817 : /* Construct lookup table containing OIDs in numeric form */
16818 376 : i_label = PQfnumber(res, "label");
16819 376 : i_provider = PQfnumber(res, "provider");
16820 376 : i_classoid = PQfnumber(res, "classoid");
16821 376 : i_objoid = PQfnumber(res, "objoid");
16822 376 : i_objsubid = PQfnumber(res, "objsubid");
16823 :
16824 376 : ntups = PQntuples(res);
16825 :
16826 376 : seclabels = (SecLabelItem *) pg_malloc(ntups * sizeof(SecLabelItem));
16827 376 : nseclabels = 0;
16828 376 : dobj = NULL;
16829 :
16830 386 : for (i = 0; i < ntups; i++)
16831 : {
16832 : CatalogId objId;
16833 : int subid;
16834 :
16835 10 : objId.tableoid = atooid(PQgetvalue(res, i, i_classoid));
16836 10 : objId.oid = atooid(PQgetvalue(res, i, i_objoid));
16837 10 : subid = atoi(PQgetvalue(res, i, i_objsubid));
16838 :
16839 : /* We needn't remember labels that don't match any dumpable object */
16840 10 : if (dobj == NULL ||
16841 0 : dobj->catId.tableoid != objId.tableoid ||
16842 0 : dobj->catId.oid != objId.oid)
16843 10 : dobj = findObjectByCatalogId(objId);
16844 10 : if (dobj == NULL)
16845 0 : continue;
16846 :
16847 : /*
16848 : * Labels on columns of composite types are linked to the type's
16849 : * pg_class entry, but we need to set the DUMP_COMPONENT_SECLABEL flag
16850 : * in the type's own DumpableObject.
16851 : */
16852 10 : if (subid != 0 && dobj->objType == DO_TABLE &&
16853 0 : ((TableInfo *) dobj)->relkind == RELKIND_COMPOSITE_TYPE)
16854 0 : {
16855 : TypeInfo *cTypeInfo;
16856 :
16857 0 : cTypeInfo = findTypeByOid(((TableInfo *) dobj)->reltype);
16858 0 : if (cTypeInfo)
16859 0 : cTypeInfo->dobj.components |= DUMP_COMPONENT_SECLABEL;
16860 : }
16861 : else
16862 10 : dobj->components |= DUMP_COMPONENT_SECLABEL;
16863 :
16864 10 : seclabels[nseclabels].label = pg_strdup(PQgetvalue(res, i, i_label));
16865 10 : seclabels[nseclabels].provider = pg_strdup(PQgetvalue(res, i, i_provider));
16866 10 : seclabels[nseclabels].classoid = objId.tableoid;
16867 10 : seclabels[nseclabels].objoid = objId.oid;
16868 10 : seclabels[nseclabels].objsubid = subid;
16869 10 : nseclabels++;
16870 : }
16871 :
16872 376 : PQclear(res);
16873 376 : destroyPQExpBuffer(query);
16874 376 : }
16875 :
16876 : /*
16877 : * dumpTable
16878 : * write out to fout the declarations (not data) of a user-defined table
16879 : */
16880 : static void
16881 62728 : dumpTable(Archive *fout, const TableInfo *tbinfo)
16882 : {
16883 62728 : DumpOptions *dopt = fout->dopt;
16884 62728 : DumpId tableAclDumpId = InvalidDumpId;
16885 : char *namecopy;
16886 :
16887 : /* Do nothing if not dumping schema */
16888 62728 : if (!dopt->dumpSchema)
16889 3028 : return;
16890 :
16891 59700 : if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
16892 : {
16893 13258 : if (tbinfo->relkind == RELKIND_SEQUENCE)
16894 750 : dumpSequence(fout, tbinfo);
16895 : else
16896 12508 : dumpTableSchema(fout, tbinfo);
16897 : }
16898 :
16899 : /* Handle the ACL here */
16900 59700 : namecopy = pg_strdup(fmtId(tbinfo->dobj.name));
16901 59700 : if (tbinfo->dobj.dump & DUMP_COMPONENT_ACL)
16902 : {
16903 47884 : const char *objtype =
16904 47884 : (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
16905 :
16906 : tableAclDumpId =
16907 47884 : dumpACL(fout, tbinfo->dobj.dumpId, InvalidDumpId,
16908 : objtype, namecopy, NULL,
16909 47884 : tbinfo->dobj.namespace->dobj.name,
16910 47884 : NULL, tbinfo->rolname, &tbinfo->dacl);
16911 : }
16912 :
16913 : /*
16914 : * Handle column ACLs, if any. Note: we pull these with a separate query
16915 : * rather than trying to fetch them during getTableAttrs, so that we won't
16916 : * miss ACLs on system columns. Doing it this way also allows us to dump
16917 : * ACLs for catalogs that we didn't mark "interesting" back in getTables.
16918 : */
16919 59700 : if ((tbinfo->dobj.dump & DUMP_COMPONENT_ACL) && tbinfo->hascolumnACLs)
16920 : {
16921 566 : PQExpBuffer query = createPQExpBuffer();
16922 : PGresult *res;
16923 : int i;
16924 :
16925 566 : if (!fout->is_prepared[PREPQUERY_GETCOLUMNACLS])
16926 : {
16927 : /* Set up query for column ACLs */
16928 324 : appendPQExpBufferStr(query,
16929 : "PREPARE getColumnACLs(pg_catalog.oid) AS\n");
16930 :
16931 324 : if (fout->remoteVersion >= 90600)
16932 : {
16933 : /*
16934 : * In principle we should call acldefault('c', relowner) to
16935 : * get the default ACL for a column. However, we don't
16936 : * currently store the numeric OID of the relowner in
16937 : * TableInfo. We could convert the owner name using regrole,
16938 : * but that creates a risk of failure due to concurrent role
16939 : * renames. Given that the default ACL for columns is empty
16940 : * and is likely to stay that way, it's not worth extra cycles
16941 : * and risk to avoid hard-wiring that knowledge here.
16942 : */
16943 324 : appendPQExpBufferStr(query,
16944 : "SELECT at.attname, "
16945 : "at.attacl, "
16946 : "'{}' AS acldefault, "
16947 : "pip.privtype, pip.initprivs "
16948 : "FROM pg_catalog.pg_attribute at "
16949 : "LEFT JOIN pg_catalog.pg_init_privs pip ON "
16950 : "(at.attrelid = pip.objoid "
16951 : "AND pip.classoid = 'pg_catalog.pg_class'::pg_catalog.regclass "
16952 : "AND at.attnum = pip.objsubid) "
16953 : "WHERE at.attrelid = $1 AND "
16954 : "NOT at.attisdropped "
16955 : "AND (at.attacl IS NOT NULL OR pip.initprivs IS NOT NULL) "
16956 : "ORDER BY at.attnum");
16957 : }
16958 : else
16959 : {
16960 0 : appendPQExpBufferStr(query,
16961 : "SELECT attname, attacl, '{}' AS acldefault, "
16962 : "NULL AS privtype, NULL AS initprivs "
16963 : "FROM pg_catalog.pg_attribute "
16964 : "WHERE attrelid = $1 AND NOT attisdropped "
16965 : "AND attacl IS NOT NULL "
16966 : "ORDER BY attnum");
16967 : }
16968 :
16969 324 : ExecuteSqlStatement(fout, query->data);
16970 :
16971 324 : fout->is_prepared[PREPQUERY_GETCOLUMNACLS] = true;
16972 : }
16973 :
16974 566 : printfPQExpBuffer(query,
16975 : "EXECUTE getColumnACLs('%u')",
16976 566 : tbinfo->dobj.catId.oid);
16977 :
16978 566 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
16979 :
16980 9020 : for (i = 0; i < PQntuples(res); i++)
16981 : {
16982 8454 : char *attname = PQgetvalue(res, i, 0);
16983 8454 : char *attacl = PQgetvalue(res, i, 1);
16984 8454 : char *acldefault = PQgetvalue(res, i, 2);
16985 8454 : char privtype = *(PQgetvalue(res, i, 3));
16986 8454 : char *initprivs = PQgetvalue(res, i, 4);
16987 : DumpableAcl coldacl;
16988 : char *attnamecopy;
16989 :
16990 8454 : coldacl.acl = attacl;
16991 8454 : coldacl.acldefault = acldefault;
16992 8454 : coldacl.privtype = privtype;
16993 8454 : coldacl.initprivs = initprivs;
16994 8454 : attnamecopy = pg_strdup(fmtId(attname));
16995 :
16996 : /*
16997 : * Column's GRANT type is always TABLE. Each column ACL depends
16998 : * on the table-level ACL, since we can restore column ACLs in
16999 : * parallel but the table-level ACL has to be done first.
17000 : */
17001 8454 : dumpACL(fout, tbinfo->dobj.dumpId, tableAclDumpId,
17002 : "TABLE", namecopy, attnamecopy,
17003 8454 : tbinfo->dobj.namespace->dobj.name,
17004 8454 : NULL, tbinfo->rolname, &coldacl);
17005 8454 : free(attnamecopy);
17006 : }
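 : /*
 :  * Each column ACL restores as column-level GRANT/REVOKE commands built
 :  * by dumpACL(), e.g. (table, column and role names are hypothetical):
 :  *
 :  *   GRANT SELECT(somecol) ON TABLE public.sometab TO somerole;
 :  */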
17007 566 : PQclear(res);
17008 566 : destroyPQExpBuffer(query);
17009 : }
17010 :
17011 59700 : free(namecopy);
17012 : }
17013 :
17014 : /*
17015 : * Create the AS clause for a view or materialized view. The semicolon is
17016 : * stripped because a materialized view must add a WITH NO DATA clause.
17017 : *
17018 : * This returns a new buffer which must be freed by the caller.
17019 : */
17020 : static PQExpBuffer
17021 1736 : createViewAsClause(Archive *fout, const TableInfo *tbinfo)
17022 : {
17023 1736 : PQExpBuffer query = createPQExpBuffer();
17024 1736 : PQExpBuffer result = createPQExpBuffer();
17025 : PGresult *res;
17026 : int len;
17027 :
17028 : /* Fetch the view definition */
17029 1736 : appendPQExpBuffer(query,
17030 : "SELECT pg_catalog.pg_get_viewdef('%u'::pg_catalog.oid) AS viewdef",
17031 1736 : tbinfo->dobj.catId.oid);
17032 :
17033 1736 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
17034 :
17035 1736 : if (PQntuples(res) != 1)
17036 : {
17037 0 : if (PQntuples(res) < 1)
17038 0 : pg_fatal("query to obtain definition of view \"%s\" returned no data",
17039 : tbinfo->dobj.name);
17040 : else
17041 0 : pg_fatal("query to obtain definition of view \"%s\" returned more than one definition",
17042 : tbinfo->dobj.name);
17043 : }
17044 :
17045 1736 : len = PQgetlength(res, 0, 0);
17046 :
17047 1736 : if (len == 0)
17048 0 : pg_fatal("definition of view \"%s\" appears to be empty (length zero)",
17049 : tbinfo->dobj.name);
17050 :
17051 : /* Strip off the trailing semicolon so that other things may follow. */
17052 : Assert(PQgetvalue(res, 0, 0)[len - 1] == ';');
17053 1736 : appendBinaryPQExpBuffer(result, PQgetvalue(res, 0, 0), len - 1);
17054 :
17055 1736 : PQclear(res);
17056 1736 : destroyPQExpBuffer(query);
17057 :
17058 1736 : return result;
17059 : }
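 : /*
 :  * For illustration, with a hypothetical view: if pg_get_viewdef() returns
 :  *   " SELECT a, b FROM public.t;"
 :  * the buffer returned here holds the same text without the trailing
 :  * semicolon, so callers can append either a plain semicolon (views) or a
 :  * WITH NO DATA clause (materialized views) afterwards.
 :  */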
17060 :
17061 : /*
17062 : * Create a dummy AS clause for a view. This is used when the real view
17063 : * definition has to be postponed because of circular dependencies.
17064 : * We must duplicate the view's external properties -- column names and types
17065 : * (including collation) -- so that it works for subsequent references.
17066 : *
17067 : * This returns a new buffer which must be freed by the caller.
17068 : */
17069 : static PQExpBuffer
17070 40 : createDummyViewAsClause(Archive *fout, const TableInfo *tbinfo)
17071 : {
17072 40 : PQExpBuffer result = createPQExpBuffer();
17073 : int j;
17074 :
17075 40 : appendPQExpBufferStr(result, "SELECT");
17076 :
17077 80 : for (j = 0; j < tbinfo->numatts; j++)
17078 : {
17079 40 : if (j > 0)
17080 20 : appendPQExpBufferChar(result, ',');
17081 40 : appendPQExpBufferStr(result, "\n ");
17082 :
17083 40 : appendPQExpBuffer(result, "NULL::%s", tbinfo->atttypnames[j]);
17084 :
17085 : /*
17086 : * Must add collation if not default for the type, because CREATE OR
17087 : * REPLACE VIEW won't change it
17088 : */
17089 40 : if (OidIsValid(tbinfo->attcollation[j]))
17090 : {
17091 : CollInfo *coll;
17092 :
17093 0 : coll = findCollationByOid(tbinfo->attcollation[j]);
17094 0 : if (coll)
17095 0 : appendPQExpBuffer(result, " COLLATE %s",
17096 0 : fmtQualifiedDumpable(coll));
17097 : }
17098 :
17099 40 : appendPQExpBuffer(result, " AS %s", fmtId(tbinfo->attnames[j]));
17100 : }
17101 :
17102 40 : return result;
17103 : }
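 : /*
 :  * For illustration, for a hypothetical view with columns
 :  * (a integer, b text COLLATE "C") the generated dummy AS clause is
 :  *
 :  *   SELECT
 :  *       NULL::integer AS a,
 :  *       NULL::text COLLATE pg_catalog."C" AS b
 :  */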
17104 :
17105 : /*
17106 : * dumpTableSchema
17107 : * write the declaration (not data) of one user-defined table or view
17108 : */
17109 : static void
17110 12508 : dumpTableSchema(Archive *fout, const TableInfo *tbinfo)
17111 : {
17112 12508 : DumpOptions *dopt = fout->dopt;
17113 12508 : PQExpBuffer q = createPQExpBuffer();
17114 12508 : PQExpBuffer delq = createPQExpBuffer();
17115 12508 : PQExpBuffer extra = createPQExpBuffer();
17116 : char *qrelname;
17117 : char *qualrelname;
17118 : int numParents;
17119 : TableInfo **parents;
17120 : int actual_atts; /* number of attrs in this CREATE statement */
17121 : const char *reltypename;
17122 : char *storage;
17123 : int j,
17124 : k;
17125 :
17126 : /* We had better have loaded per-column details about this table */
17127 : Assert(tbinfo->interesting);
17128 :
17129 12508 : qrelname = pg_strdup(fmtId(tbinfo->dobj.name));
17130 12508 : qualrelname = pg_strdup(fmtQualifiedDumpable(tbinfo));
17131 :
17132 12508 : if (tbinfo->hasoids)
17133 0 : pg_log_warning("WITH OIDS is not supported anymore (table \"%s\")",
17134 : qrelname);
17135 :
17136 12508 : if (dopt->binary_upgrade)
17137 1744 : binary_upgrade_set_type_oids_by_rel(fout, q, tbinfo);
17138 :
17139 : /* Is it a table or a view? */
17140 12508 : if (tbinfo->relkind == RELKIND_VIEW)
17141 : {
17142 : PQExpBuffer result;
17143 :
17144 : /*
17145 : * Note: keep this code in sync with the is_view case in dumpRule()
17146 : */
17147 :
17148 1066 : reltypename = "VIEW";
17149 :
17150 1066 : appendPQExpBuffer(delq, "DROP VIEW %s;\n", qualrelname);
17151 :
17152 1066 : if (dopt->binary_upgrade)
17153 104 : binary_upgrade_set_pg_class_oids(fout, q,
17154 104 : tbinfo->dobj.catId.oid);
17155 :
17156 1066 : appendPQExpBuffer(q, "CREATE VIEW %s", qualrelname);
17157 :
17158 1066 : if (tbinfo->dummy_view)
17159 20 : result = createDummyViewAsClause(fout, tbinfo);
17160 : else
17161 : {
17162 1046 : if (nonemptyReloptions(tbinfo->reloptions))
17163 : {
17164 122 : appendPQExpBufferStr(q, " WITH (");
17165 122 : appendReloptionsArrayAH(q, tbinfo->reloptions, "", fout);
17166 122 : appendPQExpBufferChar(q, ')');
17167 : }
17168 1046 : result = createViewAsClause(fout, tbinfo);
17169 : }
17170 1066 : appendPQExpBuffer(q, " AS\n%s", result->data);
17171 1066 : destroyPQExpBuffer(result);
17172 :
17173 1066 : if (tbinfo->checkoption != NULL && !tbinfo->dummy_view)
17174 64 : appendPQExpBuffer(q, "\n WITH %s CHECK OPTION", tbinfo->checkoption);
17175 1066 : appendPQExpBufferStr(q, ";\n");
17176 : }
17177 : else
17178 : {
17179 11442 : char *partkeydef = NULL;
17180 11442 : char *ftoptions = NULL;
17181 11442 : char *srvname = NULL;
17182 11442 : const char *foreign = "";
17183 :
17184 : /*
17185 : * Set reltypename, and collect any relkind-specific data that we
17186 : * didn't fetch during getTables().
17187 : */
17188 11442 : switch (tbinfo->relkind)
17189 : {
17190 1152 : case RELKIND_PARTITIONED_TABLE:
17191 : {
17192 1152 : PQExpBuffer query = createPQExpBuffer();
17193 : PGresult *res;
17194 :
17195 1152 : reltypename = "TABLE";
17196 :
17197 : /* retrieve partition key definition */
17198 1152 : appendPQExpBuffer(query,
17199 : "SELECT pg_get_partkeydef('%u')",
17200 1152 : tbinfo->dobj.catId.oid);
17201 1152 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
17202 1152 : partkeydef = pg_strdup(PQgetvalue(res, 0, 0));
17203 1152 : PQclear(res);
17204 1152 : destroyPQExpBuffer(query);
17205 1152 : break;
17206 : }
17207 68 : case RELKIND_FOREIGN_TABLE:
17208 : {
17209 68 : PQExpBuffer query = createPQExpBuffer();
17210 : PGresult *res;
17211 : int i_srvname;
17212 : int i_ftoptions;
17213 :
17214 68 : reltypename = "FOREIGN TABLE";
17215 :
17216 : /* retrieve name of foreign server and generic options */
17217 68 : appendPQExpBuffer(query,
17218 : "SELECT fs.srvname, "
17219 : "pg_catalog.array_to_string(ARRAY("
17220 : "SELECT pg_catalog.quote_ident(option_name) || "
17221 : "' ' || pg_catalog.quote_literal(option_value) "
17222 : "FROM pg_catalog.pg_options_to_table(ftoptions) "
17223 : "ORDER BY option_name"
17224 : "), E',\n ') AS ftoptions "
17225 : "FROM pg_catalog.pg_foreign_table ft "
17226 : "JOIN pg_catalog.pg_foreign_server fs "
17227 : "ON (fs.oid = ft.ftserver) "
17228 : "WHERE ft.ftrelid = '%u'",
17229 68 : tbinfo->dobj.catId.oid);
17230 68 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
17231 68 : i_srvname = PQfnumber(res, "srvname");
17232 68 : i_ftoptions = PQfnumber(res, "ftoptions");
17233 68 : srvname = pg_strdup(PQgetvalue(res, 0, i_srvname));
17234 68 : ftoptions = pg_strdup(PQgetvalue(res, 0, i_ftoptions));
17235 68 : PQclear(res);
17236 68 : destroyPQExpBuffer(query);
17237 :
17238 68 : foreign = "FOREIGN ";
17239 68 : break;
17240 : }
17241 670 : case RELKIND_MATVIEW:
17242 670 : reltypename = "MATERIALIZED VIEW";
17243 670 : break;
17244 9552 : default:
17245 9552 : reltypename = "TABLE";
17246 9552 : break;
17247 : }
17248 :
17249 11442 : numParents = tbinfo->numParents;
17250 11442 : parents = tbinfo->parents;
17251 :
17252 11442 : appendPQExpBuffer(delq, "DROP %s %s;\n", reltypename, qualrelname);
17253 :
17254 11442 : if (dopt->binary_upgrade)
17255 1640 : binary_upgrade_set_pg_class_oids(fout, q,
17256 1640 : tbinfo->dobj.catId.oid);
17257 :
17258 : /*
17259 : * PostgreSQL 18 has disabled UNLOGGED for partitioned tables, so
 17260 : * ignore that setting when dumping a partitioned table that has it set.
17261 : */
17262 11442 : appendPQExpBuffer(q, "CREATE %s%s %s",
17263 11442 : (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
17264 40 : tbinfo->relkind != RELKIND_PARTITIONED_TABLE) ?
17265 : "UNLOGGED " : "",
17266 : reltypename,
17267 : qualrelname);
17268 :
17269 : /*
17270 : * Attach to type, if reloftype; except in case of a binary upgrade,
17271 : * we dump the table normally and attach it to the type afterward.
17272 : */
17273 11442 : if (OidIsValid(tbinfo->reloftype) && !dopt->binary_upgrade)
17274 48 : appendPQExpBuffer(q, " OF %s",
17275 48 : getFormattedTypeName(fout, tbinfo->reloftype,
17276 : zeroIsError));
17277 :
17278 11442 : if (tbinfo->relkind != RELKIND_MATVIEW)
17279 : {
17280 : /* Dump the attributes */
17281 10772 : actual_atts = 0;
17282 50548 : for (j = 0; j < tbinfo->numatts; j++)
17283 : {
17284 : /*
17285 : * Normally, dump if it's locally defined in this table, and
17286 : * not dropped. But for binary upgrade, we'll dump all the
17287 : * columns, and then fix up the dropped and nonlocal cases
17288 : * below.
17289 : */
17290 39776 : if (shouldPrintColumn(dopt, tbinfo, j))
17291 : {
17292 : bool print_default;
17293 : bool print_notnull;
17294 :
17295 : /*
17296 : * Default value --- suppress if to be printed separately
17297 : * or not at all.
17298 : */
17299 77706 : print_default = (tbinfo->attrdefs[j] != NULL &&
17300 39802 : tbinfo->attrdefs[j]->dobj.dump &&
17301 1992 : !tbinfo->attrdefs[j]->separate);
17302 :
17303 : /*
17304 : * Not Null constraint --- print it if it is locally
17305 : * defined, or if binary upgrade. (In the latter case, we
17306 : * reset conislocal below.)
17307 : */
17308 42272 : print_notnull = (tbinfo->notnull_constrs[j] != NULL &&
17309 4462 : (tbinfo->notnull_islocal[j] ||
17310 1234 : dopt->binary_upgrade ||
17311 1062 : tbinfo->ispartition));
17312 :
17313 : /*
17314 : * Skip column if fully defined by reloftype, except in
17315 : * binary upgrade
17316 : */
17317 37810 : if (OidIsValid(tbinfo->reloftype) &&
17318 100 : !print_default && !print_notnull &&
17319 60 : !dopt->binary_upgrade)
17320 48 : continue;
17321 :
17322 : /* Format properly if not first attr */
17323 37762 : if (actual_atts == 0)
17324 10120 : appendPQExpBufferStr(q, " (");
17325 : else
17326 27642 : appendPQExpBufferChar(q, ',');
17327 37762 : appendPQExpBufferStr(q, "\n ");
17328 37762 : actual_atts++;
17329 :
17330 : /* Attribute name */
17331 37762 : appendPQExpBufferStr(q, fmtId(tbinfo->attnames[j]));
17332 :
17333 37762 : if (tbinfo->attisdropped[j])
17334 : {
17335 : /*
17336 : * ALTER TABLE DROP COLUMN clears
17337 : * pg_attribute.atttypid, so we will not have gotten a
17338 : * valid type name; insert INTEGER as a stopgap. We'll
17339 : * clean things up later.
17340 : */
17341 168 : appendPQExpBufferStr(q, " INTEGER /* dummy */");
17342 : /* and skip to the next column */
17343 168 : continue;
17344 : }
17345 :
17346 : /*
 17347 : * Attribute type: normally omitted when creating a typed
 17348 : * table ('OF type_name'), but in binary-upgrade mode we
 17349 : * print it even in that case.
17350 : */
17351 37594 : if (dopt->binary_upgrade || !OidIsValid(tbinfo->reloftype))
17352 : {
17353 37562 : appendPQExpBuffer(q, " %s",
17354 37562 : tbinfo->atttypnames[j]);
17355 : }
17356 :
17357 37594 : if (print_default)
17358 : {
17359 1732 : if (tbinfo->attgenerated[j] == ATTRIBUTE_GENERATED_STORED)
17360 554 : appendPQExpBuffer(q, " GENERATED ALWAYS AS (%s) STORED",
17361 554 : tbinfo->attrdefs[j]->adef_expr);
17362 1178 : else if (tbinfo->attgenerated[j] == ATTRIBUTE_GENERATED_VIRTUAL)
17363 446 : appendPQExpBuffer(q, " GENERATED ALWAYS AS (%s)",
17364 446 : tbinfo->attrdefs[j]->adef_expr);
17365 : else
17366 732 : appendPQExpBuffer(q, " DEFAULT %s",
17367 732 : tbinfo->attrdefs[j]->adef_expr);
17368 : }
17369 :
17370 37594 : if (print_notnull)
17371 : {
17372 4400 : if (tbinfo->notnull_constrs[j][0] == '\0')
17373 3112 : appendPQExpBufferStr(q, " NOT NULL");
17374 : else
17375 1288 : appendPQExpBuffer(q, " CONSTRAINT %s NOT NULL",
17376 1288 : fmtId(tbinfo->notnull_constrs[j]));
17377 :
17378 4400 : if (tbinfo->notnull_noinh[j])
17379 0 : appendPQExpBufferStr(q, " NO INHERIT");
17380 : }
17381 :
17382 : /* Add collation if not default for the type */
17383 37594 : if (OidIsValid(tbinfo->attcollation[j]))
17384 : {
17385 : CollInfo *coll;
17386 :
17387 394 : coll = findCollationByOid(tbinfo->attcollation[j]);
17388 394 : if (coll)
17389 394 : appendPQExpBuffer(q, " COLLATE %s",
17390 394 : fmtQualifiedDumpable(coll));
17391 : }
17392 : }
17393 :
17394 : /*
17395 : * On the other hand, if we choose not to print a column
17396 : * (likely because it is created by inheritance), but the
17397 : * column has a locally-defined not-null constraint, we need
17398 : * to dump the constraint as a standalone object.
17399 : *
17400 : * This syntax isn't SQL-conforming, but if you wanted
17401 : * standard output you wouldn't be creating non-standard
17402 : * objects to begin with.
17403 : */
17404 39560 : if (!shouldPrintColumn(dopt, tbinfo, j) &&
17405 1966 : !tbinfo->attisdropped[j] &&
17406 1240 : tbinfo->notnull_constrs[j] != NULL &&
17407 354 : tbinfo->notnull_islocal[j])
17408 : {
17409 : /* Format properly if not first attr */
17410 118 : if (actual_atts == 0)
17411 110 : appendPQExpBufferStr(q, " (");
17412 : else
17413 8 : appendPQExpBufferChar(q, ',');
17414 118 : appendPQExpBufferStr(q, "\n ");
17415 118 : actual_atts++;
17416 :
17417 118 : if (tbinfo->notnull_constrs[j][0] == '\0')
17418 16 : appendPQExpBuffer(q, "NOT NULL %s",
17419 16 : fmtId(tbinfo->attnames[j]));
17420 : else
17421 204 : appendPQExpBuffer(q, "CONSTRAINT %s NOT NULL %s",
17422 102 : tbinfo->notnull_constrs[j],
17423 102 : fmtId(tbinfo->attnames[j]));
17424 : }
17425 : }
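 : /*
 :  * For illustration, a column that is not printed above but carries a
 :  * locally-defined not-null constraint is emitted as a table-level item
 :  * in the column list, e.g. (hypothetical names):
 :  *
 :  *   CREATE TABLE public.child (
 :  *       CONSTRAINT child_a_not_null NOT NULL a
 :  *   )
 :  *   INHERITS (public.parent);
 :  */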
17426 :
17427 : /*
17428 : * Add non-inherited CHECK constraints, if any.
17429 : *
17430 : * For partitions, we need to include check constraints even if
17431 : * they're not defined locally, because the ALTER TABLE ATTACH
17432 : * PARTITION that we'll emit later expects the constraint to be
17433 : * there. (No need to fix conislocal: ATTACH PARTITION does that)
17434 : */
17435 11918 : for (j = 0; j < tbinfo->ncheck; j++)
17436 : {
17437 1146 : ConstraintInfo *constr = &(tbinfo->checkexprs[j]);
17438 :
17439 1146 : if (constr->separate ||
17440 1006 : (!constr->conislocal && !tbinfo->ispartition))
17441 214 : continue;
17442 :
17443 932 : if (actual_atts == 0)
17444 32 : appendPQExpBufferStr(q, " (\n ");
17445 : else
17446 900 : appendPQExpBufferStr(q, ",\n ");
17447 :
17448 932 : appendPQExpBuffer(q, "CONSTRAINT %s ",
17449 932 : fmtId(constr->dobj.name));
17450 932 : appendPQExpBufferStr(q, constr->condef);
17451 :
17452 932 : actual_atts++;
17453 : }
17454 :
17455 10772 : if (actual_atts)
17456 10262 : appendPQExpBufferStr(q, "\n)");
17457 510 : else if (!(OidIsValid(tbinfo->reloftype) && !dopt->binary_upgrade))
17458 : {
17459 : /*
 17460 : * No attributes? We must have a parenthesized attribute list,
17461 : * even though empty, when not using the OF TYPE syntax.
17462 : */
17463 486 : appendPQExpBufferStr(q, " (\n)");
17464 : }
17465 :
17466 : /*
17467 : * Emit the INHERITS clause (not for partitions), except in
17468 : * binary-upgrade mode.
17469 : */
17470 10772 : if (numParents > 0 && !tbinfo->ispartition &&
17471 978 : !dopt->binary_upgrade)
17472 : {
17473 850 : appendPQExpBufferStr(q, "\nINHERITS (");
17474 1842 : for (k = 0; k < numParents; k++)
17475 : {
17476 992 : TableInfo *parentRel = parents[k];
17477 :
17478 992 : if (k > 0)
17479 142 : appendPQExpBufferStr(q, ", ");
17480 992 : appendPQExpBufferStr(q, fmtQualifiedDumpable(parentRel));
17481 : }
17482 850 : appendPQExpBufferChar(q, ')');
17483 : }
17484 :
17485 10772 : if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
17486 1152 : appendPQExpBuffer(q, "\nPARTITION BY %s", partkeydef);
17487 :
17488 10772 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
17489 68 : appendPQExpBuffer(q, "\nSERVER %s", fmtId(srvname));
17490 : }
17491 :
17492 22578 : if (nonemptyReloptions(tbinfo->reloptions) ||
17493 11136 : nonemptyReloptions(tbinfo->toast_reloptions))
17494 : {
17495 306 : bool addcomma = false;
17496 :
17497 306 : appendPQExpBufferStr(q, "\nWITH (");
17498 306 : if (nonemptyReloptions(tbinfo->reloptions))
17499 : {
17500 306 : addcomma = true;
17501 306 : appendReloptionsArrayAH(q, tbinfo->reloptions, "", fout);
17502 : }
17503 306 : if (nonemptyReloptions(tbinfo->toast_reloptions))
17504 : {
17505 10 : if (addcomma)
17506 10 : appendPQExpBufferStr(q, ", ");
17507 10 : appendReloptionsArrayAH(q, tbinfo->toast_reloptions, "toast.",
17508 : fout);
17509 : }
17510 306 : appendPQExpBufferChar(q, ')');
17511 : }
17512 :
17513 : /* Dump generic options if any */
17514 11442 : if (ftoptions && ftoptions[0])
17515 64 : appendPQExpBuffer(q, "\nOPTIONS (\n %s\n)", ftoptions);
17516 :
17517 : /*
17518 : * For materialized views, create the AS clause just like a view. At
17519 : * this point, we always mark the view as not populated.
17520 : */
17521 11442 : if (tbinfo->relkind == RELKIND_MATVIEW)
17522 : {
17523 : PQExpBuffer result;
17524 :
17525 670 : result = createViewAsClause(fout, tbinfo);
17526 670 : appendPQExpBuffer(q, " AS\n%s\n WITH NO DATA;\n",
17527 : result->data);
17528 670 : destroyPQExpBuffer(result);
17529 : }
17530 : else
17531 10772 : appendPQExpBufferStr(q, ";\n");
17532 :
17533 : /* Materialized views can depend on extensions */
17534 11442 : if (tbinfo->relkind == RELKIND_MATVIEW)
17535 670 : append_depends_on_extension(fout, q, &tbinfo->dobj,
17536 : "pg_catalog.pg_class",
17537 : "MATERIALIZED VIEW",
17538 : qualrelname);
17539 :
17540 : /*
 17541 : * In binary upgrade mode, restore any per-column "missing values"
 17542 : * (pg_attribute.attmissingval) that might be present.
17543 : */
17544 11442 : if (dopt->binary_upgrade)
17545 : {
17546 7936 : for (j = 0; j < tbinfo->numatts; j++)
17547 : {
17548 6296 : if (tbinfo->attmissingval[j][0] != '\0')
17549 : {
17550 4 : appendPQExpBufferStr(q, "\n-- set missing value.\n");
17551 4 : appendPQExpBufferStr(q,
17552 : "SELECT pg_catalog.binary_upgrade_set_missing_value(");
17553 4 : appendStringLiteralAH(q, qualrelname, fout);
17554 4 : appendPQExpBufferStr(q, "::pg_catalog.regclass,");
17555 4 : appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17556 4 : appendPQExpBufferChar(q, ',');
17557 4 : appendStringLiteralAH(q, tbinfo->attmissingval[j], fout);
17558 4 : appendPQExpBufferStr(q, ");\n\n");
17559 : }
17560 : }
17561 : }
17562 :
17563 : /*
17564 : * To create binary-compatible heap files, we have to ensure the same
17565 : * physical column order, including dropped columns, as in the
17566 : * original. Therefore, we create dropped columns above and drop them
17567 : * here, also updating their attlen/attalign values so that the
17568 : * dropped column can be skipped properly. (We do not bother with
17569 : * restoring the original attbyval setting.) Also, inheritance
17570 : * relationships are set up by doing ALTER TABLE INHERIT rather than
17571 : * using an INHERITS clause --- the latter would possibly mess up the
17572 : * column order. That also means we have to take care about setting
17573 : * attislocal correctly, plus fix up any inherited CHECK constraints.
17574 : * Analogously, we set up typed tables using ALTER TABLE / OF here.
17575 : *
17576 : * We process foreign and partitioned tables here, even though they
17577 : * lack heap storage, because they can participate in inheritance
17578 : * relationships and we want this stuff to be consistent across the
17579 : * inheritance tree. We can exclude indexes, toast tables, sequences
17580 : * and matviews, even though they have storage, because we don't
17581 : * support altering or dropping columns in them, nor can they be part
17582 : * of inheritance trees.
17583 : */
17584 11442 : if (dopt->binary_upgrade &&
17585 1640 : (tbinfo->relkind == RELKIND_RELATION ||
17586 222 : tbinfo->relkind == RELKIND_FOREIGN_TABLE ||
17587 220 : tbinfo->relkind == RELKIND_PARTITIONED_TABLE))
17588 : {
17589 : bool firstitem;
17590 : bool firstitem_extra;
17591 :
17592 : /*
17593 : * Drop any dropped columns. Merge the pg_attribute manipulations
17594 : * into a single SQL command, so that we don't cause repeated
17595 : * relcache flushes on the target table. Otherwise we risk O(N^2)
17596 : * relcache bloat while dropping N columns.
17597 : */
17598 1606 : resetPQExpBuffer(extra);
17599 1606 : firstitem = true;
17600 7860 : for (j = 0; j < tbinfo->numatts; j++)
17601 : {
17602 6254 : if (tbinfo->attisdropped[j])
17603 : {
17604 168 : if (firstitem)
17605 : {
17606 76 : appendPQExpBufferStr(q, "\n-- For binary upgrade, recreate dropped columns.\n"
17607 : "UPDATE pg_catalog.pg_attribute\n"
17608 : "SET attlen = v.dlen, "
17609 : "attalign = v.dalign, "
17610 : "attbyval = false\n"
17611 : "FROM (VALUES ");
17612 76 : firstitem = false;
17613 : }
17614 : else
17615 92 : appendPQExpBufferStr(q, ",\n ");
17616 168 : appendPQExpBufferChar(q, '(');
17617 168 : appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17618 168 : appendPQExpBuffer(q, ", %d, '%c')",
17619 168 : tbinfo->attlen[j],
17620 168 : tbinfo->attalign[j]);
17621 : /* The ALTER ... DROP COLUMN commands must come after */
17622 168 : appendPQExpBuffer(extra, "ALTER %sTABLE ONLY %s ",
17623 : foreign, qualrelname);
17624 168 : appendPQExpBuffer(extra, "DROP COLUMN %s;\n",
17625 168 : fmtId(tbinfo->attnames[j]));
17626 : }
17627 : }
17628 1606 : if (!firstitem)
17629 : {
17630 76 : appendPQExpBufferStr(q, ") v(dname, dlen, dalign)\n"
17631 : "WHERE attrelid = ");
17632 76 : appendStringLiteralAH(q, qualrelname, fout);
17633 76 : appendPQExpBufferStr(q, "::pg_catalog.regclass\n"
17634 : " AND attname = v.dname;\n");
17635 : /* Now we can issue the actual DROP COLUMN commands */
17636 76 : appendBinaryPQExpBuffer(q, extra->data, extra->len);
17637 : }
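 : /*
 :  * For illustration, with a hypothetical table and dropped column the
 :  * commands assembled above look like:
 :  *
 :  *   UPDATE pg_catalog.pg_attribute
 :  *   SET attlen = v.dlen, attalign = v.dalign, attbyval = false
 :  *   FROM (VALUES ('dropped_col', -1, 'i')) v(dname, dlen, dalign)
 :  *   WHERE attrelid = 'public.sometab'::pg_catalog.regclass
 :  *     AND attname = v.dname;
 :  *   ALTER TABLE ONLY public.sometab DROP COLUMN dropped_col;
 :  */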
17638 :
17639 : /*
17640 : * Fix up inherited columns. As above, do the pg_attribute
17641 : * manipulations in a single SQL command.
17642 : */
17643 1606 : firstitem = true;
17644 7860 : for (j = 0; j < tbinfo->numatts; j++)
17645 : {
17646 6254 : if (!tbinfo->attisdropped[j] &&
17647 6086 : !tbinfo->attislocal[j])
17648 : {
17649 1212 : if (firstitem)
17650 : {
17651 538 : appendPQExpBufferStr(q, "\n-- For binary upgrade, recreate inherited columns.\n");
17652 538 : appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_attribute\n"
17653 : "SET attislocal = false\n"
17654 : "WHERE attrelid = ");
17655 538 : appendStringLiteralAH(q, qualrelname, fout);
17656 538 : appendPQExpBufferStr(q, "::pg_catalog.regclass\n"
17657 : " AND attname IN (");
17658 538 : firstitem = false;
17659 : }
17660 : else
17661 674 : appendPQExpBufferStr(q, ", ");
17662 1212 : appendStringLiteralAH(q, tbinfo->attnames[j], fout);
17663 : }
17664 : }
17665 1606 : if (!firstitem)
17666 538 : appendPQExpBufferStr(q, ");\n");
17667 :
17668 : /*
17669 : * Fix up not-null constraints that come from inheritance. As
17670 : * above, do the pg_constraint manipulations in a single SQL
17671 : * command. (Actually, two in special cases, if we're doing an
17672 : * upgrade from < 18).
17673 : */
17674 1606 : firstitem = true;
17675 1606 : firstitem_extra = true;
17676 1606 : resetPQExpBuffer(extra);
17677 7860 : for (j = 0; j < tbinfo->numatts; j++)
17678 : {
17679 : /*
17680 : * If a not-null constraint comes from inheritance, reset
17681 : * conislocal. The inhcount is fixed by ALTER TABLE INHERIT,
17682 : * below. Special hack: in versions < 18, columns with no
17683 : * local definition need their constraint to be matched by
17684 : * column number in conkeys instead of by constraint name,
17685 : * because the latter is not available. (We distinguish the
17686 : * case because the constraint name is the empty string.)
17687 : */
17688 6254 : if (tbinfo->notnull_constrs[j] != NULL &&
17689 590 : !tbinfo->notnull_islocal[j])
17690 : {
17691 172 : if (tbinfo->notnull_constrs[j][0] != '\0')
17692 : {
17693 146 : if (firstitem)
17694 : {
17695 126 : appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_constraint\n"
17696 : "SET conislocal = false\n"
17697 : "WHERE contype = 'n' AND conrelid = ");
17698 126 : appendStringLiteralAH(q, qualrelname, fout);
17699 126 : appendPQExpBufferStr(q, "::pg_catalog.regclass AND\n"
17700 : "conname IN (");
17701 126 : firstitem = false;
17702 : }
17703 : else
17704 20 : appendPQExpBufferStr(q, ", ");
17705 146 : appendStringLiteralAH(q, tbinfo->notnull_constrs[j], fout);
17706 : }
17707 : else
17708 : {
17709 26 : if (firstitem_extra)
17710 : {
17711 26 : appendPQExpBufferStr(extra, "UPDATE pg_catalog.pg_constraint\n"
17712 : "SET conislocal = false\n"
17713 : "WHERE contype = 'n' AND conrelid = ");
17714 26 : appendStringLiteralAH(extra, qualrelname, fout);
17715 26 : appendPQExpBufferStr(extra, "::pg_catalog.regclass AND\n"
17716 : "conkey IN (");
17717 26 : firstitem_extra = false;
17718 : }
17719 : else
17720 0 : appendPQExpBufferStr(extra, ", ");
17721 26 : appendPQExpBuffer(extra, "'{%d}'", j + 1);
17722 : }
17723 : }
17724 : }
17725 1606 : if (!firstitem)
17726 126 : appendPQExpBufferStr(q, ");\n");
17727 1606 : if (!firstitem_extra)
17728 26 : appendPQExpBufferStr(extra, ");\n");
17729 :
17730 1606 : if (extra->len > 0)
17731 26 : appendBinaryPQExpBuffer(q, extra->data, extra->len);
17732 :
17733 : /*
17734 : * Add inherited CHECK constraints, if any.
17735 : *
17736 : * For partitions, they were already dumped, and conislocal
17737 : * doesn't need fixing.
17738 : *
17739 : * As above, issue only one direct manipulation of pg_constraint.
17740 : * Although it is tempting to merge the ALTER ADD CONSTRAINT
17741 : * commands into one as well, refrain for now due to concern about
17742 : * possible backend memory bloat if there are many such
17743 : * constraints.
17744 : */
17745 1606 : resetPQExpBuffer(extra);
17746 1606 : firstitem = true;
17747 1730 : for (k = 0; k < tbinfo->ncheck; k++)
17748 : {
17749 124 : ConstraintInfo *constr = &(tbinfo->checkexprs[k]);
17750 :
17751 124 : if (constr->separate || constr->conislocal || tbinfo->ispartition)
17752 120 : continue;
17753 :
17754 4 : if (firstitem)
17755 4 : appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inherited constraints.\n");
17756 4 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ADD CONSTRAINT %s %s;\n",
17757 : foreign, qualrelname,
17758 4 : fmtId(constr->dobj.name),
17759 : constr->condef);
17760 : /* Update pg_constraint after all the ALTER TABLEs */
17761 4 : if (firstitem)
17762 : {
17763 4 : appendPQExpBufferStr(extra, "UPDATE pg_catalog.pg_constraint\n"
17764 : "SET conislocal = false\n"
17765 : "WHERE contype = 'c' AND conrelid = ");
17766 4 : appendStringLiteralAH(extra, qualrelname, fout);
17767 4 : appendPQExpBufferStr(extra, "::pg_catalog.regclass\n");
17768 4 : appendPQExpBufferStr(extra, " AND conname IN (");
17769 4 : firstitem = false;
17770 : }
17771 : else
17772 0 : appendPQExpBufferStr(extra, ", ");
17773 4 : appendStringLiteralAH(extra, constr->dobj.name, fout);
17774 : }
17775 1606 : if (!firstitem)
17776 : {
17777 4 : appendPQExpBufferStr(extra, ");\n");
17778 4 : appendBinaryPQExpBuffer(q, extra->data, extra->len);
17779 : }
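 : /*
 :  * For illustration, with a hypothetical inherited CHECK constraint the
 :  * emitted commands are shaped like:
 :  *
 :  *   ALTER TABLE ONLY public.child ADD CONSTRAINT parent_a_check CHECK ((a > 0));
 :  *   UPDATE pg_catalog.pg_constraint
 :  *   SET conislocal = false
 :  *   WHERE contype = 'c' AND conrelid = 'public.child'::pg_catalog.regclass
 :  *    AND conname IN ('parent_a_check');
 :  */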
17780 :
17781 1606 : if (numParents > 0 && !tbinfo->ispartition)
17782 : {
17783 128 : appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inheritance this way.\n");
17784 278 : for (k = 0; k < numParents; k++)
17785 : {
17786 150 : TableInfo *parentRel = parents[k];
17787 :
17788 150 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s INHERIT %s;\n", foreign,
17789 : qualrelname,
17790 150 : fmtQualifiedDumpable(parentRel));
17791 : }
17792 : }
17793 :
17794 1606 : if (OidIsValid(tbinfo->reloftype))
17795 : {
17796 12 : appendPQExpBufferStr(q, "\n-- For binary upgrade, set up typed tables this way.\n");
17797 12 : appendPQExpBuffer(q, "ALTER TABLE ONLY %s OF %s;\n",
17798 : qualrelname,
17799 12 : getFormattedTypeName(fout, tbinfo->reloftype,
17800 : zeroIsError));
17801 : }
17802 : }
17803 :
17804 : /*
17805 : * In binary_upgrade mode, arrange to restore the old relfrozenxid and
17806 : * relminmxid of all vacuumable relations. (While vacuum.c processes
17807 : * TOAST tables semi-independently, here we see them only as children
17808 : * of other relations; so this "if" lacks RELKIND_TOASTVALUE, and the
17809 : * child toast table is handled below.)
17810 : */
17811 11442 : if (dopt->binary_upgrade &&
17812 1640 : (tbinfo->relkind == RELKIND_RELATION ||
17813 222 : tbinfo->relkind == RELKIND_MATVIEW))
17814 : {
17815 1452 : appendPQExpBufferStr(q, "\n-- For binary upgrade, set heap's relfrozenxid and relminmxid\n");
17816 1452 : appendPQExpBuffer(q, "UPDATE pg_catalog.pg_class\n"
17817 : "SET relfrozenxid = '%u', relminmxid = '%u'\n"
17818 : "WHERE oid = ",
17819 1452 : tbinfo->frozenxid, tbinfo->minmxid);
17820 1452 : appendStringLiteralAH(q, qualrelname, fout);
17821 1452 : appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
17822 :
17823 1452 : if (tbinfo->toast_oid)
17824 : {
17825 : /*
17826 : * The toast table will have the same OID at restore, so we
17827 : * can safely target it by OID.
17828 : */
17829 554 : appendPQExpBufferStr(q, "\n-- For binary upgrade, set toast's relfrozenxid and relminmxid\n");
17830 554 : appendPQExpBuffer(q, "UPDATE pg_catalog.pg_class\n"
17831 : "SET relfrozenxid = '%u', relminmxid = '%u'\n"
17832 : "WHERE oid = '%u';\n",
17833 554 : tbinfo->toast_frozenxid,
17834 554 : tbinfo->toast_minmxid, tbinfo->toast_oid);
17835 : }
17836 : }
17837 :
17838 : /*
17839 : * In binary_upgrade mode, restore matviews' populated status by
17840 : * poking pg_class directly. This is pretty ugly, but we can't use
17841 : * REFRESH MATERIALIZED VIEW since it's possible that some underlying
17842 : * matview is not populated even though this matview is; in any case,
17843 : * we want to transfer the matview's heap storage, not run REFRESH.
17844 : */
17845 11442 : if (dopt->binary_upgrade && tbinfo->relkind == RELKIND_MATVIEW &&
17846 34 : tbinfo->relispopulated)
17847 : {
17848 30 : appendPQExpBufferStr(q, "\n-- For binary upgrade, mark materialized view as populated\n");
17849 30 : appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_class\n"
17850 : "SET relispopulated = 't'\n"
17851 : "WHERE oid = ");
17852 30 : appendStringLiteralAH(q, qualrelname, fout);
17853 30 : appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
17854 : }
17855 :
17856 : /*
17857 : * Dump additional per-column properties that we can't handle in the
17858 : * main CREATE TABLE command.
17859 : */
17860 52036 : for (j = 0; j < tbinfo->numatts; j++)
17861 : {
17862 : /* None of this applies to dropped columns */
17863 40594 : if (tbinfo->attisdropped[j])
17864 894 : continue;
17865 :
17866 : /*
17867 : * Dump per-column statistics information. We only issue an ALTER
17868 : * TABLE statement if the attstattarget entry for this column is
17869 : * not the default value.
17870 : */
17871 39700 : if (tbinfo->attstattarget[j] >= 0)
17872 64 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET STATISTICS %d;\n",
17873 : foreign, qualrelname,
17874 64 : fmtId(tbinfo->attnames[j]),
17875 64 : tbinfo->attstattarget[j]);
17876 :
17877 : /*
17878 : * Dump per-column storage information. The statement is only
17879 : * dumped if the storage has been changed from the type's default.
17880 : */
17881 39700 : if (tbinfo->attstorage[j] != tbinfo->typstorage[j])
17882 : {
17883 158 : switch (tbinfo->attstorage[j])
17884 : {
17885 20 : case TYPSTORAGE_PLAIN:
17886 20 : storage = "PLAIN";
17887 20 : break;
17888 74 : case TYPSTORAGE_EXTERNAL:
17889 74 : storage = "EXTERNAL";
17890 74 : break;
17891 0 : case TYPSTORAGE_EXTENDED:
17892 0 : storage = "EXTENDED";
17893 0 : break;
17894 64 : case TYPSTORAGE_MAIN:
17895 64 : storage = "MAIN";
17896 64 : break;
17897 0 : default:
17898 0 : storage = NULL;
17899 : }
17900 :
17901 : /*
17902 : * Only dump the statement if it's a storage type we recognize
17903 : */
17904 158 : if (storage != NULL)
17905 158 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET STORAGE %s;\n",
17906 : foreign, qualrelname,
17907 158 : fmtId(tbinfo->attnames[j]),
17908 : storage);
17909 : }
17910 :
17911 : /*
17912 : * Dump per-column compression, if it's been set.
17913 : */
17914 39700 : if (!dopt->no_toast_compression)
17915 : {
17916 : const char *cmname;
17917 :
17918 39508 : switch (tbinfo->attcompression[j])
17919 : {
17920 142 : case 'p':
17921 142 : cmname = "pglz";
17922 142 : break;
17923 78 : case 'l':
17924 78 : cmname = "lz4";
17925 78 : break;
17926 39288 : default:
17927 39288 : cmname = NULL;
17928 39288 : break;
17929 : }
17930 :
17931 39508 : if (cmname != NULL)
17932 220 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET COMPRESSION %s;\n",
17933 : foreign, qualrelname,
17934 220 : fmtId(tbinfo->attnames[j]),
17935 : cmname);
17936 : }
17937 :
17938 : /*
17939 : * Dump per-column attributes.
17940 : */
17941 39700 : if (tbinfo->attoptions[j][0] != '\0')
17942 64 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET (%s);\n",
17943 : foreign, qualrelname,
17944 64 : fmtId(tbinfo->attnames[j]),
17945 64 : tbinfo->attoptions[j]);
17946 :
17947 : /*
17948 : * Dump per-column fdw options.
17949 : */
17950 39700 : if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
17951 68 : tbinfo->attfdwoptions[j][0] != '\0')
17952 64 : appendPQExpBuffer(q,
17953 : "ALTER FOREIGN TABLE ONLY %s ALTER COLUMN %s OPTIONS (\n"
17954 : " %s\n"
17955 : ");\n",
17956 : qualrelname,
17957 64 : fmtId(tbinfo->attnames[j]),
17958 64 : tbinfo->attfdwoptions[j]);
17959 : } /* end loop over columns */
17960 :
17961 11442 : free(partkeydef);
17962 11442 : free(ftoptions);
17963 11442 : free(srvname);
17964 : }
17965 :
17966 : /*
17967 : * dump properties we only have ALTER TABLE syntax for
17968 : */
17969 12508 : if ((tbinfo->relkind == RELKIND_RELATION ||
17970 2956 : tbinfo->relkind == RELKIND_PARTITIONED_TABLE ||
17971 1804 : tbinfo->relkind == RELKIND_MATVIEW) &&
17972 11374 : tbinfo->relreplident != REPLICA_IDENTITY_DEFAULT)
17973 : {
17974 384 : if (tbinfo->relreplident == REPLICA_IDENTITY_INDEX)
17975 : {
17976 : /* nothing to do, will be set when the index is dumped */
17977 : }
17978 384 : else if (tbinfo->relreplident == REPLICA_IDENTITY_NOTHING)
17979 : {
17980 384 : appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY NOTHING;\n",
17981 : qualrelname);
17982 : }
17983 0 : else if (tbinfo->relreplident == REPLICA_IDENTITY_FULL)
17984 : {
17985 0 : appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY FULL;\n",
17986 : qualrelname);
17987 : }
17988 : }
17989 :
17990 12508 : if (tbinfo->forcerowsec)
17991 10 : appendPQExpBuffer(q, "\nALTER TABLE ONLY %s FORCE ROW LEVEL SECURITY;\n",
17992 : qualrelname);
17993 :
17994 12508 : if (dopt->binary_upgrade)
17995 1744 : binary_upgrade_extension_member(q, &tbinfo->dobj,
17996 : reltypename, qrelname,
17997 1744 : tbinfo->dobj.namespace->dobj.name);
17998 :
17999 12508 : if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18000 : {
18001 12508 : char *tablespace = NULL;
18002 12508 : char *tableam = NULL;
18003 :
18004 : /*
 18005 : * _selectTablespace() relies on tablespace-enabled objects in the
 18006 : * default tablespace having a tablespace of "" (empty string), while
 18007 : * non-tablespace-enabled objects have a tablespace of NULL.
18008 : * getTables() sets tbinfo->reltablespace to "" for the default
18009 : * tablespace (not NULL).
18010 : */
18011 12508 : if (RELKIND_HAS_TABLESPACE(tbinfo->relkind))
18012 11374 : tablespace = tbinfo->reltablespace;
18013 :
18014 12508 : if (RELKIND_HAS_TABLE_AM(tbinfo->relkind) ||
18015 2286 : tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
18016 11374 : tableam = tbinfo->amname;
18017 :
18018 12508 : ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
18019 12508 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
18020 : .namespace = tbinfo->dobj.namespace->dobj.name,
18021 : .tablespace = tablespace,
18022 : .tableam = tableam,
18023 : .relkind = tbinfo->relkind,
18024 : .owner = tbinfo->rolname,
18025 : .description = reltypename,
18026 : .section = tbinfo->postponed_def ?
18027 : SECTION_POST_DATA : SECTION_PRE_DATA,
18028 : .createStmt = q->data,
18029 : .dropStmt = delq->data));
18030 : }
18031 :
18032 : /* Dump Table Comments */
18033 12508 : if (tbinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18034 148 : dumpTableComment(fout, tbinfo, reltypename);
18035 :
18036 : /* Dump Table Security Labels */
18037 12508 : if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
18038 0 : dumpTableSecLabel(fout, tbinfo, reltypename);
18039 :
18040 : /*
18041 : * Dump comments for not-null constraints that aren't to be dumped
18042 : * separately (those are processed by collectComments/dumpComment).
18043 : */
18044 12508 : if (!fout->dopt->no_comments && dopt->dumpSchema &&
18045 12508 : fout->remoteVersion >= 180000)
18046 : {
18047 12508 : PQExpBuffer comment = NULL;
18048 12508 : PQExpBuffer tag = NULL;
18049 :
18050 59960 : for (j = 0; j < tbinfo->numatts; j++)
18051 : {
18052 47452 : if (tbinfo->notnull_constrs[j] != NULL &&
18053 4816 : tbinfo->notnull_comment[j] != NULL)
18054 : {
18055 84 : if (comment == NULL)
18056 : {
18057 84 : comment = createPQExpBuffer();
18058 84 : tag = createPQExpBuffer();
18059 : }
18060 : else
18061 : {
18062 0 : resetPQExpBuffer(comment);
18063 0 : resetPQExpBuffer(tag);
18064 : }
18065 :
18066 84 : appendPQExpBuffer(comment, "COMMENT ON CONSTRAINT %s ON %s IS ",
18067 84 : fmtId(tbinfo->notnull_constrs[j]), qualrelname);
18068 84 : appendStringLiteralAH(comment, tbinfo->notnull_comment[j], fout);
18069 84 : appendPQExpBufferStr(comment, ";\n");
18070 :
18071 84 : appendPQExpBuffer(tag, "CONSTRAINT %s ON %s",
18072 84 : fmtId(tbinfo->notnull_constrs[j]), qrelname);
18073 :
18074 84 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
18075 84 : ARCHIVE_OPTS(.tag = tag->data,
18076 : .namespace = tbinfo->dobj.namespace->dobj.name,
18077 : .owner = tbinfo->rolname,
18078 : .description = "COMMENT",
18079 : .section = SECTION_NONE,
18080 : .createStmt = comment->data,
18081 : .deps = &(tbinfo->dobj.dumpId),
18082 : .nDeps = 1));
18083 : }
18084 : }
18085 :
18086 12508 : destroyPQExpBuffer(comment);
18087 12508 : destroyPQExpBuffer(tag);
18088 : }
18089 :
18090 : /* Dump comments on inlined table constraints */
18091 13654 : for (j = 0; j < tbinfo->ncheck; j++)
18092 : {
18093 1146 : ConstraintInfo *constr = &(tbinfo->checkexprs[j]);
18094 :
18095 1146 : if (constr->separate || !constr->conislocal)
18096 488 : continue;
18097 :
18098 658 : if (constr->dobj.dump & DUMP_COMPONENT_COMMENT)
18099 74 : dumpTableConstraintComment(fout, constr);
18100 : }
18101 :
18102 12508 : destroyPQExpBuffer(q);
18103 12508 : destroyPQExpBuffer(delq);
18104 12508 : destroyPQExpBuffer(extra);
18105 12508 : free(qrelname);
18106 12508 : free(qualrelname);
18107 12508 : }
18108 :
18109 : /*
18110 : * dumpTableAttach
18111 : * write to fout the commands to attach a child partition
18112 : *
18113 : * Child partitions are always made by creating them separately
18114 : * and then using ATTACH PARTITION, rather than using
18115 : * CREATE TABLE ... PARTITION OF. This is important for preserving
18116 : * any possible discrepancy in column layout, to allow assigning the
18117 : * correct tablespace if different, and so that it's possible to restore
18118 : * a partition without restoring its parent. (You'll get an error from
18119 : * the ATTACH PARTITION command, but that can be ignored, or skipped
18120 : * using "pg_restore -L" if you prefer.) The last point motivates
18121 : * treating ATTACH PARTITION as a completely separate ArchiveEntry
18122 : * rather than emitting it within the child partition's ArchiveEntry.
18123 : */
18124 : static void
18125 2782 : dumpTableAttach(Archive *fout, const TableAttachInfo *attachinfo)
18126 : {
18127 2782 : DumpOptions *dopt = fout->dopt;
18128 : PQExpBuffer q;
18129 : PGresult *res;
18130 : char *partbound;
18131 :
18132 : /* Do nothing if not dumping schema */
18133 2782 : if (!dopt->dumpSchema)
18134 108 : return;
18135 :
18136 2674 : q = createPQExpBuffer();
18137 :
18138 2674 : if (!fout->is_prepared[PREPQUERY_DUMPTABLEATTACH])
18139 : {
18140 : /* Set up query for partbound details */
18141 86 : appendPQExpBufferStr(q,
18142 : "PREPARE dumpTableAttach(pg_catalog.oid) AS\n");
18143 :
18144 86 : appendPQExpBufferStr(q,
18145 : "SELECT pg_get_expr(c.relpartbound, c.oid) "
18146 : "FROM pg_class c "
18147 : "WHERE c.oid = $1");
18148 :
18149 86 : ExecuteSqlStatement(fout, q->data);
18150 :
18151 86 : fout->is_prepared[PREPQUERY_DUMPTABLEATTACH] = true;
18152 : }
18153 :
18154 2674 : printfPQExpBuffer(q,
18155 : "EXECUTE dumpTableAttach('%u')",
18156 2674 : attachinfo->partitionTbl->dobj.catId.oid);
18157 :
18158 2674 : res = ExecuteSqlQueryForSingleRow(fout, q->data);
18159 2674 : partbound = PQgetvalue(res, 0, 0);
18160 :
18161 : /* Perform ALTER TABLE on the parent */
18162 2674 : printfPQExpBuffer(q,
18163 : "ALTER TABLE ONLY %s ",
18164 2674 : fmtQualifiedDumpable(attachinfo->parentTbl));
18165 2674 : appendPQExpBuffer(q,
18166 : "ATTACH PARTITION %s %s;\n",
18167 2674 : fmtQualifiedDumpable(attachinfo->partitionTbl),
18168 : partbound);
18169 :
18170 : /*
 18171 : * There is no point in creating a drop query, since the drop is handled
 18172 : * by dropping the table itself. (To change this, see also _printTocEntry().)
18173 : * Although this object doesn't really have ownership as such, set the
18174 : * owner field anyway to ensure that the command is run by the correct
18175 : * role at restore time.
18176 : */
18177 2674 : ArchiveEntry(fout, attachinfo->dobj.catId, attachinfo->dobj.dumpId,
18178 2674 : ARCHIVE_OPTS(.tag = attachinfo->dobj.name,
18179 : .namespace = attachinfo->dobj.namespace->dobj.name,
18180 : .owner = attachinfo->partitionTbl->rolname,
18181 : .description = "TABLE ATTACH",
18182 : .section = SECTION_PRE_DATA,
18183 : .createStmt = q->data));
18184 :
18185 2674 : PQclear(res);
18186 2674 : destroyPQExpBuffer(q);
18187 : }
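 : /*
 :  * For illustration, with a hypothetical range partition the generated
 :  * command is of the form (wrapped here for readability):
 :  *
 :  *   ALTER TABLE ONLY public.measurements
 :  *       ATTACH PARTITION public.measurements_2025
 :  *       FOR VALUES FROM ('2025-01-01') TO ('2026-01-01');
 :  */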
18188 :
18189 : /*
18190 : * dumpAttrDef --- dump an attribute's default-value declaration
18191 : */
18192 : static void
18193 2064 : dumpAttrDef(Archive *fout, const AttrDefInfo *adinfo)
18194 : {
18195 2064 : DumpOptions *dopt = fout->dopt;
18196 2064 : TableInfo *tbinfo = adinfo->adtable;
18197 2064 : int adnum = adinfo->adnum;
18198 : PQExpBuffer q;
18199 : PQExpBuffer delq;
18200 : char *qualrelname;
18201 : char *tag;
18202 : char *foreign;
18203 :
18204 : /* Do nothing if not dumping schema */
18205 2064 : if (!dopt->dumpSchema)
18206 0 : return;
18207 :
18208 : /* Skip if not "separate"; it was dumped in the table's definition */
18209 2064 : if (!adinfo->separate)
18210 1732 : return;
18211 :
18212 332 : q = createPQExpBuffer();
18213 332 : delq = createPQExpBuffer();
18214 :
18215 332 : qualrelname = pg_strdup(fmtQualifiedDumpable(tbinfo));
18216 :
18217 332 : foreign = tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "";
18218 :
18219 332 : appendPQExpBuffer(q,
18220 : "ALTER %sTABLE ONLY %s ALTER COLUMN %s SET DEFAULT %s;\n",
18221 332 : foreign, qualrelname, fmtId(tbinfo->attnames[adnum - 1]),
18222 332 : adinfo->adef_expr);
18223 :
18224 332 : appendPQExpBuffer(delq, "ALTER %sTABLE %s ALTER COLUMN %s DROP DEFAULT;\n",
18225 : foreign, qualrelname,
18226 332 : fmtId(tbinfo->attnames[adnum - 1]));
18227 :
18228 332 : tag = psprintf("%s %s", tbinfo->dobj.name, tbinfo->attnames[adnum - 1]);
18229 :
18230 332 : if (adinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18231 332 : ArchiveEntry(fout, adinfo->dobj.catId, adinfo->dobj.dumpId,
18232 332 : ARCHIVE_OPTS(.tag = tag,
18233 : .namespace = tbinfo->dobj.namespace->dobj.name,
18234 : .owner = tbinfo->rolname,
18235 : .description = "DEFAULT",
18236 : .section = SECTION_PRE_DATA,
18237 : .createStmt = q->data,
18238 : .dropStmt = delq->data));
18239 :
18240 332 : free(tag);
18241 332 : destroyPQExpBuffer(q);
18242 332 : destroyPQExpBuffer(delq);
18243 332 : free(qualrelname);
18244 : }
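/*
 * For illustration only, with a hypothetical table "public.orders" and
 * column "status", a separately-dumped default produces create and drop
 * statements of this shape:
 *
 *   ALTER TABLE ONLY public.orders ALTER COLUMN status SET DEFAULT 'new';
 *   ALTER TABLE public.orders ALTER COLUMN status DROP DEFAULT;
 */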
18245 :
18246 : /*
18247 : * getAttrName: extract the correct name for an attribute
18248 : *
18249 : * The array tblInfo->attnames[] only provides names of user attributes;
18250 : * if a system attribute number is supplied, we have to fake it.
18251 : * We also do a little bit of bounds checking for safety's sake.
18252 : */
18253 : static const char *
18254 4110 : getAttrName(int attrnum, const TableInfo *tblInfo)
18255 : {
18256 4110 : if (attrnum > 0 && attrnum <= tblInfo->numatts)
18257 4110 : return tblInfo->attnames[attrnum - 1];
18258 0 : switch (attrnum)
18259 : {
18260 0 : case SelfItemPointerAttributeNumber:
18261 0 : return "ctid";
18262 0 : case MinTransactionIdAttributeNumber:
18263 0 : return "xmin";
18264 0 : case MinCommandIdAttributeNumber:
18265 0 : return "cmin";
18266 0 : case MaxTransactionIdAttributeNumber:
18267 0 : return "xmax";
18268 0 : case MaxCommandIdAttributeNumber:
18269 0 : return "cmax";
18270 0 : case TableOidAttributeNumber:
18271 0 : return "tableoid";
18272 : }
18273 0 : pg_fatal("invalid column number %d for table \"%s\"",
18274 : attrnum, tblInfo->dobj.name);
18275 : return NULL; /* keep compiler quiet */
18276 : }
18277 :
18278 : /*
18279 : * dumpIndex
18280 : * write out to fout a user-defined index
18281 : */
18282 : static void
18283 5200 : dumpIndex(Archive *fout, const IndxInfo *indxinfo)
18284 : {
18285 5200 : DumpOptions *dopt = fout->dopt;
18286 5200 : TableInfo *tbinfo = indxinfo->indextable;
18287 5200 : bool is_constraint = (indxinfo->indexconstraint != 0);
18288 : PQExpBuffer q;
18289 : PQExpBuffer delq;
18290 : char *qindxname;
18291 : char *qqindxname;
18292 :
18293 : /* Do nothing if not dumping schema */
18294 5200 : if (!dopt->dumpSchema)
18295 234 : return;
18296 :
18297 4966 : q = createPQExpBuffer();
18298 4966 : delq = createPQExpBuffer();
18299 :
18300 4966 : qindxname = pg_strdup(fmtId(indxinfo->dobj.name));
18301 4966 : qqindxname = pg_strdup(fmtQualifiedDumpable(indxinfo));
18302 :
18303 : /*
18304 : * If there's an associated constraint, don't dump the index per se, but
18305 : * do dump any comment for it. (This is safe because dependency ordering
18306 : * will have ensured the constraint is emitted first.) Note that the
18307 : * emitted comment has to be shown as depending on the constraint, not the
18308 : * index, in such cases.
18309 : */
18310 4966 : if (!is_constraint)
18311 : {
18312 2090 : char *indstatcols = indxinfo->indstatcols;
18313 2090 : char *indstatvals = indxinfo->indstatvals;
18314 2090 : char **indstatcolsarray = NULL;
18315 2090 : char **indstatvalsarray = NULL;
18316 2090 : int nstatcols = 0;
18317 2090 : int nstatvals = 0;
18318 :
18319 2090 : if (dopt->binary_upgrade)
18320 316 : binary_upgrade_set_pg_class_oids(fout, q,
18321 316 : indxinfo->dobj.catId.oid);
18322 :
18323 : /* Plain secondary index */
18324 2090 : appendPQExpBuffer(q, "%s;\n", indxinfo->indexdef);
18325 :
18326 : /*
18327 : * Append ALTER TABLE commands as needed to set properties that we
18328 : * only have ALTER TABLE syntax for. Keep this in sync with the
18329 : * similar code in dumpConstraint!
18330 : */
18331 :
18332 : /* If the index is clustered, we need to record that. */
18333 2090 : if (indxinfo->indisclustered)
18334 : {
18335 0 : appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER",
18336 0 : fmtQualifiedDumpable(tbinfo));
18337 : /* index name is not qualified in this syntax */
18338 0 : appendPQExpBuffer(q, " ON %s;\n",
18339 : qindxname);
18340 : }
18341 :
18342 : /*
18343 : * If the index has any statistics on some of its columns, generate
18344 : * the associated ALTER INDEX queries.
18345 : */
18346 2090 : if (strlen(indstatcols) != 0 || strlen(indstatvals) != 0)
18347 : {
18348 : int j;
18349 :
18350 64 : if (!parsePGArray(indstatcols, &indstatcolsarray, &nstatcols))
18351 0 : pg_fatal("could not parse index statistic columns");
18352 64 : if (!parsePGArray(indstatvals, &indstatvalsarray, &nstatvals))
18353 0 : pg_fatal("could not parse index statistic values");
18354 64 : if (nstatcols != nstatvals)
18355 0 : pg_fatal("mismatched number of columns and values for index statistics");
18356 :
18357 192 : for (j = 0; j < nstatcols; j++)
18358 : {
18359 128 : appendPQExpBuffer(q, "ALTER INDEX %s ", qqindxname);
18360 :
18361 : /*
18362 : * Note that this is a column number, so no quotes should be
18363 : * used.
18364 : */
18365 128 : appendPQExpBuffer(q, "ALTER COLUMN %s ",
18366 128 : indstatcolsarray[j]);
18367 128 : appendPQExpBuffer(q, "SET STATISTICS %s;\n",
18368 128 : indstatvalsarray[j]);
18369 : }
18370 : }
18371 :
18372 : /* Indexes can depend on extensions */
18373 2090 : append_depends_on_extension(fout, q, &indxinfo->dobj,
18374 : "pg_catalog.pg_class",
18375 : "INDEX", qqindxname);
18376 :
18377 : /* If the index defines identity, we need to record that. */
18378 2090 : if (indxinfo->indisreplident)
18379 : {
18380 0 : appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY USING",
18381 0 : fmtQualifiedDumpable(tbinfo));
18382 : /* index name is not qualified in this syntax */
18383 0 : appendPQExpBuffer(q, " INDEX %s;\n",
18384 : qindxname);
18385 : }
18386 :
18387 : /*
18388 : * If this index is a member of a partitioned index, the backend will
18389 : * not allow us to drop it separately, so don't try. It will go away
18390 : * automatically when we drop either the index's table or the
18391 : * partitioned index. (If, in a selective restore with --clean, we
18392 : * drop neither of those, then this index will not be dropped either.
18393 : * But that's fine, and even if you think it's not, the backend won't
18394 : * let us do otherwise.)
18395 : */
18396 2090 : if (indxinfo->parentidx == 0)
18397 1726 : appendPQExpBuffer(delq, "DROP INDEX %s;\n", qqindxname);
18398 :
18399 2090 : if (indxinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18400 2090 : ArchiveEntry(fout, indxinfo->dobj.catId, indxinfo->dobj.dumpId,
18401 2090 : ARCHIVE_OPTS(.tag = indxinfo->dobj.name,
18402 : .namespace = tbinfo->dobj.namespace->dobj.name,
18403 : .tablespace = indxinfo->tablespace,
18404 : .owner = tbinfo->rolname,
18405 : .description = "INDEX",
18406 : .section = SECTION_POST_DATA,
18407 : .createStmt = q->data,
18408 : .dropStmt = delq->data));
18409 :
18410 2090 : free(indstatcolsarray);
18411 2090 : free(indstatvalsarray);
18412 : }
18413 :
18414 : /* Dump Index Comments */
18415 4966 : if (indxinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18416 30 : dumpComment(fout, "INDEX", qindxname,
18417 30 : tbinfo->dobj.namespace->dobj.name,
18418 : tbinfo->rolname,
18419 : indxinfo->dobj.catId, 0,
18420 : is_constraint ? indxinfo->indexconstraint :
18421 : indxinfo->dobj.dumpId);
18422 :
18423 4966 : destroyPQExpBuffer(q);
18424 4966 : destroyPQExpBuffer(delq);
18425 4966 : free(qindxname);
18426 4966 : free(qqindxname);
18427 : }
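/*
 * For illustration only, using a hypothetical index "public.t_expr_idx" on
 * table "public.t": besides the CREATE INDEX taken from indexdef, the
 * ALTER follow-ups built above look like:
 *
 *   ALTER TABLE public.t CLUSTER ON t_expr_idx;
 *   ALTER INDEX public.t_expr_idx ALTER COLUMN 2 SET STATISTICS 1000;
 *
 * (The "2" is an index column number, not a name, which is why it is not
 * quoted.)
 */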
18428 :
18429 : /*
18430 : * dumpIndexAttach
18431 : * write out to fout a partitioned-index attachment clause
18432 : */
18433 : static void
18434 1148 : dumpIndexAttach(Archive *fout, const IndexAttachInfo *attachinfo)
18435 : {
18436 : /* Do nothing if not dumping schema */
18437 1148 : if (!fout->dopt->dumpSchema)
18438 96 : return;
18439 :
18440 1052 : if (attachinfo->partitionIdx->dobj.dump & DUMP_COMPONENT_DEFINITION)
18441 : {
18442 1052 : PQExpBuffer q = createPQExpBuffer();
18443 :
18444 1052 : appendPQExpBuffer(q, "ALTER INDEX %s ",
18445 1052 : fmtQualifiedDumpable(attachinfo->parentIdx));
18446 1052 : appendPQExpBuffer(q, "ATTACH PARTITION %s;\n",
18447 1052 : fmtQualifiedDumpable(attachinfo->partitionIdx));
18448 :
18449 : /*
18450 : * There is no need for a dropStmt since the drop is done implicitly
18451 : * when we drop either the index's table or the partitioned index.
18452 : * Moreover, since there's no ALTER INDEX DETACH PARTITION command,
18453 : * there's no way to do it anyway. (If you are thinking of changing
18454 : * this, consider also what to do with --if-exists.)
18455 : *
18456 : * Although this object doesn't really have ownership as such, set the
18457 : * owner field anyway to ensure that the command is run by the correct
18458 : * role at restore time.
18459 : */
18460 1052 : ArchiveEntry(fout, attachinfo->dobj.catId, attachinfo->dobj.dumpId,
18461 1052 : ARCHIVE_OPTS(.tag = attachinfo->dobj.name,
18462 : .namespace = attachinfo->dobj.namespace->dobj.name,
18463 : .owner = attachinfo->parentIdx->indextable->rolname,
18464 : .description = "INDEX ATTACH",
18465 : .section = SECTION_POST_DATA,
18466 : .createStmt = q->data));
18467 :
18468 1052 : destroyPQExpBuffer(q);
18469 : }
18470 : }
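/*
 * For illustration only, with a hypothetical partitioned index
 * "public.parent_pkey" and partition index "public.child_pkey", the entry
 * created above contains:
 *
 *   ALTER INDEX public.parent_pkey ATTACH PARTITION public.child_pkey;
 */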
18471 :
18472 : /*
18473 : * dumpStatisticsExt
18474 : * write out to fout an extended statistics object
18475 : */
18476 : static void
18477 342 : dumpStatisticsExt(Archive *fout, const StatsExtInfo *statsextinfo)
18478 : {
18479 342 : DumpOptions *dopt = fout->dopt;
18480 : PQExpBuffer q;
18481 : PQExpBuffer delq;
18482 : PQExpBuffer query;
18483 : char *qstatsextname;
18484 : PGresult *res;
18485 : char *stxdef;
18486 :
18487 : /* Do nothing if not dumping schema */
18488 342 : if (!dopt->dumpSchema)
18489 48 : return;
18490 :
18491 294 : q = createPQExpBuffer();
18492 294 : delq = createPQExpBuffer();
18493 294 : query = createPQExpBuffer();
18494 :
18495 294 : qstatsextname = pg_strdup(fmtId(statsextinfo->dobj.name));
18496 :
18497 294 : appendPQExpBuffer(query, "SELECT "
18498 : "pg_catalog.pg_get_statisticsobjdef('%u'::pg_catalog.oid)",
18499 294 : statsextinfo->dobj.catId.oid);
18500 :
18501 294 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
18502 :
18503 294 : stxdef = PQgetvalue(res, 0, 0);
18504 :
18505 : /* Result of pg_get_statisticsobjdef is complete except for semicolon */
18506 294 : appendPQExpBuffer(q, "%s;\n", stxdef);
18507 :
18508 : /*
18509 : * We only issue an ALTER STATISTICS statement if the stxstattarget entry
18510 : * for this statistics object is not the default value.
18511 : */
18512 294 : if (statsextinfo->stattarget >= 0)
18513 : {
18514 64 : appendPQExpBuffer(q, "ALTER STATISTICS %s ",
18515 64 : fmtQualifiedDumpable(statsextinfo));
18516 64 : appendPQExpBuffer(q, "SET STATISTICS %d;\n",
18517 64 : statsextinfo->stattarget);
18518 : }
18519 :
18520 294 : appendPQExpBuffer(delq, "DROP STATISTICS %s;\n",
18521 294 : fmtQualifiedDumpable(statsextinfo));
18522 :
18523 294 : if (statsextinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18524 294 : ArchiveEntry(fout, statsextinfo->dobj.catId,
18525 294 : statsextinfo->dobj.dumpId,
18526 294 : ARCHIVE_OPTS(.tag = statsextinfo->dobj.name,
18527 : .namespace = statsextinfo->dobj.namespace->dobj.name,
18528 : .owner = statsextinfo->rolname,
18529 : .description = "STATISTICS",
18530 : .section = SECTION_POST_DATA,
18531 : .createStmt = q->data,
18532 : .dropStmt = delq->data));
18533 :
18534 : /* Dump Statistics Comments */
18535 294 : if (statsextinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
18536 0 : dumpComment(fout, "STATISTICS", qstatsextname,
18537 0 : statsextinfo->dobj.namespace->dobj.name,
18538 0 : statsextinfo->rolname,
18539 : statsextinfo->dobj.catId, 0,
18540 0 : statsextinfo->dobj.dumpId);
18541 :
18542 294 : PQclear(res);
18543 294 : destroyPQExpBuffer(q);
18544 294 : destroyPQExpBuffer(delq);
18545 294 : destroyPQExpBuffer(query);
18546 294 : free(qstatsextname);
18547 : }
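/*
 * For illustration only, assuming a hypothetical statistics object
 * "public.st_ab" on table "public.t" with a non-default target, the
 * generated script contains something like:
 *
 *   CREATE STATISTICS public.st_ab (ndistinct, dependencies) ON a, b
 *       FROM public.t;
 *   ALTER STATISTICS public.st_ab SET STATISTICS 500;
 *
 * (The CREATE STATISTICS text itself comes verbatim from
 * pg_get_statisticsobjdef().)
 */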
18548 :
18549 : /*
18550 : * dumpStatisticsExtStats
18551 : * write out to fout the stats for an extended statistics object
18552 : */
18553 : static void
18554 342 : dumpStatisticsExtStats(Archive *fout, const StatsExtInfo *statsextinfo)
18555 : {
18556 342 : DumpOptions *dopt = fout->dopt;
18557 : PQExpBuffer query;
18558 : PGresult *res;
18559 : int nstats;
18560 :
18561 : /* Do nothing if not dumping statistics */
18562 342 : if (!dopt->dumpStatistics)
18563 80 : return;
18564 :
18565 262 : if (!fout->is_prepared[PREPQUERY_DUMPEXTSTATSOBJSTATS])
18566 : {
18567 66 : PQExpBuffer pq = createPQExpBuffer();
18568 :
18569 : /*---------
18570 : * Set up query for details about extended statistics objects.
18571 : *
18572 : * The query depends on the backend version:
18573 : * - In v19 and newer versions, query the pg_stats_ext*
18574 : * catalogs directly.
18575 : * - In v18 and older versions, ndistinct and dependencies have a
18576 : * different format that needs translation.
18577 : * - In v14 and older versions, inherited does not exist.
18578 : * - In v11 and older versions, there is no pg_stats_ext, hence
18579 : * the logic joins pg_statistic_ext and pg_namespace.
18580 : *---------
18581 : */
18582 :
18583 66 : appendPQExpBufferStr(pq,
18584 : "PREPARE getExtStatsStats(pg_catalog.name, pg_catalog.name) AS\n"
18585 : "SELECT ");
18586 :
18587 : /*
18588 : * Versions 15 and newer have inherited stats.
18589 : *
18590 : * Create this column in all versions because we need to order by it
18591 : * later.
18592 : */
18593 66 : if (fout->remoteVersion >= 150000)
18594 66 : appendPQExpBufferStr(pq, "e.inherited, ");
18595 : else
18596 0 : appendPQExpBufferStr(pq, "false AS inherited, ");
18597 :
18598 : /*--------
18599 : * The ndistinct and dependencies formats changed in v19, so
18600 : * everything before that needs to be translated.
18601 : *
18602 : * The ndistinct translation converts this kind of data:
18603 : * {"3, 4": 11, "3, 6": 11, "4, 6": 11, "3, 4, 6": 11}
18604 : *
18605 : * to this:
18606 : * [ {"attributes": [3,4], "ndistinct": 11},
18607 : * {"attributes": [3,6], "ndistinct": 11},
18608 : * {"attributes": [4,6], "ndistinct": 11},
18609 : * {"attributes": [3,4,6], "ndistinct": 11} ]
18610 : *
18611 : * The dependencies translation converts this kind of data:
18612 : * {"3 => 4": 1.000000, "3 => 6": 1.000000,
18613 : * "4 => 6": 1.000000, "3, 4 => 6": 1.000000,
18614 : * "3, 6 => 4": 1.000000}
18615 : *
18616 : * to this:
18617 : * [ {"attributes": [3], "dependency": 4, "degree": 1.000000},
18618 : * {"attributes": [3], "dependency": 6, "degree": 1.000000},
18619 : * {"attributes": [4], "dependency": 6, "degree": 1.000000},
18620 : * {"attributes": [3,4], "dependency": 6, "degree": 1.000000},
18621 : * {"attributes": [3,6], "dependency": 4, "degree": 1.000000} ]
18622 : *--------
18623 : */
18624 66 : if (fout->remoteVersion >= 190000)
18625 66 : appendPQExpBufferStr(pq, "e.n_distinct, e.dependencies, ");
18626 : else
18627 0 : appendPQExpBufferStr(pq,
18628 : "( "
18629 : "SELECT json_agg( "
18630 : " json_build_object( "
18631 : " '" PG_NDISTINCT_KEY_ATTRIBUTES "', "
18632 : " string_to_array(kv.key, ', ')::integer[], "
18633 : " '" PG_NDISTINCT_KEY_NDISTINCT "', "
18634 : " kv.value::bigint )) "
18635 : "FROM json_each_text(e.n_distinct::text::json) AS kv"
18636 : ") AS n_distinct, "
18637 : "( "
18638 : "SELECT json_agg( "
18639 : " json_build_object( "
18640 : " '" PG_DEPENDENCIES_KEY_ATTRIBUTES "', "
18641 : " string_to_array( "
18642 : " split_part(kv.key, ' => ', 1), "
18643 : " ', ')::integer[], "
18644 : " '" PG_DEPENDENCIES_KEY_DEPENDENCY "', "
18645 : " split_part(kv.key, ' => ', 2)::integer, "
18646 : " '" PG_DEPENDENCIES_KEY_DEGREE "', "
18647 : " kv.value::double precision )) "
18648 : "FROM json_each_text(e.dependencies::text::json) AS kv "
18649 : ") AS dependencies, ");
18650 :
18651 : /* MCV was introduced in v13 */
18652 66 : if (fout->remoteVersion >= 130000)
18653 66 : appendPQExpBufferStr(pq,
18654 : "e.most_common_vals, e.most_common_freqs, "
18655 : "e.most_common_base_freqs ");
18656 : else
18657 0 : appendPQExpBufferStr(pq,
18658 : "NULL AS most_common_vals, NULL AS most_common_freqs, "
18659 : "NULL AS most_common_base_freqs ");
18660 :
18661 : /* pg_stats_ext introduced in v12 */
18662 66 : if (fout->remoteVersion >= 120000)
18663 66 : appendPQExpBufferStr(pq,
18664 : "FROM pg_catalog.pg_stats_ext AS e "
18665 : "WHERE e.statistics_schemaname = $1 "
18666 : "AND e.statistics_name = $2 ");
18667 : else
18668 0 : appendPQExpBufferStr(pq,
18669 : "FROM ( "
18670 : "SELECT s.stxndistinct AS n_distinct, "
18671 : " s.stxdependencies AS dependencies "
18672 : "FROM pg_catalog.pg_statistic_ext AS s "
18673 : "JOIN pg_catalog.pg_namespace AS n "
18674 : "ON n.oid = s.stxnamespace "
18675 : "WHERE n.nspname = $1 "
18676 : "AND s.stxname = $2 "
18677 : ") AS e ");
18678 :
18679 : /* we always have an inherited column, but it may be a constant */
18680 66 : appendPQExpBufferStr(pq, "ORDER BY inherited");
18681 :
18682 66 : ExecuteSqlStatement(fout, pq->data);
18683 :
18684 66 : fout->is_prepared[PREPQUERY_DUMPEXTSTATSOBJSTATS] = true;
18685 :
18686 66 : destroyPQExpBuffer(pq);
18687 : }
18688 :
18689 262 : query = createPQExpBuffer();
18690 :
18691 262 : appendPQExpBufferStr(query, "EXECUTE getExtStatsStats(");
18692 262 : appendStringLiteralAH(query, statsextinfo->dobj.namespace->dobj.name, fout);
18693 262 : appendPQExpBufferStr(query, "::pg_catalog.name, ");
18694 262 : appendStringLiteralAH(query, statsextinfo->dobj.name, fout);
18695 262 : appendPQExpBufferStr(query, "::pg_catalog.name)");
18696 :
18697 262 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
18698 :
18699 262 : destroyPQExpBuffer(query);
18700 :
18701 262 : nstats = PQntuples(res);
18702 :
18703 262 : if (nstats > 0)
18704 : {
18705 72 : PQExpBuffer out = createPQExpBuffer();
18706 :
18707 72 : int i_inherited = PQfnumber(res, "inherited");
18708 72 : int i_ndistinct = PQfnumber(res, "n_distinct");
18709 72 : int i_dependencies = PQfnumber(res, "dependencies");
18710 72 : int i_mcv = PQfnumber(res, "most_common_vals");
18711 72 : int i_mcf = PQfnumber(res, "most_common_freqs");
18712 72 : int i_mcbf = PQfnumber(res, "most_common_base_freqs");
18713 :
18714 144 : for (int i = 0; i < nstats; i++)
18715 : {
18716 72 : TableInfo *tbinfo = statsextinfo->stattable;
18717 :
18718 72 : if (PQgetisnull(res, i, i_inherited))
18719 0 : pg_fatal("inherited cannot be NULL");
18720 :
18721 72 : appendPQExpBufferStr(out,
18722 : "SELECT * FROM pg_catalog.pg_restore_extended_stats(\n");
18723 72 : appendPQExpBuffer(out, "\t'version', '%d'::integer,\n",
18724 : fout->remoteVersion);
18725 :
18726 : /* Relation information */
18727 72 : appendPQExpBufferStr(out, "\t'schemaname', ");
18728 72 : appendStringLiteralAH(out, tbinfo->dobj.namespace->dobj.name, fout);
18729 72 : appendPQExpBufferStr(out, ",\n\t'relname', ");
18730 72 : appendStringLiteralAH(out, tbinfo->dobj.name, fout);
18731 :
18732 : /* Extended statistics information */
18733 72 : appendPQExpBufferStr(out, ",\n\t'statistics_schemaname', ");
18734 72 : appendStringLiteralAH(out, statsextinfo->dobj.namespace->dobj.name, fout);
18735 72 : appendPQExpBufferStr(out, ",\n\t'statistics_name', ");
18736 72 : appendStringLiteralAH(out, statsextinfo->dobj.name, fout);
18737 72 : appendNamedArgument(out, fout, "inherited", "boolean",
18738 72 : PQgetvalue(res, i, i_inherited));
18739 :
18740 72 : if (!PQgetisnull(res, i, i_ndistinct))
18741 64 : appendNamedArgument(out, fout, "n_distinct", "pg_ndistinct",
18742 64 : PQgetvalue(res, i, i_ndistinct));
18743 :
18744 72 : if (!PQgetisnull(res, i, i_dependencies))
18745 66 : appendNamedArgument(out, fout, "dependencies", "pg_dependencies",
18746 66 : PQgetvalue(res, i, i_dependencies));
18747 :
18748 72 : if (!PQgetisnull(res, i, i_mcv))
18749 70 : appendNamedArgument(out, fout, "most_common_vals", "text[]",
18750 70 : PQgetvalue(res, i, i_mcv));
18751 :
18752 72 : if (!PQgetisnull(res, i, i_mcf))
18753 70 : appendNamedArgument(out, fout, "most_common_freqs", "double precision[]",
18754 70 : PQgetvalue(res, i, i_mcf));
18755 :
18756 72 : if (!PQgetisnull(res, i, i_mcbf))
18757 70 : appendNamedArgument(out, fout, "most_common_base_freqs", "double precision[]",
18758 70 : PQgetvalue(res, i, i_mcbf));
18759 :
18760 72 : appendPQExpBufferStr(out, "\n);\n");
18761 : }
18762 :
18763 72 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
18764 72 : ARCHIVE_OPTS(.tag = statsextinfo->dobj.name,
18765 : .namespace = statsextinfo->dobj.namespace->dobj.name,
18766 : .owner = statsextinfo->rolname,
18767 : .description = "EXTENDED STATISTICS DATA",
18768 : .section = SECTION_POST_DATA,
18769 : .createStmt = out->data,
18770 : .deps = &statsextinfo->dobj.dumpId,
18771 : .nDeps = 1));
18772 72 : destroyPQExpBuffer(out);
18773 : }
18774 262 : PQclear(res);
18775 : }
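/*
 * For illustration only, mirroring the arguments assembled above for a
 * hypothetical object "public.st_ab" on "public.t", each row of stats is
 * restored by a call of roughly this shape (values elided):
 *
 *   SELECT * FROM pg_catalog.pg_restore_extended_stats(
 *       'version', '190000'::integer,
 *       'schemaname', 'public',
 *       'relname', 't',
 *       'statistics_schemaname', 'public',
 *       'statistics_name', 'st_ab',
 *       'inherited', 'f'::boolean,
 *       'n_distinct', '...'::pg_ndistinct,
 *       'dependencies', '...'::pg_dependencies
 *   );
 */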
18776 :
18777 : /*
18778 : * dumpConstraint
18779 : * write out to fout a user-defined constraint
18780 : */
18781 : static void
18782 4978 : dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
18783 : {
18784 4978 : DumpOptions *dopt = fout->dopt;
18785 4978 : TableInfo *tbinfo = coninfo->contable;
18786 : PQExpBuffer q;
18787 : PQExpBuffer delq;
18788 4978 : char *tag = NULL;
18789 : char *foreign;
18790 :
18791 : /* Do nothing if not dumping schema */
18792 4978 : if (!dopt->dumpSchema)
18793 196 : return;
18794 :
18795 4782 : q = createPQExpBuffer();
18796 4782 : delq = createPQExpBuffer();
18797 :
18798 9256 : foreign = tbinfo &&
18799 4782 : tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "";
18800 :
18801 4782 : if (coninfo->contype == 'p' ||
18802 2384 : coninfo->contype == 'u' ||
18803 1926 : coninfo->contype == 'x')
18804 2876 : {
18805 : /* Index-related constraint */
18806 : IndxInfo *indxinfo;
18807 : int k;
18808 :
18809 2876 : indxinfo = (IndxInfo *) findObjectByDumpId(coninfo->conindex);
18810 :
18811 2876 : if (indxinfo == NULL)
18812 0 : pg_fatal("missing index for constraint \"%s\"",
18813 : coninfo->dobj.name);
18814 :
18815 2876 : if (dopt->binary_upgrade)
18816 300 : binary_upgrade_set_pg_class_oids(fout, q,
18817 : indxinfo->dobj.catId.oid);
18818 :
18819 2876 : appendPQExpBuffer(q, "ALTER %sTABLE ONLY %s\n", foreign,
18820 2876 : fmtQualifiedDumpable(tbinfo));
18821 2876 : appendPQExpBuffer(q, " ADD CONSTRAINT %s ",
18822 2876 : fmtId(coninfo->dobj.name));
18823 :
18824 2876 : if (coninfo->condef)
18825 : {
18826 : /* pg_get_constraintdef should have provided everything */
18827 20 : appendPQExpBuffer(q, "%s;\n", coninfo->condef);
18828 : }
18829 : else
18830 : {
18831 2856 : appendPQExpBufferStr(q,
18832 2856 : coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
18833 :
18834 : /*
18835 : * PRIMARY KEY constraints should not use NULLS NOT DISTINCT
18836 : * indexes. The ability to create such constraints has since been
18837 : * fixed, but we still need to emit the index as distinct so that
18838 : * the dump can be restored.
18839 : */
18840 2856 : if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p')
18841 0 : appendPQExpBufferStr(q, " NULLS NOT DISTINCT");
18842 2856 : appendPQExpBufferStr(q, " (");
18843 6886 : for (k = 0; k < indxinfo->indnkeyattrs; k++)
18844 : {
18845 4030 : int indkey = (int) indxinfo->indkeys[k];
18846 : const char *attname;
18847 :
18848 4030 : if (indkey == InvalidAttrNumber)
18849 0 : break;
18850 4030 : attname = getAttrName(indkey, tbinfo);
18851 :
18852 4030 : appendPQExpBuffer(q, "%s%s",
18853 : (k == 0) ? "" : ", ",
18854 : fmtId(attname));
18855 : }
18856 2856 : if (coninfo->conperiod)
18857 208 : appendPQExpBufferStr(q, " WITHOUT OVERLAPS");
18858 :
18859 2856 : if (indxinfo->indnkeyattrs < indxinfo->indnattrs)
18860 40 : appendPQExpBufferStr(q, ") INCLUDE (");
18861 :
18862 2936 : for (k = indxinfo->indnkeyattrs; k < indxinfo->indnattrs; k++)
18863 : {
18864 80 : int indkey = (int) indxinfo->indkeys[k];
18865 : const char *attname;
18866 :
18867 80 : if (indkey == InvalidAttrNumber)
18868 0 : break;
18869 80 : attname = getAttrName(indkey, tbinfo);
18870 :
18871 160 : appendPQExpBuffer(q, "%s%s",
18872 80 : (k == indxinfo->indnkeyattrs) ? "" : ", ",
18873 : fmtId(attname));
18874 : }
18875 :
18876 2856 : appendPQExpBufferChar(q, ')');
18877 :
18878 2856 : if (nonemptyReloptions(indxinfo->indreloptions))
18879 : {
18880 0 : appendPQExpBufferStr(q, " WITH (");
18881 0 : appendReloptionsArrayAH(q, indxinfo->indreloptions, "", fout);
18882 0 : appendPQExpBufferChar(q, ')');
18883 : }
18884 :
18885 2856 : if (coninfo->condeferrable)
18886 : {
18887 50 : appendPQExpBufferStr(q, " DEFERRABLE");
18888 50 : if (coninfo->condeferred)
18889 30 : appendPQExpBufferStr(q, " INITIALLY DEFERRED");
18890 : }
18891 :
18892 2856 : appendPQExpBufferStr(q, ";\n");
18893 : }
18894 :
18895 : /*
18896 : * Append ALTER TABLE commands as needed to set properties that we
18897 : * only have ALTER TABLE syntax for. Keep this in sync with the
18898 : * similar code in dumpIndex!
18899 : */
18900 :
18901 : /* If the index is clustered, we need to record that. */
18902 2876 : if (indxinfo->indisclustered)
18903 : {
18904 64 : appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER",
18905 64 : fmtQualifiedDumpable(tbinfo));
18906 : /* index name is not qualified in this syntax */
18907 64 : appendPQExpBuffer(q, " ON %s;\n",
18908 64 : fmtId(indxinfo->dobj.name));
18909 : }
18910 :
18911 : /* If the index defines identity, we need to record that. */
18912 2876 : if (indxinfo->indisreplident)
18913 : {
18914 0 : appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY USING",
18915 0 : fmtQualifiedDumpable(tbinfo));
18916 : /* index name is not qualified in this syntax */
18917 0 : appendPQExpBuffer(q, " INDEX %s;\n",
18918 0 : fmtId(indxinfo->dobj.name));
18919 : }
18920 :
18921 : /* Indexes can depend on extensions */
18922 2876 : append_depends_on_extension(fout, q, &indxinfo->dobj,
18923 : "pg_catalog.pg_class", "INDEX",
18924 2876 : fmtQualifiedDumpable(indxinfo));
18925 :
18926 2876 : appendPQExpBuffer(delq, "ALTER %sTABLE ONLY %s ", foreign,
18927 2876 : fmtQualifiedDumpable(tbinfo));
18928 2876 : appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
18929 2876 : fmtId(coninfo->dobj.name));
18930 :
18931 2876 : tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
18932 :
18933 2876 : if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18934 2876 : ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
18935 2876 : ARCHIVE_OPTS(.tag = tag,
18936 : .namespace = tbinfo->dobj.namespace->dobj.name,
18937 : .tablespace = indxinfo->tablespace,
18938 : .owner = tbinfo->rolname,
18939 : .description = "CONSTRAINT",
18940 : .section = SECTION_POST_DATA,
18941 : .createStmt = q->data,
18942 : .dropStmt = delq->data));
18943 : }
18944 1906 : else if (coninfo->contype == 'f')
18945 : {
18946 : char *only;
18947 :
18948 : /*
18949 : * Foreign keys on partitioned tables are always declared as
18950 : * inheriting to partitions; for all other cases, emit them as
18951 : * applying ONLY directly to the named table, because that's how they
18952 : * work for regular inherited tables.
18953 : */
18954 318 : only = tbinfo->relkind == RELKIND_PARTITIONED_TABLE ? "" : "ONLY ";
18955 :
18956 : /*
18957 : * XXX Potentially wrap in a 'SET CONSTRAINTS OFF' block so that the
18958 : * current table data is not processed
18959 : */
18960 318 : appendPQExpBuffer(q, "ALTER %sTABLE %s%s\n", foreign,
18961 318 : only, fmtQualifiedDumpable(tbinfo));
18962 318 : appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
18963 318 : fmtId(coninfo->dobj.name),
18964 318 : coninfo->condef);
18965 :
18966 318 : appendPQExpBuffer(delq, "ALTER %sTABLE %s%s ", foreign,
18967 318 : only, fmtQualifiedDumpable(tbinfo));
18968 318 : appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
18969 318 : fmtId(coninfo->dobj.name));
18970 :
18971 318 : tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
18972 :
18973 318 : if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
18974 318 : ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
18975 318 : ARCHIVE_OPTS(.tag = tag,
18976 : .namespace = tbinfo->dobj.namespace->dobj.name,
18977 : .owner = tbinfo->rolname,
18978 : .description = "FK CONSTRAINT",
18979 : .section = SECTION_POST_DATA,
18980 : .createStmt = q->data,
18981 : .dropStmt = delq->data));
18982 : }
18983 1588 : else if ((coninfo->contype == 'c' || coninfo->contype == 'n') && tbinfo)
18984 : {
18985 : /* CHECK or invalid not-null constraint on a table */
18986 :
18987 : /* Ignore if not to be dumped separately, or if it was inherited */
18988 1280 : if (coninfo->separate && coninfo->conislocal)
18989 : {
18990 : const char *keyword;
18991 :
18992 214 : if (coninfo->contype == 'c')
18993 90 : keyword = "CHECK CONSTRAINT";
18994 : else
18995 124 : keyword = "CONSTRAINT";
18996 :
18997 : /* not ONLY since we want it to propagate to children */
18998 214 : appendPQExpBuffer(q, "ALTER %sTABLE %s\n", foreign,
18999 214 : fmtQualifiedDumpable(tbinfo));
19000 214 : appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
19001 214 : fmtId(coninfo->dobj.name),
19002 214 : coninfo->condef);
19003 :
19004 214 : appendPQExpBuffer(delq, "ALTER %sTABLE %s ", foreign,
19005 214 : fmtQualifiedDumpable(tbinfo));
19006 214 : appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
19007 214 : fmtId(coninfo->dobj.name));
19008 :
19009 214 : tag = psprintf("%s %s", tbinfo->dobj.name, coninfo->dobj.name);
19010 :
19011 214 : if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19012 214 : ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
19013 214 : ARCHIVE_OPTS(.tag = tag,
19014 : .namespace = tbinfo->dobj.namespace->dobj.name,
19015 : .owner = tbinfo->rolname,
19016 : .description = keyword,
19017 : .section = SECTION_POST_DATA,
19018 : .createStmt = q->data,
19019 : .dropStmt = delq->data));
19020 : }
19021 : }
19022 308 : else if (tbinfo == NULL)
19023 : {
19024 : /* CHECK, NOT NULL constraint on a domain */
19025 308 : TypeInfo *tyinfo = coninfo->condomain;
19026 :
19027 : Assert(coninfo->contype == 'c' || coninfo->contype == 'n');
19028 :
19029 : /* Ignore if not to be dumped separately */
19030 308 : if (coninfo->separate)
19031 : {
19032 : const char *keyword;
19033 :
19034 10 : if (coninfo->contype == 'c')
19035 10 : keyword = "CHECK CONSTRAINT";
19036 : else
19037 0 : keyword = "CONSTRAINT";
19038 :
19039 10 : appendPQExpBuffer(q, "ALTER DOMAIN %s\n",
19040 10 : fmtQualifiedDumpable(tyinfo));
19041 10 : appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
19042 10 : fmtId(coninfo->dobj.name),
19043 10 : coninfo->condef);
19044 :
19045 10 : appendPQExpBuffer(delq, "ALTER DOMAIN %s ",
19046 10 : fmtQualifiedDumpable(tyinfo));
19047 10 : appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
19048 10 : fmtId(coninfo->dobj.name));
19049 :
19050 10 : tag = psprintf("%s %s", tyinfo->dobj.name, coninfo->dobj.name);
19051 :
19052 10 : if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19053 10 : ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
19054 10 : ARCHIVE_OPTS(.tag = tag,
19055 : .namespace = tyinfo->dobj.namespace->dobj.name,
19056 : .owner = tyinfo->rolname,
19057 : .description = keyword,
19058 : .section = SECTION_POST_DATA,
19059 : .createStmt = q->data,
19060 : .dropStmt = delq->data));
19061 :
19062 10 : if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19063 : {
19064 10 : PQExpBuffer conprefix = createPQExpBuffer();
19065 10 : char *qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
19066 :
19067 10 : appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
19068 10 : fmtId(coninfo->dobj.name));
19069 :
19070 10 : dumpComment(fout, conprefix->data, qtypname,
19071 10 : tyinfo->dobj.namespace->dobj.name,
19072 : tyinfo->rolname,
19073 10 : coninfo->dobj.catId, 0, coninfo->dobj.dumpId);
19074 10 : destroyPQExpBuffer(conprefix);
19075 10 : free(qtypname);
19076 : }
19077 : }
19078 : }
19079 : else
19080 : {
19081 0 : pg_fatal("unrecognized constraint type: %c",
19082 : coninfo->contype);
19083 : }
19084 :
19085 : /* Dump Constraint Comments --- only works for table constraints */
19086 4782 : if (tbinfo && coninfo->separate &&
19087 3468 : coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19088 94 : dumpTableConstraintComment(fout, coninfo);
19089 :
19090 4782 : free(tag);
19091 4782 : destroyPQExpBuffer(q);
19092 4782 : destroyPQExpBuffer(delq);
19093 : }
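/*
 * For illustration only, with a hypothetical table "public.t": the main
 * branches above produce statements along these lines:
 *
 *   ALTER TABLE ONLY public.t
 *       ADD CONSTRAINT t_pkey PRIMARY KEY (id) INCLUDE (payload);
 *   ALTER TABLE ONLY public.t
 *       ADD CONSTRAINT t_fk FOREIGN KEY (ref_id) REFERENCES public.r(id);
 *   ALTER TABLE public.t
 *       ADD CONSTRAINT t_chk CHECK (id > 0);
 */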
19094 :
19095 : /*
19096 : * dumpTableConstraintComment --- dump a constraint's comment if any
19097 : *
19098 : * This is split out because we need the function in two different places
19099 : * depending on whether the constraint is dumped as part of CREATE TABLE
19100 : * or as a separate ALTER command.
19101 : */
19102 : static void
19103 168 : dumpTableConstraintComment(Archive *fout, const ConstraintInfo *coninfo)
19104 : {
19105 168 : TableInfo *tbinfo = coninfo->contable;
19106 168 : PQExpBuffer conprefix = createPQExpBuffer();
19107 : char *qtabname;
19108 :
19109 168 : qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
19110 :
19111 168 : appendPQExpBuffer(conprefix, "CONSTRAINT %s ON",
19112 168 : fmtId(coninfo->dobj.name));
19113 :
19114 168 : if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19115 168 : dumpComment(fout, conprefix->data, qtabname,
19116 168 : tbinfo->dobj.namespace->dobj.name,
19117 : tbinfo->rolname,
19118 : coninfo->dobj.catId, 0,
19119 168 : coninfo->separate ? coninfo->dobj.dumpId : tbinfo->dobj.dumpId);
19120 :
19121 168 : destroyPQExpBuffer(conprefix);
19122 168 : free(qtabname);
19123 168 : }
19124 :
19125 : static inline SeqType
19126 1276 : parse_sequence_type(const char *name)
19127 : {
19128 2850 : for (int i = 0; i < lengthof(SeqTypeNames); i++)
19129 : {
19130 2850 : if (strcmp(SeqTypeNames[i], name) == 0)
19131 1276 : return (SeqType) i;
19132 : }
19133 :
19134 0 : pg_fatal("unrecognized sequence type: %s", name);
19135 : return (SeqType) 0; /* keep compiler quiet */
19136 : }
19137 :
19138 : /*
19139 : * bsearch() comparator for SequenceItem
19140 : */
19141 : static int
19142 5906 : SequenceItemCmp(const void *p1, const void *p2)
19143 : {
19144 5906 : SequenceItem v1 = *((const SequenceItem *) p1);
19145 5906 : SequenceItem v2 = *((const SequenceItem *) p2);
19146 :
19147 5906 : return pg_cmp_u32(v1.oid, v2.oid);
19148 : }
19149 :
19150 : /*
19151 : * collectSequences
19152 : *
19153 : * Construct a table of sequence information. This table is sorted by OID for
19154 : * speed in lookup.
19155 : */
19156 : static void
19157 376 : collectSequences(Archive *fout)
19158 : {
19159 : PGresult *res;
19160 : const char *query;
19161 :
19162 : /*
19163 : * Before Postgres 10, sequence metadata is in the sequence itself. With
19164 : * some extra effort, we might be able to use the sorted table for those
19165 : * versions, but for now it seems unlikely to be worth it.
19166 : *
19167 : * Since version 18, we can gather the sequence data in this query with
19168 : * pg_get_sequence_data(), but we only do so for non-schema-only dumps.
19169 : */
19170 376 : if (fout->remoteVersion < 100000)
19171 0 : return;
19172 376 : else if (fout->remoteVersion < 180000 ||
19173 376 : (!fout->dopt->dumpData && !fout->dopt->sequence_data))
19174 16 : query = "SELECT seqrelid, format_type(seqtypid, NULL), "
19175 : "seqstart, seqincrement, "
19176 : "seqmax, seqmin, "
19177 : "seqcache, seqcycle, "
19178 : "NULL, 'f' "
19179 : "FROM pg_catalog.pg_sequence "
19180 : "ORDER BY seqrelid";
19181 : else
19182 360 : query = "SELECT seqrelid, format_type(seqtypid, NULL), "
19183 : "seqstart, seqincrement, "
19184 : "seqmax, seqmin, "
19185 : "seqcache, seqcycle, "
19186 : "last_value, is_called "
19187 : "FROM pg_catalog.pg_sequence, "
19188 : "pg_get_sequence_data(seqrelid) "
19189 : "ORDER BY seqrelid;";
19190 :
19191 376 : res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
19192 :
19193 376 : nsequences = PQntuples(res);
19194 376 : sequences = (SequenceItem *) pg_malloc(nsequences * sizeof(SequenceItem));
19195 :
19196 1652 : for (int i = 0; i < nsequences; i++)
19197 : {
19198 1276 : sequences[i].oid = atooid(PQgetvalue(res, i, 0));
19199 1276 : sequences[i].seqtype = parse_sequence_type(PQgetvalue(res, i, 1));
19200 1276 : sequences[i].startv = strtoi64(PQgetvalue(res, i, 2), NULL, 10);
19201 1276 : sequences[i].incby = strtoi64(PQgetvalue(res, i, 3), NULL, 10);
19202 1276 : sequences[i].maxv = strtoi64(PQgetvalue(res, i, 4), NULL, 10);
19203 1276 : sequences[i].minv = strtoi64(PQgetvalue(res, i, 5), NULL, 10);
19204 1276 : sequences[i].cache = strtoi64(PQgetvalue(res, i, 6), NULL, 10);
19205 1276 : sequences[i].cycled = (strcmp(PQgetvalue(res, i, 7), "t") == 0);
19206 1276 : sequences[i].last_value = strtoi64(PQgetvalue(res, i, 8), NULL, 10);
19207 1276 : sequences[i].is_called = (strcmp(PQgetvalue(res, i, 9), "t") == 0);
19208 1276 : sequences[i].null_seqtuple = (PQgetisnull(res, i, 8) || PQgetisnull(res, i, 9));
19209 : }
19210 :
19211 376 : PQclear(res);
19212 : }
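/*
 * For illustration only, a lookup against the sorted array built here;
 * this is the same bsearch pattern used by dumpSequence() and
 * dumpSequenceData() below (the OID is hypothetical):
 *
 *   SequenceItem key = {0};
 *   SequenceItem *entry;
 *
 *   key.oid = some_sequence_oid;
 *   entry = bsearch(&key, sequences, nsequences,
 *                   sizeof(SequenceItem), SequenceItemCmp);
 */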
19213 :
19214 : /*
19215 : * dumpSequence
19216 : * write the declaration (not data) of one user-defined sequence
19217 : */
19218 : static void
19219 750 : dumpSequence(Archive *fout, const TableInfo *tbinfo)
19220 : {
19221 750 : DumpOptions *dopt = fout->dopt;
19222 : SequenceItem *seq;
19223 : bool is_ascending;
19224 : int64 default_minv,
19225 : default_maxv;
19226 750 : PQExpBuffer query = createPQExpBuffer();
19227 750 : PQExpBuffer delqry = createPQExpBuffer();
19228 : char *qseqname;
19229 750 : TableInfo *owning_tab = NULL;
19230 :
19231 750 : qseqname = pg_strdup(fmtId(tbinfo->dobj.name));
19232 :
19233 : /*
19234 : * For versions >= 10, the sequence information is gathered in a sorted
19235 : * table before any calls to dumpSequence(). See collectSequences() for
19236 : * more information.
19237 : */
19238 750 : if (fout->remoteVersion >= 100000)
19239 : {
19240 750 : SequenceItem key = {0};
19241 :
19242 : Assert(sequences);
19243 :
19244 750 : key.oid = tbinfo->dobj.catId.oid;
19245 750 : seq = bsearch(&key, sequences, nsequences,
19246 : sizeof(SequenceItem), SequenceItemCmp);
19247 : }
19248 : else
19249 : {
19250 : PGresult *res;
19251 :
19252 : /*
19253 : * Before PostgreSQL 10, sequence metadata is in the sequence itself.
19254 : *
19255 : * Note: it might seem that 'bigint' potentially needs to be
19256 : * schema-qualified, but actually that's a keyword.
19257 : */
19258 0 : appendPQExpBuffer(query,
19259 : "SELECT 'bigint' AS sequence_type, "
19260 : "start_value, increment_by, max_value, min_value, "
19261 : "cache_value, is_cycled FROM %s",
19262 0 : fmtQualifiedDumpable(tbinfo));
19263 :
19264 0 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19265 :
19266 0 : if (PQntuples(res) != 1)
19267 0 : pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
19268 : "query to get data of sequence \"%s\" returned %d rows (expected 1)",
19269 : PQntuples(res)),
19270 : tbinfo->dobj.name, PQntuples(res));
19271 :
19272 0 : seq = pg_malloc0(sizeof(SequenceItem));
19273 0 : seq->seqtype = parse_sequence_type(PQgetvalue(res, 0, 0));
19274 0 : seq->startv = strtoi64(PQgetvalue(res, 0, 1), NULL, 10);
19275 0 : seq->incby = strtoi64(PQgetvalue(res, 0, 2), NULL, 10);
19276 0 : seq->maxv = strtoi64(PQgetvalue(res, 0, 3), NULL, 10);
19277 0 : seq->minv = strtoi64(PQgetvalue(res, 0, 4), NULL, 10);
19278 0 : seq->cache = strtoi64(PQgetvalue(res, 0, 5), NULL, 10);
19279 0 : seq->cycled = (strcmp(PQgetvalue(res, 0, 6), "t") == 0);
19280 :
19281 0 : PQclear(res);
19282 : }
19283 :
19284 : /* Calculate default limits for a sequence of this type */
19285 750 : is_ascending = (seq->incby >= 0);
19286 750 : if (seq->seqtype == SEQTYPE_SMALLINT)
19287 : {
19288 50 : default_minv = is_ascending ? 1 : PG_INT16_MIN;
19289 50 : default_maxv = is_ascending ? PG_INT16_MAX : -1;
19290 : }
19291 700 : else if (seq->seqtype == SEQTYPE_INTEGER)
19292 : {
19293 568 : default_minv = is_ascending ? 1 : PG_INT32_MIN;
19294 568 : default_maxv = is_ascending ? PG_INT32_MAX : -1;
19295 : }
19296 132 : else if (seq->seqtype == SEQTYPE_BIGINT)
19297 : {
19298 132 : default_minv = is_ascending ? 1 : PG_INT64_MIN;
19299 132 : default_maxv = is_ascending ? PG_INT64_MAX : -1;
19300 : }
19301 : else
19302 : {
19303 0 : pg_fatal("unrecognized sequence type: %d", seq->seqtype);
19304 : default_minv = default_maxv = 0; /* keep compiler quiet */
19305 : }
19306 :
19307 : /*
19308 : * Identity sequences are not to be dropped separately.
19309 : */
19310 750 : if (!tbinfo->is_identity_sequence)
19311 : {
19312 466 : appendPQExpBuffer(delqry, "DROP SEQUENCE %s;\n",
19313 466 : fmtQualifiedDumpable(tbinfo));
19314 : }
19315 :
19316 750 : resetPQExpBuffer(query);
19317 :
19318 750 : if (dopt->binary_upgrade)
19319 : {
19320 132 : binary_upgrade_set_pg_class_oids(fout, query,
19321 132 : tbinfo->dobj.catId.oid);
19322 :
19323 : /*
19324 : * In older PG versions a sequence will have a pg_type entry, but v14
19325 : * and up don't use that, so don't attempt to preserve the type OID.
19326 : */
19327 : }
19328 :
19329 750 : if (tbinfo->is_identity_sequence)
19330 : {
19331 284 : owning_tab = findTableByOid(tbinfo->owning_tab);
19332 :
19333 284 : appendPQExpBuffer(query,
19334 : "ALTER TABLE %s ",
19335 284 : fmtQualifiedDumpable(owning_tab));
19336 284 : appendPQExpBuffer(query,
19337 : "ALTER COLUMN %s ADD GENERATED ",
19338 284 : fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
19339 284 : if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_ALWAYS)
19340 204 : appendPQExpBufferStr(query, "ALWAYS");
19341 80 : else if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_BY_DEFAULT)
19342 80 : appendPQExpBufferStr(query, "BY DEFAULT");
19343 284 : appendPQExpBuffer(query, " AS IDENTITY (\n SEQUENCE NAME %s\n",
19344 284 : fmtQualifiedDumpable(tbinfo));
19345 :
19346 : /*
19347 : * Emit persistence option only if it's different from the owning
19348 : * table's. This avoids using this new syntax unnecessarily.
19349 : */
19350 284 : if (tbinfo->relpersistence != owning_tab->relpersistence)
19351 20 : appendPQExpBuffer(query, " %s\n",
19352 20 : tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ?
19353 : "UNLOGGED" : "LOGGED");
19354 : }
19355 : else
19356 : {
19357 466 : appendPQExpBuffer(query,
19358 : "CREATE %sSEQUENCE %s\n",
19359 466 : tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ?
19360 : "UNLOGGED " : "",
19361 466 : fmtQualifiedDumpable(tbinfo));
19362 :
19363 466 : if (seq->seqtype != SEQTYPE_BIGINT)
19364 364 : appendPQExpBuffer(query, " AS %s\n", SeqTypeNames[seq->seqtype]);
19365 : }
19366 :
19367 750 : appendPQExpBuffer(query, " START WITH " INT64_FORMAT "\n", seq->startv);
19368 :
19369 750 : appendPQExpBuffer(query, " INCREMENT BY " INT64_FORMAT "\n", seq->incby);
19370 :
19371 750 : if (seq->minv != default_minv)
19372 30 : appendPQExpBuffer(query, " MINVALUE " INT64_FORMAT "\n", seq->minv);
19373 : else
19374 720 : appendPQExpBufferStr(query, " NO MINVALUE\n");
19375 :
19376 750 : if (seq->maxv != default_maxv)
19377 30 : appendPQExpBuffer(query, " MAXVALUE " INT64_FORMAT "\n", seq->maxv);
19378 : else
19379 720 : appendPQExpBufferStr(query, " NO MAXVALUE\n");
19380 :
19381 750 : appendPQExpBuffer(query,
19382 : " CACHE " INT64_FORMAT "%s",
19383 750 : seq->cache, (seq->cycled ? "\n CYCLE" : ""));
19384 :
19385 750 : if (tbinfo->is_identity_sequence)
19386 284 : appendPQExpBufferStr(query, "\n);\n");
19387 : else
19388 466 : appendPQExpBufferStr(query, ";\n");
19389 :
19390 : /* binary_upgrade: no need to clear TOAST table oid */
19391 :
19392 750 : if (dopt->binary_upgrade)
19393 132 : binary_upgrade_extension_member(query, &tbinfo->dobj,
19394 : "SEQUENCE", qseqname,
19395 132 : tbinfo->dobj.namespace->dobj.name);
19396 :
19397 750 : if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19398 750 : ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
19399 750 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19400 : .namespace = tbinfo->dobj.namespace->dobj.name,
19401 : .owner = tbinfo->rolname,
19402 : .description = "SEQUENCE",
19403 : .section = SECTION_PRE_DATA,
19404 : .createStmt = query->data,
19405 : .dropStmt = delqry->data));
19406 :
19407 : /*
19408 : * If the sequence is owned by a table column, emit the ALTER for it as a
19409 : * separate TOC entry immediately following the sequence's own entry. It's
19410 : * OK to do this rather than using full sorting logic, because the
19411 : * dependency that tells us it's owned will have forced the table to be
19412 : * created first. We can't just include the ALTER in the TOC entry
19413 : * because it will fail if we haven't reassigned the sequence owner to
19414 : * match the table's owner.
19415 : *
19416 : * We need not schema-qualify the table reference because both sequence
19417 : * and table must be in the same schema.
19418 : */
19419 750 : if (OidIsValid(tbinfo->owning_tab) && !tbinfo->is_identity_sequence)
19420 : {
19421 274 : owning_tab = findTableByOid(tbinfo->owning_tab);
19422 :
19423 274 : if (owning_tab == NULL)
19424 0 : pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
19425 : tbinfo->owning_tab, tbinfo->dobj.catId.oid);
19426 :
19427 274 : if (owning_tab->dobj.dump & DUMP_COMPONENT_DEFINITION)
19428 : {
19429 270 : resetPQExpBuffer(query);
19430 270 : appendPQExpBuffer(query, "ALTER SEQUENCE %s",
19431 270 : fmtQualifiedDumpable(tbinfo));
19432 270 : appendPQExpBuffer(query, " OWNED BY %s",
19433 270 : fmtQualifiedDumpable(owning_tab));
19434 270 : appendPQExpBuffer(query, ".%s;\n",
19435 270 : fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
19436 :
19437 270 : if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19438 270 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
19439 270 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19440 : .namespace = tbinfo->dobj.namespace->dobj.name,
19441 : .owner = tbinfo->rolname,
19442 : .description = "SEQUENCE OWNED BY",
19443 : .section = SECTION_PRE_DATA,
19444 : .createStmt = query->data,
19445 : .deps = &(tbinfo->dobj.dumpId),
19446 : .nDeps = 1));
19447 : }
19448 : }
19449 :
19450 : /* Dump Sequence Comments and Security Labels */
19451 750 : if (tbinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19452 0 : dumpComment(fout, "SEQUENCE", qseqname,
19453 0 : tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19454 0 : tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
19455 :
19456 750 : if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
19457 0 : dumpSecLabel(fout, "SEQUENCE", qseqname,
19458 0 : tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19459 0 : tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
19460 :
19461 750 : if (fout->remoteVersion < 100000)
19462 0 : pg_free(seq);
19463 750 : destroyPQExpBuffer(query);
19464 750 : destroyPQExpBuffer(delqry);
19465 750 : free(qseqname);
19466 750 : }
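/*
 * For illustration only: for a hypothetical non-identity sequence
 * "public.s" of type integer with default limits, owned by column
 * "public.t.id", the assembled entries read roughly:
 *
 *   CREATE SEQUENCE public.s
 *       AS integer
 *       START WITH 1
 *       INCREMENT BY 1
 *       NO MINVALUE
 *       NO MAXVALUE
 *       CACHE 1;
 *   ALTER SEQUENCE public.s OWNED BY public.t.id;
 *
 * (The OWNED BY statement appears only for owned, non-identity sequences
 * and goes into its own "SEQUENCE OWNED BY" TOC entry.)
 */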
19467 :
19468 : /*
19469 : * dumpSequenceData
19470 : * write the data of one user-defined sequence
19471 : */
19472 : static void
19473 786 : dumpSequenceData(Archive *fout, const TableDataInfo *tdinfo)
19474 : {
19475 786 : TableInfo *tbinfo = tdinfo->tdtable;
19476 : int64 last;
19477 : bool called;
19478 : PQExpBuffer query;
19479 :
19480 : /* needn't bother if not dumping sequence data */
19481 786 : if (!fout->dopt->dumpData && !fout->dopt->sequence_data)
19482 2 : return;
19483 :
19484 784 : query = createPQExpBuffer();
19485 :
19486 : /*
19487 : * For versions >= 18, the sequence information is gathered in the sorted
19488 : * array before any calls to dumpSequenceData(). See collectSequences()
19489 : * for more information.
19490 : *
19491 : * For older versions, we have to query the sequence relations
19492 : * individually.
19493 : */
19494 784 : if (fout->remoteVersion < 180000)
19495 : {
19496 : PGresult *res;
19497 :
19498 0 : appendPQExpBuffer(query,
19499 : "SELECT last_value, is_called FROM %s",
19500 0 : fmtQualifiedDumpable(tbinfo));
19501 :
19502 0 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19503 :
19504 0 : if (PQntuples(res) != 1)
19505 0 : pg_fatal(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
19506 : "query to get data of sequence \"%s\" returned %d rows (expected 1)",
19507 : PQntuples(res)),
19508 : tbinfo->dobj.name, PQntuples(res));
19509 :
19510 0 : last = strtoi64(PQgetvalue(res, 0, 0), NULL, 10);
19511 0 : called = (strcmp(PQgetvalue(res, 0, 1), "t") == 0);
19512 :
19513 0 : PQclear(res);
19514 : }
19515 : else
19516 : {
19517 784 : SequenceItem key = {0};
19518 : SequenceItem *entry;
19519 :
19520 : Assert(sequences);
19521 : Assert(tbinfo->dobj.catId.oid);
19522 :
19523 784 : key.oid = tbinfo->dobj.catId.oid;
19524 784 : entry = bsearch(&key, sequences, nsequences,
19525 : sizeof(SequenceItem), SequenceItemCmp);
19526 :
19527 784 : if (entry->null_seqtuple)
19528 0 : pg_fatal("failed to get data for sequence \"%s\"; user may lack "
19529 : "SELECT privilege on the sequence or the sequence may "
19530 : "have been concurrently dropped",
19531 : tbinfo->dobj.name);
19532 :
19533 784 : last = entry->last_value;
19534 784 : called = entry->is_called;
19535 : }
19536 :
19537 784 : resetPQExpBuffer(query);
19538 784 : appendPQExpBufferStr(query, "SELECT pg_catalog.setval(");
19539 784 : appendStringLiteralAH(query, fmtQualifiedDumpable(tbinfo), fout);
19540 784 : appendPQExpBuffer(query, ", " INT64_FORMAT ", %s);\n",
19541 : last, (called ? "true" : "false"));
19542 :
19543 784 : if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
19544 784 : ArchiveEntry(fout, nilCatalogId, createDumpId(),
19545 784 : ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
19546 : .namespace = tbinfo->dobj.namespace->dobj.name,
19547 : .owner = tbinfo->rolname,
19548 : .description = "SEQUENCE SET",
19549 : .section = SECTION_DATA,
19550 : .createStmt = query->data,
19551 : .deps = &(tbinfo->dobj.dumpId),
19552 : .nDeps = 1));
19553 :
19554 784 : destroyPQExpBuffer(query);
19555 : }
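/*
 * For illustration only, the "SEQUENCE SET" entry for a hypothetical
 * sequence "public.s" whose last_value is 42 and is_called is true:
 *
 *   SELECT pg_catalog.setval('public.s', 42, true);
 */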
19556 :
19557 : /*
19558 : * dumpTrigger
19559 : * write the declaration of one user-defined table trigger
19560 : */
19561 : static void
19562 1046 : dumpTrigger(Archive *fout, const TriggerInfo *tginfo)
19563 : {
19564 1046 : DumpOptions *dopt = fout->dopt;
19565 1046 : TableInfo *tbinfo = tginfo->tgtable;
19566 : PQExpBuffer query;
19567 : PQExpBuffer delqry;
19568 : PQExpBuffer trigprefix;
19569 : PQExpBuffer trigidentity;
19570 : char *qtabname;
19571 : char *tag;
19572 :
19573 : /* Do nothing if not dumping schema */
19574 1046 : if (!dopt->dumpSchema)
19575 62 : return;
19576 :
19577 984 : query = createPQExpBuffer();
19578 984 : delqry = createPQExpBuffer();
19579 984 : trigprefix = createPQExpBuffer();
19580 984 : trigidentity = createPQExpBuffer();
19581 :
19582 984 : qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
19583 :
19584 984 : appendPQExpBuffer(trigidentity, "%s ", fmtId(tginfo->dobj.name));
19585 984 : appendPQExpBuffer(trigidentity, "ON %s", fmtQualifiedDumpable(tbinfo));
19586 :
19587 984 : appendPQExpBuffer(query, "%s;\n", tginfo->tgdef);
19588 984 : appendPQExpBuffer(delqry, "DROP TRIGGER %s;\n", trigidentity->data);
19589 :
19590 : /* Triggers can depend on extensions */
19591 984 : append_depends_on_extension(fout, query, &tginfo->dobj,
19592 : "pg_catalog.pg_trigger", "TRIGGER",
19593 984 : trigidentity->data);
19594 :
19595 984 : if (tginfo->tgispartition)
19596 : {
19597 : Assert(tbinfo->ispartition);
19598 :
19599 : /*
19600 : * Partition triggers only appear here because their 'tgenabled' flag
19601 : * differs from their parent's. The trigger has already been created,
19602 : * so remove the CREATE and replace it with an ALTER. (Clear out the
19603 : * DROP query too, so that pg_dump --create does not cause errors.)
19604 : */
19605 218 : resetPQExpBuffer(query);
19606 218 : resetPQExpBuffer(delqry);
19607 218 : appendPQExpBuffer(query, "\nALTER %sTABLE %s ",
19608 218 : tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "",
19609 218 : fmtQualifiedDumpable(tbinfo));
19610 218 : switch (tginfo->tgenabled)
19611 : {
19612 76 : case 'f':
19613 : case 'D':
19614 76 : appendPQExpBufferStr(query, "DISABLE");
19615 76 : break;
19616 0 : case 't':
19617 : case 'O':
19618 0 : appendPQExpBufferStr(query, "ENABLE");
19619 0 : break;
19620 66 : case 'R':
19621 66 : appendPQExpBufferStr(query, "ENABLE REPLICA");
19622 66 : break;
19623 76 : case 'A':
19624 76 : appendPQExpBufferStr(query, "ENABLE ALWAYS");
19625 76 : break;
19626 : }
19627 218 : appendPQExpBuffer(query, " TRIGGER %s;\n",
19628 218 : fmtId(tginfo->dobj.name));
19629 : }
19630 766 : else if (tginfo->tgenabled != 't' && tginfo->tgenabled != 'O')
19631 : {
19632 0 : appendPQExpBuffer(query, "\nALTER %sTABLE %s ",
19633 0 : tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "",
19634 0 : fmtQualifiedDumpable(tbinfo));
19635 0 : switch (tginfo->tgenabled)
19636 : {
19637 0 : case 'D':
19638 : case 'f':
19639 0 : appendPQExpBufferStr(query, "DISABLE");
19640 0 : break;
19641 0 : case 'A':
19642 0 : appendPQExpBufferStr(query, "ENABLE ALWAYS");
19643 0 : break;
19644 0 : case 'R':
19645 0 : appendPQExpBufferStr(query, "ENABLE REPLICA");
19646 0 : break;
19647 0 : default:
19648 0 : appendPQExpBufferStr(query, "ENABLE");
19649 0 : break;
19650 : }
19651 0 : appendPQExpBuffer(query, " TRIGGER %s;\n",
19652 0 : fmtId(tginfo->dobj.name));
19653 : }
19654 :
19655 984 : appendPQExpBuffer(trigprefix, "TRIGGER %s ON",
19656 984 : fmtId(tginfo->dobj.name));
19657 :
19658 984 : tag = psprintf("%s %s", tbinfo->dobj.name, tginfo->dobj.name);
19659 :
19660 984 : if (tginfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19661 984 : ArchiveEntry(fout, tginfo->dobj.catId, tginfo->dobj.dumpId,
19662 984 : ARCHIVE_OPTS(.tag = tag,
19663 : .namespace = tbinfo->dobj.namespace->dobj.name,
19664 : .owner = tbinfo->rolname,
19665 : .description = "TRIGGER",
19666 : .section = SECTION_POST_DATA,
19667 : .createStmt = query->data,
19668 : .dropStmt = delqry->data));
19669 :
19670 984 : if (tginfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19671 0 : dumpComment(fout, trigprefix->data, qtabname,
19672 0 : tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
19673 0 : tginfo->dobj.catId, 0, tginfo->dobj.dumpId);
19674 :
19675 984 : free(tag);
19676 984 : destroyPQExpBuffer(query);
19677 984 : destroyPQExpBuffer(delqry);
19678 984 : destroyPQExpBuffer(trigprefix);
19679 984 : destroyPQExpBuffer(trigidentity);
19680 984 : free(qtabname);
19681 : }
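/*
 * For illustration only, with a hypothetical partition "public.t_p1" whose
 * trigger "trg" has a tgenabled state different from its parent's, the
 * entry contains an ALTER instead of a CREATE TRIGGER:
 *
 *   ALTER TABLE public.t_p1 ENABLE ALWAYS TRIGGER trg;
 */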
19682 :
19683 : /*
19684 : * dumpEventTrigger
19685 : * write the declaration of one user-defined event trigger
19686 : */
19687 : static void
19688 84 : dumpEventTrigger(Archive *fout, const EventTriggerInfo *evtinfo)
19689 : {
19690 84 : DumpOptions *dopt = fout->dopt;
19691 : PQExpBuffer query;
19692 : PQExpBuffer delqry;
19693 : char *qevtname;
19694 :
19695 : /* Do nothing if not dumping schema */
19696 84 : if (!dopt->dumpSchema)
19697 12 : return;
19698 :
19699 72 : query = createPQExpBuffer();
19700 72 : delqry = createPQExpBuffer();
19701 :
19702 72 : qevtname = pg_strdup(fmtId(evtinfo->dobj.name));
19703 :
19704 72 : appendPQExpBufferStr(query, "CREATE EVENT TRIGGER ");
19705 72 : appendPQExpBufferStr(query, qevtname);
19706 72 : appendPQExpBufferStr(query, " ON ");
19707 72 : appendPQExpBufferStr(query, fmtId(evtinfo->evtevent));
19708 :
19709 72 : if (strcmp("", evtinfo->evttags) != 0)
19710 : {
19711 10 : appendPQExpBufferStr(query, "\n WHEN TAG IN (");
19712 10 : appendPQExpBufferStr(query, evtinfo->evttags);
19713 10 : appendPQExpBufferChar(query, ')');
19714 : }
19715 :
19716 72 : appendPQExpBufferStr(query, "\n EXECUTE FUNCTION ");
19717 72 : appendPQExpBufferStr(query, evtinfo->evtfname);
19718 72 : appendPQExpBufferStr(query, "();\n");
19719 :
19720 72 : if (evtinfo->evtenabled != 'O')
19721 : {
19722 0 : appendPQExpBuffer(query, "\nALTER EVENT TRIGGER %s ",
19723 : qevtname);
19724 0 : switch (evtinfo->evtenabled)
19725 : {
19726 0 : case 'D':
19727 0 : appendPQExpBufferStr(query, "DISABLE");
19728 0 : break;
19729 0 : case 'A':
19730 0 : appendPQExpBufferStr(query, "ENABLE ALWAYS");
19731 0 : break;
19732 0 : case 'R':
19733 0 : appendPQExpBufferStr(query, "ENABLE REPLICA");
19734 0 : break;
19735 0 : default:
19736 0 : appendPQExpBufferStr(query, "ENABLE");
19737 0 : break;
19738 : }
19739 0 : appendPQExpBufferStr(query, ";\n");
19740 : }
19741 :
19742 72 : appendPQExpBuffer(delqry, "DROP EVENT TRIGGER %s;\n",
19743 : qevtname);
19744 :
19745 72 : if (dopt->binary_upgrade)
19746 4 : binary_upgrade_extension_member(query, &evtinfo->dobj,
19747 : "EVENT TRIGGER", qevtname, NULL);
19748 :
19749 72 : if (evtinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19750 72 : ArchiveEntry(fout, evtinfo->dobj.catId, evtinfo->dobj.dumpId,
19751 72 : ARCHIVE_OPTS(.tag = evtinfo->dobj.name,
19752 : .owner = evtinfo->evtowner,
19753 : .description = "EVENT TRIGGER",
19754 : .section = SECTION_POST_DATA,
19755 : .createStmt = query->data,
19756 : .dropStmt = delqry->data));
19757 :
19758 72 : if (evtinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19759 0 : dumpComment(fout, "EVENT TRIGGER", qevtname,
19760 0 : NULL, evtinfo->evtowner,
19761 0 : evtinfo->dobj.catId, 0, evtinfo->dobj.dumpId);
19762 :
19763 72 : if (evtinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
19764 0 : dumpSecLabel(fout, "EVENT TRIGGER", qevtname,
19765 0 : NULL, evtinfo->evtowner,
19766 0 : evtinfo->dobj.catId, 0, evtinfo->dobj.dumpId);
19767 :
19768 72 : destroyPQExpBuffer(query);
19769 72 : destroyPQExpBuffer(delqry);
19770 72 : free(qevtname);
19771 : }
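/*
 * For illustration only, not part of pg_dump itself: for a hypothetical event
 * trigger "et_ddl_audit" on ddl_command_end, limited to the tag
 * 'CREATE TABLE' and set to fire in replica mode (evtenabled = 'R'), the code
 * above is expected to build roughly the following script (whitespace
 * abbreviated):
 */
static const char *const example_event_trigger_script =
	"CREATE EVENT TRIGGER et_ddl_audit ON ddl_command_end\n"
	" WHEN TAG IN ('CREATE TABLE')\n"
	" EXECUTE FUNCTION audit_ddl();\n"
	"\n"
	"ALTER EVENT TRIGGER et_ddl_audit ENABLE REPLICA;\n";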
19772 :
19773 : /*
19774 : * dumpRule
19775 : * Dump a rule
19776 : */
19777 : static void
19778 2258 : dumpRule(Archive *fout, const RuleInfo *rinfo)
19779 : {
19780 2258 : DumpOptions *dopt = fout->dopt;
19781 2258 : TableInfo *tbinfo = rinfo->ruletable;
19782 : bool is_view;
19783 : PQExpBuffer query;
19784 : PQExpBuffer cmd;
19785 : PQExpBuffer delcmd;
19786 : PQExpBuffer ruleprefix;
19787 : char *qtabname;
19788 : PGresult *res;
19789 : char *tag;
19790 :
19791 : /* Do nothing if not dumping schema */
19792 2258 : if (!dopt->dumpSchema)
19793 120 : return;
19794 :
19795 : /*
19796 : * If it is an ON SELECT rule that is created implicitly by CREATE VIEW,
19797 : * we do not want to dump it as a separate object.
19798 : */
19799 2138 : if (!rinfo->separate)
19800 1716 : return;
19801 :
19802 : /*
19803 : * If it's an ON SELECT rule, we want to print it as a view definition,
19804 : * instead of a rule.
19805 : */
19806 422 : is_view = (rinfo->ev_type == '1' && rinfo->is_instead);
19807 :
19808 422 : query = createPQExpBuffer();
19809 422 : cmd = createPQExpBuffer();
19810 422 : delcmd = createPQExpBuffer();
19811 422 : ruleprefix = createPQExpBuffer();
19812 :
19813 422 : qtabname = pg_strdup(fmtId(tbinfo->dobj.name));
19814 :
19815 422 : if (is_view)
19816 : {
19817 : PQExpBuffer result;
19818 :
19819 : /*
19820 : * We need OR REPLACE here because we'll be replacing a dummy view.
19821 : * Otherwise this should look largely like the regular view dump code.
19822 : */
19823 20 : appendPQExpBuffer(cmd, "CREATE OR REPLACE VIEW %s",
19824 20 : fmtQualifiedDumpable(tbinfo));
19825 20 : if (nonemptyReloptions(tbinfo->reloptions))
19826 : {
19827 0 : appendPQExpBufferStr(cmd, " WITH (");
19828 0 : appendReloptionsArrayAH(cmd, tbinfo->reloptions, "", fout);
19829 0 : appendPQExpBufferChar(cmd, ')');
19830 : }
19831 20 : result = createViewAsClause(fout, tbinfo);
19832 20 : appendPQExpBuffer(cmd, " AS\n%s", result->data);
19833 20 : destroyPQExpBuffer(result);
19834 20 : if (tbinfo->checkoption != NULL)
19835 0 : appendPQExpBuffer(cmd, "\n WITH %s CHECK OPTION",
19836 : tbinfo->checkoption);
19837 20 : appendPQExpBufferStr(cmd, ";\n");
19838 : }
19839 : else
19840 : {
19841 : /* In the rule case, just print pg_get_ruledef's result verbatim */
19842 402 : appendPQExpBuffer(query,
19843 : "SELECT pg_catalog.pg_get_ruledef('%u'::pg_catalog.oid)",
19844 402 : rinfo->dobj.catId.oid);
19845 :
19846 402 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19847 :
19848 402 : if (PQntuples(res) != 1)
19849 0 : pg_fatal("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned",
19850 : rinfo->dobj.name, tbinfo->dobj.name);
19851 :
19852 402 : printfPQExpBuffer(cmd, "%s\n", PQgetvalue(res, 0, 0));
19853 :
19854 402 : PQclear(res);
19855 : }
19856 :
19857 : /*
19858 : * Add the command to alter the rule's replication firing semantics if it
19859 : * differs from the default.
19860 : */
19861 422 : if (rinfo->ev_enabled != 'O')
19862 : {
19863 30 : appendPQExpBuffer(cmd, "ALTER TABLE %s ", fmtQualifiedDumpable(tbinfo));
19864 30 : switch (rinfo->ev_enabled)
19865 : {
19866 0 : case 'A':
19867 0 : appendPQExpBuffer(cmd, "ENABLE ALWAYS RULE %s;\n",
19868 0 : fmtId(rinfo->dobj.name));
19869 0 : break;
19870 0 : case 'R':
19871 0 : appendPQExpBuffer(cmd, "ENABLE REPLICA RULE %s;\n",
19872 0 : fmtId(rinfo->dobj.name));
19873 0 : break;
19874 30 : case 'D':
19875 30 : appendPQExpBuffer(cmd, "DISABLE RULE %s;\n",
19876 30 : fmtId(rinfo->dobj.name));
19877 30 : break;
19878 : }
19879 : }
19880 :
19881 422 : if (is_view)
19882 : {
19883 : /*
19884 : * We can't DROP a view's ON SELECT rule. Instead, use CREATE OR
19885 : * REPLACE VIEW to replace the rule with something with minimal
19886 : * dependencies.
19887 : */
19888 : PQExpBuffer result;
19889 :
19890 20 : appendPQExpBuffer(delcmd, "CREATE OR REPLACE VIEW %s",
19891 20 : fmtQualifiedDumpable(tbinfo));
19892 20 : result = createDummyViewAsClause(fout, tbinfo);
19893 20 : appendPQExpBuffer(delcmd, " AS\n%s;\n", result->data);
19894 20 : destroyPQExpBuffer(result);
19895 : }
19896 : else
19897 : {
19898 402 : appendPQExpBuffer(delcmd, "DROP RULE %s ",
19899 402 : fmtId(rinfo->dobj.name));
19900 402 : appendPQExpBuffer(delcmd, "ON %s;\n",
19901 402 : fmtQualifiedDumpable(tbinfo));
19902 : }
19903 :
19904 422 : appendPQExpBuffer(ruleprefix, "RULE %s ON",
19905 422 : fmtId(rinfo->dobj.name));
19906 :
19907 422 : tag = psprintf("%s %s", tbinfo->dobj.name, rinfo->dobj.name);
19908 :
19909 422 : if (rinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
19910 422 : ArchiveEntry(fout, rinfo->dobj.catId, rinfo->dobj.dumpId,
19911 422 : ARCHIVE_OPTS(.tag = tag,
19912 : .namespace = tbinfo->dobj.namespace->dobj.name,
19913 : .owner = tbinfo->rolname,
19914 : .description = "RULE",
19915 : .section = SECTION_POST_DATA,
19916 : .createStmt = cmd->data,
19917 : .dropStmt = delcmd->data));
19918 :
19919 : /* Dump rule comments */
19920 422 : if (rinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
19921 0 : dumpComment(fout, ruleprefix->data, qtabname,
19922 0 : tbinfo->dobj.namespace->dobj.name,
19923 : tbinfo->rolname,
19924 0 : rinfo->dobj.catId, 0, rinfo->dobj.dumpId);
19925 :
19926 422 : free(tag);
19927 422 : destroyPQExpBuffer(query);
19928 422 : destroyPQExpBuffer(cmd);
19929 422 : destroyPQExpBuffer(delcmd);
19930 422 : destroyPQExpBuffer(ruleprefix);
19931 422 : free(qtabname);
19932 : }
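/*
 * For illustration only (hypothetical view "public.v" with columns a integer
 * and b text): a view's ON SELECT rule cannot be dropped by itself, so the
 * "drop" statement built above is itself a CREATE OR REPLACE VIEW whose body
 * merely pins down the output column names and types, roughly:
 */
static const char *const example_on_select_rule_dropstmt =
	"CREATE OR REPLACE VIEW public.v AS\n"
	"SELECT NULL::integer AS a,\n"
	"   NULL::text AS b;\n";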
19933 :
19934 : /*
19935 : * getExtensionMembership --- obtain extension membership data
19936 : *
19937 : * We need to identify objects that are extension members as soon as they're
19938 : * loaded, so that we can correctly determine whether they need to be dumped.
19939 : * Generally speaking, extension member objects will get marked as *not* to
19940 : * be dumped, as they will be recreated by the single CREATE EXTENSION
19941 : * command. However, in binary upgrade mode we still need to dump the members
19942 : * individually.
19943 : */
19944 : void
19945 378 : getExtensionMembership(Archive *fout, ExtensionInfo extinfo[],
19946 : int numExtensions)
19947 : {
19948 : PQExpBuffer query;
19949 : PGresult *res;
19950 : int ntups,
19951 : i;
19952 : int i_classid,
19953 : i_objid,
19954 : i_refobjid;
19955 : ExtensionInfo *ext;
19956 :
19957 : /* Nothing to do if no extensions */
19958 378 : if (numExtensions == 0)
19959 0 : return;
19960 :
19961 378 : query = createPQExpBuffer();
19962 :
19963 : /* refclassid constraint is redundant but may speed the search */
19964 378 : appendPQExpBufferStr(query, "SELECT "
19965 : "classid, objid, refobjid "
19966 : "FROM pg_depend "
19967 : "WHERE refclassid = 'pg_extension'::regclass "
19968 : "AND deptype = 'e' "
19969 : "ORDER BY 3");
19970 :
19971 378 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
19972 :
19973 378 : ntups = PQntuples(res);
19974 :
19975 378 : i_classid = PQfnumber(res, "classid");
19976 378 : i_objid = PQfnumber(res, "objid");
19977 378 : i_refobjid = PQfnumber(res, "refobjid");
19978 :
19979 : /*
19980 : * Since we ordered the SELECT by referenced ID, we can expect that
19981 : * multiple entries for the same extension will appear together; this
19982 : * saves on searches.
19983 : */
19984 378 : ext = NULL;
19985 :
19986 3100 : for (i = 0; i < ntups; i++)
19987 : {
19988 : CatalogId objId;
19989 : Oid extId;
19990 :
19991 2722 : objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
19992 2722 : objId.oid = atooid(PQgetvalue(res, i, i_objid));
19993 2722 : extId = atooid(PQgetvalue(res, i, i_refobjid));
19994 :
19995 2722 : if (ext == NULL ||
19996 2344 : ext->dobj.catId.oid != extId)
19997 438 : ext = findExtensionByOid(extId);
19998 :
19999 2722 : if (ext == NULL)
20000 : {
20001 : /* shouldn't happen */
20002 0 : pg_log_warning("could not find referenced extension %u", extId);
20003 0 : continue;
20004 : }
20005 :
20006 2722 : recordExtensionMembership(objId, ext);
20007 : }
20008 :
20009 378 : PQclear(res);
20010 :
20011 378 : destroyPQExpBuffer(query);
20012 : }
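/*
 * Worked example (OIDs hypothetical): if CREATE EXTENSION hstore created the
 * type hstore with OID 16405, pg_depend contains a row roughly like
 *
 *   classid = 'pg_type'::regclass, objid = 16405,
 *   refclassid = 'pg_extension'::regclass, refobjid = <hstore's OID>,
 *   deptype = 'e'
 *
 * and the loop above passes (pg_type, 16405) to recordExtensionMembership()
 * so that the type is later treated as a member of the extension rather than
 * dumped on its own.
 */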
20013 :
20014 : /*
20015 : * processExtensionTables --- deal with extension configuration tables
20016 : *
20017 : * There are two parts to this process:
20018 : *
20019 : * 1. Identify and create dump records for extension configuration tables.
20020 : *
20021 : * Extensions can mark tables as "configuration", which means that the user
20022 : * is able and expected to modify those tables after the extension has been
20023 : * loaded. For these tables, we dump out only the data; the structure is
20024 : * expected to be handled at CREATE EXTENSION time, including any indexes or
20025 : * foreign keys, which brings us to:
20026 : *
20027 : * 2. Record FK dependencies between configuration tables.
20028 : *
20029 : * Due to the FKs being created at CREATE EXTENSION time and therefore before
20030 : * the data is loaded, we have to work out the best order for reloading
20031 : * the data, to avoid FK violations when the tables are restored. This is
20032 : * not perfect; we can't handle circular dependencies, and if any exist they
20033 : * will cause an invalid dump to be produced (though at least all of the data
20034 : * is included for a user to manually restore). This is currently documented
20035 : * but perhaps we can provide a better solution in the future.
20036 : */
20037 : void
20038 376 : processExtensionTables(Archive *fout, ExtensionInfo extinfo[],
20039 : int numExtensions)
20040 : {
20041 376 : DumpOptions *dopt = fout->dopt;
20042 : PQExpBuffer query;
20043 : PGresult *res;
20044 : int ntups,
20045 : i;
20046 : int i_conrelid,
20047 : i_confrelid;
20048 :
20049 : /* Nothing to do if no extensions */
20050 376 : if (numExtensions == 0)
20051 0 : return;
20052 :
20053 : /*
20054 : * Identify extension configuration tables and create TableDataInfo
20055 : * objects for them, ensuring their data will be dumped even though the
20056 : * tables themselves won't be.
20057 : *
20058 : * Note that we create TableDataInfo objects even in schema-only mode, ie,
20059 : * user data in a configuration table is treated like schema data. This
20060 : * seems appropriate since system data in a config table would get
20061 : * reloaded by CREATE EXTENSION. If the extension is not among the
20062 : * extensions to be included, none of its data is dumped.
20063 : */
20064 812 : for (i = 0; i < numExtensions; i++)
20065 : {
20066 436 : ExtensionInfo *curext = &(extinfo[i]);
20067 436 : char *extconfig = curext->extconfig;
20068 436 : char *extcondition = curext->extcondition;
20069 436 : char **extconfigarray = NULL;
20070 436 : char **extconditionarray = NULL;
20071 436 : int nconfigitems = 0;
20072 436 : int nconditionitems = 0;
20073 :
20074 : /*
20075 : * Check whether this extension is listed for inclusion in the dump. If
20076 : * not, any table data associated with it is discarded.
20077 : */
20078 436 : if (extension_include_oids.head != NULL &&
20079 16 : !simple_oid_list_member(&extension_include_oids,
20080 : curext->dobj.catId.oid))
20081 12 : continue;
20082 :
20083 : /*
20084 : * Check whether this extension is listed for exclusion from the dump. If
20085 : * so, any table data associated with it is discarded.
20086 : */
20087 436 : if (extension_exclude_oids.head != NULL &&
20088 8 : simple_oid_list_member(&extension_exclude_oids,
20089 : curext->dobj.catId.oid))
20090 4 : continue;
20091 :
20092 424 : if (strlen(extconfig) != 0 || strlen(extcondition) != 0)
20093 : {
20094 : int j;
20095 :
20096 40 : if (!parsePGArray(extconfig, &extconfigarray, &nconfigitems))
20097 0 : pg_fatal("could not parse %s array", "extconfig");
20098 40 : if (!parsePGArray(extcondition, &extconditionarray, &nconditionitems))
20099 0 : pg_fatal("could not parse %s array", "extcondition");
20100 40 : if (nconfigitems != nconditionitems)
20101 0 : pg_fatal("mismatched number of configurations and conditions for extension");
20102 :
20103 120 : for (j = 0; j < nconfigitems; j++)
20104 : {
20105 : TableInfo *configtbl;
20106 80 : Oid configtbloid = atooid(extconfigarray[j]);
20107 80 : bool dumpobj =
20108 80 : curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
20109 :
20110 80 : configtbl = findTableByOid(configtbloid);
20111 80 : if (configtbl == NULL)
20112 0 : continue;
20113 :
20114 : /*
20115 : * Tables of not-to-be-dumped extensions shouldn't be dumped
20116 : * unless the table or its schema is explicitly included
20117 : */
20118 80 : if (!(curext->dobj.dump & DUMP_COMPONENT_DEFINITION))
20119 : {
20120 : /* check table explicitly requested */
20121 4 : if (table_include_oids.head != NULL &&
20122 0 : simple_oid_list_member(&table_include_oids,
20123 : configtbloid))
20124 0 : dumpobj = true;
20125 :
20126 : /* check table's schema explicitly requested */
20127 4 : if (configtbl->dobj.namespace->dobj.dump &
20128 : DUMP_COMPONENT_DATA)
20129 4 : dumpobj = true;
20130 : }
20131 :
20132 : /* check table excluded by an exclusion switch */
20133 88 : if (table_exclude_oids.head != NULL &&
20134 8 : simple_oid_list_member(&table_exclude_oids,
20135 : configtbloid))
20136 2 : dumpobj = false;
20137 :
20138 : /* check schema excluded by an exclusion switch */
20139 80 : if (simple_oid_list_member(&schema_exclude_oids,
20140 80 : configtbl->dobj.namespace->dobj.catId.oid))
20141 0 : dumpobj = false;
20142 :
20143 80 : if (dumpobj)
20144 : {
20145 78 : makeTableDataInfo(dopt, configtbl);
20146 78 : if (configtbl->dataObj != NULL)
20147 : {
20148 78 : if (strlen(extconditionarray[j]) > 0)
20149 0 : configtbl->dataObj->filtercond = pg_strdup(extconditionarray[j]);
20150 : }
20151 : }
20152 : }
20153 : }
20154 424 : if (extconfigarray)
20155 40 : free(extconfigarray);
20156 424 : if (extconditionarray)
20157 40 : free(extconditionarray);
20158 : }
20159 :
20160 : /*
20161 : * Now that all the TableDataInfo objects have been created for all the
20162 : * extensions, check their FK dependencies and register them so that we
20163 : * can dump the data out in an order in which it can be restored.
20164 : *
20165 : * Note that this is not a problem for user tables as their FKs are
20166 : * recreated after the data has been loaded.
20167 : */
20168 :
20169 376 : query = createPQExpBuffer();
20170 :
20171 376 : printfPQExpBuffer(query,
20172 : "SELECT conrelid, confrelid "
20173 : "FROM pg_constraint "
20174 : "JOIN pg_depend ON (objid = confrelid) "
20175 : "WHERE contype = 'f' "
20176 : "AND refclassid = 'pg_extension'::regclass "
20177 : "AND classid = 'pg_class'::regclass;");
20178 :
20179 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
20180 376 : ntups = PQntuples(res);
20181 :
20182 376 : i_conrelid = PQfnumber(res, "conrelid");
20183 376 : i_confrelid = PQfnumber(res, "confrelid");
20184 :
20185 : /* Now get the dependencies and register them */
20186 376 : for (i = 0; i < ntups; i++)
20187 : {
20188 : Oid conrelid,
20189 : confrelid;
20190 : TableInfo *reftable,
20191 : *contable;
20192 :
20193 0 : conrelid = atooid(PQgetvalue(res, i, i_conrelid));
20194 0 : confrelid = atooid(PQgetvalue(res, i, i_confrelid));
20195 0 : contable = findTableByOid(conrelid);
20196 0 : reftable = findTableByOid(confrelid);
20197 :
20198 0 : if (reftable == NULL ||
20199 0 : reftable->dataObj == NULL ||
20200 0 : contable == NULL ||
20201 0 : contable->dataObj == NULL)
20202 0 : continue;
20203 :
20204 : /*
20205 : * Make referencing TABLE_DATA object depend on the referenced table's
20206 : * TABLE_DATA object.
20207 : */
20208 0 : addObjectDependency(&contable->dataObj->dobj,
20209 0 : reftable->dataObj->dobj.dumpId);
20210 : }
20211 376 : PQclear(res);
20212 376 : destroyPQExpBuffer(query);
20213 : }
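/*
 * Background sketch (not part of pg_dump; names hypothetical): an extension
 * nominates a configuration table from its install script, optionally with a
 * filter condition.  That condition is what the code above reads back from
 * pg_extension.extcondition and stores as the table data's filtercond.
 *
 *   CREATE TABLE my_config (key text PRIMARY KEY, value text,
 *                           built_in boolean DEFAULT false);
 *   SELECT pg_catalog.pg_extension_config_dump('my_config',
 *                                               'WHERE NOT built_in');
 */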
20214 :
20215 : /*
20216 : * getDependencies --- obtain available dependency data
20217 : */
20218 : static void
20219 376 : getDependencies(Archive *fout)
20220 : {
20221 : PQExpBuffer query;
20222 : PGresult *res;
20223 : int ntups,
20224 : i;
20225 : int i_classid,
20226 : i_objid,
20227 : i_refclassid,
20228 : i_refobjid,
20229 : i_deptype;
20230 : DumpableObject *dobj,
20231 : *refdobj;
20232 :
20233 376 : pg_log_info("reading dependency data");
20234 :
20235 376 : query = createPQExpBuffer();
20236 :
20237 : /*
20238 : * Messy query to collect the dependency data we need. Note that we
20239 : * ignore the sub-object column, so that dependencies of or on a column
20240 : * look the same as dependencies of or on a whole table.
20241 : *
20242 : * PIN dependencies aren't interesting, and EXTENSION dependencies were
20243 : * already processed by getExtensionMembership.
20244 : */
20245 376 : appendPQExpBufferStr(query, "SELECT "
20246 : "classid, objid, refclassid, refobjid, deptype "
20247 : "FROM pg_depend "
20248 : "WHERE deptype != 'p' AND deptype != 'e'\n");
20249 :
20250 : /*
20251 : * Since we don't treat pg_amop entries as separate DumpableObjects, we
20252 : * have to translate their dependencies into dependencies of their parent
20253 : * opfamily. Ignore internal dependencies though, as those will point to
20254 : * their parent opclass, which we needn't consider here (and if we did,
20255 : * it'd just result in circular dependencies). Also, "loose" opfamily
20256 : * entries will have dependencies on their parent opfamily, which we
20257 : * should drop since they'd likewise become useless self-dependencies.
20258 : * (But be sure to keep deps on *other* opfamilies; see amopsortfamily.)
20259 : */
20260 376 : appendPQExpBufferStr(query, "UNION ALL\n"
20261 : "SELECT 'pg_opfamily'::regclass AS classid, amopfamily AS objid, refclassid, refobjid, deptype "
20262 : "FROM pg_depend d, pg_amop o "
20263 : "WHERE deptype NOT IN ('p', 'e', 'i') AND "
20264 : "classid = 'pg_amop'::regclass AND objid = o.oid "
20265 : "AND NOT (refclassid = 'pg_opfamily'::regclass AND amopfamily = refobjid)\n");
20266 :
20267 : /* Likewise for pg_amproc entries */
20268 376 : appendPQExpBufferStr(query, "UNION ALL\n"
20269 : "SELECT 'pg_opfamily'::regclass AS classid, amprocfamily AS objid, refclassid, refobjid, deptype "
20270 : "FROM pg_depend d, pg_amproc p "
20271 : "WHERE deptype NOT IN ('p', 'e', 'i') AND "
20272 : "classid = 'pg_amproc'::regclass AND objid = p.oid "
20273 : "AND NOT (refclassid = 'pg_opfamily'::regclass AND amprocfamily = refobjid)\n");
20274 :
20275 : /* Sort the output for efficiency below */
20276 376 : appendPQExpBufferStr(query, "ORDER BY 1,2");
20277 :
20278 376 : res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
20279 :
20280 376 : ntups = PQntuples(res);
20281 :
20282 376 : i_classid = PQfnumber(res, "classid");
20283 376 : i_objid = PQfnumber(res, "objid");
20284 376 : i_refclassid = PQfnumber(res, "refclassid");
20285 376 : i_refobjid = PQfnumber(res, "refobjid");
20286 376 : i_deptype = PQfnumber(res, "deptype");
20287 :
20288 : /*
20289 : * Since we ordered the SELECT by referencing ID, we can expect that
20290 : * multiple entries for the same object will appear together; this saves
20291 : * on searches.
20292 : */
20293 376 : dobj = NULL;
20294 :
20295 813290 : for (i = 0; i < ntups; i++)
20296 : {
20297 : CatalogId objId;
20298 : CatalogId refobjId;
20299 : char deptype;
20300 :
20301 812914 : objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
20302 812914 : objId.oid = atooid(PQgetvalue(res, i, i_objid));
20303 812914 : refobjId.tableoid = atooid(PQgetvalue(res, i, i_refclassid));
20304 812914 : refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
20305 812914 : deptype = *(PQgetvalue(res, i, i_deptype));
20306 :
20307 812914 : if (dobj == NULL ||
20308 763636 : dobj->catId.tableoid != objId.tableoid ||
20309 759398 : dobj->catId.oid != objId.oid)
20310 355502 : dobj = findObjectByCatalogId(objId);
20311 :
20312 : /*
20313 : * Failure to find objects mentioned in pg_depend is not unexpected,
20314 : * since for example we don't collect info about TOAST tables.
20315 : */
20316 812914 : if (dobj == NULL)
20317 : {
20318 : #ifdef NOT_USED
20319 : pg_log_warning("no referencing object %u %u",
20320 : objId.tableoid, objId.oid);
20321 : #endif
20322 50618 : continue;
20323 : }
20324 :
20325 764012 : refdobj = findObjectByCatalogId(refobjId);
20326 :
20327 764012 : if (refdobj == NULL)
20328 : {
20329 : #ifdef NOT_USED
20330 : pg_log_warning("no referenced object %u %u",
20331 : refobjId.tableoid, refobjId.oid);
20332 : #endif
20333 1716 : continue;
20334 : }
20335 :
20336 : /*
20337 : * For 'x' dependencies, mark the object for later; we still add the
20338 : * normal dependency, for possible ordering purposes. Currently
20339 : * pg_dump_sort.c knows to put extensions ahead of all object types
20340 : * that could possibly depend on them, but this is safer.
20341 : */
20342 762296 : if (deptype == 'x')
20343 88 : dobj->depends_on_ext = true;
20344 :
20345 : /*
20346 : * Ordinarily, table rowtypes have implicit dependencies on their
20347 : * tables. However, for a composite type the implicit dependency goes
20348 : * the other way in pg_depend; which is the right thing for DROP but
20349 : * it doesn't produce the dependency ordering we need. So in that one
20350 : * case, we reverse the direction of the dependency.
20351 : */
20352 762296 : if (deptype == 'i' &&
20353 213640 : dobj->objType == DO_TABLE &&
20354 2516 : refdobj->objType == DO_TYPE)
20355 364 : addObjectDependency(refdobj, dobj->dumpId);
20356 : else
20357 : /* normal case */
20358 761932 : addObjectDependency(dobj, refdobj->dumpId);
20359 : }
20360 :
20361 376 : PQclear(res);
20362 :
20363 376 : destroyPQExpBuffer(query);
20364 376 : }
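/*
 * Worked example (type name hypothetical): for CREATE TYPE complex AS
 * (r float8, i float8), pg_depend holds an implicit ('i') dependency whose
 * referencing side is the type's relation entry (seen here as a DO_TABLE
 * object) and whose referenced side is the pg_type row (DO_TYPE).  Per the
 * comment in the loop above, that direction suits DROP but not dump
 * ordering, so in that one case the dependency is registered on the DO_TYPE
 * object instead of the DO_TABLE object.
 */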
20365 :
20366 :
20367 : /*
20368 : * createBoundaryObjects - create dummy DumpableObjects to represent
20369 : * dump section boundaries.
20370 : */
20371 : static DumpableObject *
20372 376 : createBoundaryObjects(void)
20373 : {
20374 : DumpableObject *dobjs;
20375 :
20376 376 : dobjs = (DumpableObject *) pg_malloc(2 * sizeof(DumpableObject));
20377 :
20378 376 : dobjs[0].objType = DO_PRE_DATA_BOUNDARY;
20379 376 : dobjs[0].catId = nilCatalogId;
20380 376 : AssignDumpId(dobjs + 0);
20381 376 : dobjs[0].name = pg_strdup("PRE-DATA BOUNDARY");
20382 :
20383 376 : dobjs[1].objType = DO_POST_DATA_BOUNDARY;
20384 376 : dobjs[1].catId = nilCatalogId;
20385 376 : AssignDumpId(dobjs + 1);
20386 376 : dobjs[1].name = pg_strdup("POST-DATA BOUNDARY");
20387 :
20388 376 : return dobjs;
20389 : }
20390 :
20391 : /*
20392 : * addBoundaryDependencies - add dependencies as needed to enforce the dump
20393 : * section boundaries.
20394 : */
20395 : static void
20396 376 : addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
20397 : DumpableObject *boundaryObjs)
20398 : {
20399 376 : DumpableObject *preDataBound = boundaryObjs + 0;
20400 376 : DumpableObject *postDataBound = boundaryObjs + 1;
20401 : int i;
20402 :
20403 1402260 : for (i = 0; i < numObjs; i++)
20404 : {
20405 1401884 : DumpableObject *dobj = dobjs[i];
20406 :
20407 : /*
20408 : * The classification of object types here must match the SECTION_xxx
20409 : * values assigned during subsequent ArchiveEntry calls!
20410 : */
20411 1401884 : switch (dobj->objType)
20412 : {
20413 1309856 : case DO_NAMESPACE:
20414 : case DO_EXTENSION:
20415 : case DO_TYPE:
20416 : case DO_SHELL_TYPE:
20417 : case DO_FUNC:
20418 : case DO_AGG:
20419 : case DO_OPERATOR:
20420 : case DO_ACCESS_METHOD:
20421 : case DO_OPCLASS:
20422 : case DO_OPFAMILY:
20423 : case DO_COLLATION:
20424 : case DO_CONVERSION:
20425 : case DO_TABLE:
20426 : case DO_TABLE_ATTACH:
20427 : case DO_ATTRDEF:
20428 : case DO_PROCLANG:
20429 : case DO_CAST:
20430 : case DO_DUMMY_TYPE:
20431 : case DO_TSPARSER:
20432 : case DO_TSDICT:
20433 : case DO_TSTEMPLATE:
20434 : case DO_TSCONFIG:
20435 : case DO_FDW:
20436 : case DO_FOREIGN_SERVER:
20437 : case DO_TRANSFORM:
20438 : /* Pre-data objects: must come before the pre-data boundary */
20439 1309856 : addObjectDependency(preDataBound, dobj->dumpId);
20440 1309856 : break;
20441 9666 : case DO_TABLE_DATA:
20442 : case DO_SEQUENCE_SET:
20443 : case DO_LARGE_OBJECT:
20444 : case DO_LARGE_OBJECT_DATA:
20445 : /* Data objects: must come between the boundaries */
20446 9666 : addObjectDependency(dobj, preDataBound->dumpId);
20447 9666 : addObjectDependency(postDataBound, dobj->dumpId);
20448 9666 : break;
20449 11576 : case DO_INDEX:
20450 : case DO_INDEX_ATTACH:
20451 : case DO_STATSEXT:
20452 : case DO_REFRESH_MATVIEW:
20453 : case DO_TRIGGER:
20454 : case DO_EVENT_TRIGGER:
20455 : case DO_DEFAULT_ACL:
20456 : case DO_POLICY:
20457 : case DO_PUBLICATION:
20458 : case DO_PUBLICATION_REL:
20459 : case DO_PUBLICATION_TABLE_IN_SCHEMA:
20460 : case DO_SUBSCRIPTION:
20461 : case DO_SUBSCRIPTION_REL:
20462 : /* Post-data objects: must come after the post-data boundary */
20463 11576 : addObjectDependency(dobj, postDataBound->dumpId);
20464 11576 : break;
20465 58310 : case DO_RULE:
20466 : /* Rules are post-data, but only if dumped separately */
20467 58310 : if (((RuleInfo *) dobj)->separate)
20468 1298 : addObjectDependency(dobj, postDataBound->dumpId);
20469 58310 : break;
20470 5066 : case DO_CONSTRAINT:
20471 : case DO_FK_CONSTRAINT:
20472 : /* Constraints are post-data, but only if dumped separately */
20473 5066 : if (((ConstraintInfo *) dobj)->separate)
20474 3650 : addObjectDependency(dobj, postDataBound->dumpId);
20475 5066 : break;
20476 376 : case DO_PRE_DATA_BOUNDARY:
20477 : /* nothing to do */
20478 376 : break;
20479 376 : case DO_POST_DATA_BOUNDARY:
20480 : /* must come after the pre-data boundary */
20481 376 : addObjectDependency(dobj, preDataBound->dumpId);
20482 376 : break;
20483 6658 : case DO_REL_STATS:
20484 : /* stats section varies by parent object type, DATA or POST */
20485 6658 : if (((RelStatsInfo *) dobj)->section == SECTION_DATA)
20486 : {
20487 4298 : addObjectDependency(dobj, preDataBound->dumpId);
20488 4298 : addObjectDependency(postDataBound, dobj->dumpId);
20489 : }
20490 : else
20491 2360 : addObjectDependency(dobj, postDataBound->dumpId);
20492 6658 : break;
20493 : }
20494 : }
20495 376 : }
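/*
 * Worked example of the constraints established above, for a dump containing
 * one table, its data, and one index (object names hypothetical):
 *
 *   TABLE t            pre-data:  PRE-DATA BOUNDARY depends on it
 *   TABLE DATA t       data:      depends on PRE-DATA BOUNDARY,
 *                                 POST-DATA BOUNDARY depends on it
 *   INDEX t_idx        post-data: depends on POST-DATA BOUNDARY
 *
 * so any topological ordering of the dependency graph keeps pre-data, data,
 * and post-data objects within their sections.
 */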
20496 :
20497 :
20498 : /*
20499 : * BuildArchiveDependencies - create dependency data for archive TOC entries
20500 : *
20501 : * The raw dependency data obtained by getDependencies() is not terribly
20502 : * useful in an archive dump, because in many cases there are dependency
20503 : * chains linking through objects that don't appear explicitly in the dump.
20504 : * For example, a view will depend on its _RETURN rule while the _RETURN rule
20505 : * will depend on other objects --- but the rule will not appear as a separate
20506 : * object in the dump. We need to adjust the view's dependencies to include
20507 : * whatever the rule depends on that is included in the dump.
20508 : *
20509 : * Just to make things more complicated, there are also "special" dependencies
20510 : * such as the dependency of a TABLE DATA item on its TABLE, which we must
20511 : * not rearrange because pg_restore knows that TABLE DATA only depends on
20512 : * its table. In these cases we must leave the dependencies strictly as-is
20513 : * even if they refer to not-to-be-dumped objects.
20514 : *
20515 : * To handle this, the convention is that "special" dependencies are created
20516 : * during ArchiveEntry calls, and an archive TOC item that has any such
20517 : * entries will not be touched here. Otherwise, we recursively search the
20518 : * DumpableObject data structures to build the correct dependencies for each
20519 : * archive TOC item.
20520 : */
20521 : static void
20522 116 : BuildArchiveDependencies(Archive *fout)
20523 : {
20524 116 : ArchiveHandle *AH = (ArchiveHandle *) fout;
20525 : TocEntry *te;
20526 :
20527 : /* Scan all TOC entries in the archive */
20528 13400 : for (te = AH->toc->next; te != AH->toc; te = te->next)
20529 : {
20530 : DumpableObject *dobj;
20531 : DumpId *dependencies;
20532 : int nDeps;
20533 : int allocDeps;
20534 :
20535 : /* No need to process entries that will not be dumped */
20536 13284 : if (te->reqs == 0)
20537 6516 : continue;
20538 : /* Ignore entries that already have "special" dependencies */
20539 13258 : if (te->nDeps > 0)
20540 5606 : continue;
20541 : /* Otherwise, look up the item's original DumpableObject, if any */
20542 7652 : dobj = findObjectByDumpId(te->dumpId);
20543 7652 : if (dobj == NULL)
20544 700 : continue;
20545 : /* No work if it has no dependencies */
20546 6952 : if (dobj->nDeps <= 0)
20547 184 : continue;
20548 : /* Set up work array */
20549 6768 : allocDeps = 64;
20550 6768 : dependencies = (DumpId *) pg_malloc(allocDeps * sizeof(DumpId));
20551 6768 : nDeps = 0;
20552 : /* Recursively find all dumpable dependencies */
20553 6768 : findDumpableDependencies(AH, dobj,
20554 : &dependencies, &nDeps, &allocDeps);
20555 : /* And save 'em ... */
20556 6768 : if (nDeps > 0)
20557 : {
20558 5034 : dependencies = (DumpId *) pg_realloc(dependencies,
20559 : nDeps * sizeof(DumpId));
20560 5034 : te->dependencies = dependencies;
20561 5034 : te->nDeps = nDeps;
20562 : }
20563 : else
20564 1734 : free(dependencies);
20565 : }
20566 116 : }
20567 :
20568 : /* Recursive search subroutine for BuildArchiveDependencies */
20569 : static void
20570 16610 : findDumpableDependencies(ArchiveHandle *AH, const DumpableObject *dobj,
20571 : DumpId **dependencies, int *nDeps, int *allocDeps)
20572 : {
20573 : int i;
20574 :
20575 : /*
20576 : * Ignore section boundary objects: if we search through them, we'll
20577 : * report lots of bogus dependencies.
20578 : */
20579 16610 : if (dobj->objType == DO_PRE_DATA_BOUNDARY ||
20580 16576 : dobj->objType == DO_POST_DATA_BOUNDARY)
20581 2818 : return;
20582 :
20583 34276 : for (i = 0; i < dobj->nDeps; i++)
20584 : {
20585 20484 : DumpId depid = dobj->dependencies[i];
20586 :
20587 20484 : if (TocIDRequired(AH, depid) != 0)
20588 : {
20589 : /* Object will be dumped, so just reference it as a dependency */
20590 10642 : if (*nDeps >= *allocDeps)
20591 : {
20592 0 : *allocDeps *= 2;
20593 0 : *dependencies = (DumpId *) pg_realloc(*dependencies,
20594 0 : *allocDeps * sizeof(DumpId));
20595 : }
20596 10642 : (*dependencies)[*nDeps] = depid;
20597 10642 : (*nDeps)++;
20598 : }
20599 : else
20600 : {
20601 : /*
20602 : * Object will not be dumped, so recursively consider its deps. We
20603 : * rely on the assumption that sortDumpableObjects already broke
20604 : * any dependency loops, else we might recurse infinitely.
20605 : */
20606 9842 : DumpableObject *otherdobj = findObjectByDumpId(depid);
20607 :
20608 9842 : if (otherdobj)
20609 9842 : findDumpableDependencies(AH, otherdobj,
20610 : dependencies, nDeps, allocDeps);
20611 : }
20612 : }
20613 : }
20614 :
20615 :
20616 : /*
20617 : * getFormattedTypeName - retrieve a nicely-formatted type name for the
20618 : * given type OID.
20619 : *
20620 : * This does not guarantee to schema-qualify the output, so it should not
20621 : * be used to create the target object name for CREATE or ALTER commands.
20622 : *
20623 : * Note that the result is cached and must not be freed by the caller.
20624 : */
20625 : static const char *
20626 4610 : getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts)
20627 : {
20628 : TypeInfo *typeInfo;
20629 : char *result;
20630 : PQExpBuffer query;
20631 : PGresult *res;
20632 :
20633 4610 : if (oid == 0)
20634 : {
20635 0 : if ((opts & zeroAsStar) != 0)
20636 0 : return "*";
20637 0 : else if ((opts & zeroAsNone) != 0)
20638 0 : return "NONE";
20639 : }
20640 :
20641 : /* see if we have the result cached in the type's TypeInfo record */
20642 4610 : typeInfo = findTypeByOid(oid);
20643 4610 : if (typeInfo && typeInfo->ftypname)
20644 3676 : return typeInfo->ftypname;
20645 :
20646 934 : query = createPQExpBuffer();
20647 934 : appendPQExpBuffer(query, "SELECT pg_catalog.format_type('%u'::pg_catalog.oid, NULL)",
20648 : oid);
20649 :
20650 934 : res = ExecuteSqlQueryForSingleRow(fout, query->data);
20651 :
20652 : /* result of format_type is already quoted */
20653 934 : result = pg_strdup(PQgetvalue(res, 0, 0));
20654 :
20655 934 : PQclear(res);
20656 934 : destroyPQExpBuffer(query);
20657 :
20658 : /*
20659 : * Cache the result for re-use in later requests, if possible. If we
20660 : * don't have a TypeInfo for the type, the string will be leaked once the
20661 : * caller is done with it ... but that case really should not happen, so
20662 : * leaking if it does seems acceptable.
20663 : */
20664 934 : if (typeInfo)
20665 934 : typeInfo->ftypname = result;
20666 :
20667 934 : return result;
20668 : }
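/*
 * For illustration only: for the built-in type OID 1043 (varchar), the query
 * issued above is
 *
 *   SELECT pg_catalog.format_type('1043'::pg_catalog.oid, NULL)
 *
 * which returns "character varying"; the string is then cached in the type's
 * TypeInfo so later lookups of the same OID need no further round trip.
 */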
20669 :
20670 : /*
20671 : * Return a column list clause for the given relation.
20672 : *
20673 : * Special case: if there are no undropped, non-generated columns in the
20674 : * relation, return "", not an invalid "()" column list.
20675 : */
20676 : static const char *
20677 16544 : fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer)
20678 : {
20679 16544 : int numatts = ti->numatts;
20680 16544 : char **attnames = ti->attnames;
20681 16544 : bool *attisdropped = ti->attisdropped;
20682 16544 : char *attgenerated = ti->attgenerated;
20683 : bool needComma;
20684 : int i;
20685 :
20686 16544 : appendPQExpBufferChar(buffer, '(');
20687 16544 : needComma = false;
20688 80552 : for (i = 0; i < numatts; i++)
20689 : {
20690 64008 : if (attisdropped[i])
20691 1196 : continue;
20692 62812 : if (attgenerated[i])
20693 2208 : continue;
20694 60604 : if (needComma)
20695 44508 : appendPQExpBufferStr(buffer, ", ");
20696 60604 : appendPQExpBufferStr(buffer, fmtId(attnames[i]));
20697 60604 : needComma = true;
20698 : }
20699 :
20700 16544 : if (!needComma)
20701 448 : return ""; /* no undropped columns */
20702 :
20703 16096 : appendPQExpBufferChar(buffer, ')');
20704 16096 : return buffer->data;
20705 : }
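/*
 * Worked example (hypothetical table): given
 *
 *   CREATE TABLE t (a int, b int, c int GENERATED ALWAYS AS (a + 1) STORED);
 *   ALTER TABLE t DROP COLUMN b;
 *
 * the function above returns "(a)", since dropped and generated columns are
 * skipped; if every column were dropped or generated it would return "" so
 * that no invalid "()" list is emitted in the COPY command.
 */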
20706 :
20707 : /*
20708 : * Check if a reloptions array is nonempty.
20709 : */
20710 : static bool
20711 27112 : nonemptyReloptions(const char *reloptions)
20712 : {
20713 : /* Don't want to print it if it's just "{}" */
20714 27112 : return (reloptions != NULL && strlen(reloptions) > 2);
20715 : }
20716 :
20717 : /*
20718 : * Format a reloptions array and append it to the given buffer.
20719 : *
20720 : * "prefix" is prepended to the option names; typically it's "" or "toast.".
20721 : */
20722 : static void
20723 438 : appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
20724 : const char *prefix, Archive *fout)
20725 : {
20726 : bool res;
20727 :
20728 438 : res = appendReloptionsArray(buffer, reloptions, prefix, fout->encoding,
20729 438 : fout->std_strings);
20730 438 : if (!res)
20731 0 : pg_log_warning("could not parse %s array", "reloptions");
20732 438 : }
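/*
 * For illustration only: a pg_class.reloptions array such as
 * "{fillfactor=70,autovacuum_enabled=false}" is rendered by the helper above
 * roughly as "fillfactor=70, autovacuum_enabled=false" (values quoted as
 * needed, and each name prefixed with e.g. "toast." when requested), while a
 * relation with no options stores "{}"; that is why nonemptyReloptions()
 * only needs to check for a string longer than two characters.
 */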
20733 :
20734 : /*
20735 : * read_dump_filters - retrieve object identifier patterns from file
20736 : *
20737 : * Parse the specified filter file for include and exclude patterns, and add
20738 : * them to the relevant lists. If the filename is "-" then filters will be
20739 : * read from STDIN rather than a file.
20740 : */
20741 : static void
20742 52 : read_dump_filters(const char *filename, DumpOptions *dopt)
20743 : {
20744 : FilterStateData fstate;
20745 : char *objname;
20746 : FilterCommandType comtype;
20747 : FilterObjectType objtype;
20748 :
20749 52 : filter_init(&fstate, filename, exit_nicely);
20750 :
20751 168 : while (filter_read_item(&fstate, &objname, &comtype, &objtype))
20752 : {
20753 66 : if (comtype == FILTER_COMMAND_TYPE_INCLUDE)
20754 : {
20755 34 : switch (objtype)
20756 : {
20757 0 : case FILTER_OBJECT_TYPE_NONE:
20758 0 : break;
20759 0 : case FILTER_OBJECT_TYPE_DATABASE:
20760 : case FILTER_OBJECT_TYPE_FUNCTION:
20761 : case FILTER_OBJECT_TYPE_INDEX:
20762 : case FILTER_OBJECT_TYPE_TABLE_DATA:
20763 : case FILTER_OBJECT_TYPE_TABLE_DATA_AND_CHILDREN:
20764 : case FILTER_OBJECT_TYPE_TRIGGER:
20765 0 : pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
20766 : "include",
20767 : filter_object_type_name(objtype));
20768 0 : exit_nicely(1);
20769 : break; /* unreachable */
20770 :
20771 2 : case FILTER_OBJECT_TYPE_EXTENSION:
20772 2 : simple_string_list_append(&extension_include_patterns, objname);
20773 2 : break;
20774 2 : case FILTER_OBJECT_TYPE_FOREIGN_DATA:
20775 2 : simple_string_list_append(&foreign_servers_include_patterns, objname);
20776 2 : break;
20777 2 : case FILTER_OBJECT_TYPE_SCHEMA:
20778 2 : simple_string_list_append(&schema_include_patterns, objname);
20779 2 : dopt->include_everything = false;
20780 2 : break;
20781 26 : case FILTER_OBJECT_TYPE_TABLE:
20782 26 : simple_string_list_append(&table_include_patterns, objname);
20783 26 : dopt->include_everything = false;
20784 26 : break;
20785 2 : case FILTER_OBJECT_TYPE_TABLE_AND_CHILDREN:
20786 2 : simple_string_list_append(&table_include_patterns_and_children,
20787 : objname);
20788 2 : dopt->include_everything = false;
20789 2 : break;
20790 : }
20791 : }
20792 32 : else if (comtype == FILTER_COMMAND_TYPE_EXCLUDE)
20793 : {
20794 18 : switch (objtype)
20795 : {
20796 0 : case FILTER_OBJECT_TYPE_NONE:
20797 0 : break;
20798 2 : case FILTER_OBJECT_TYPE_DATABASE:
20799 : case FILTER_OBJECT_TYPE_FUNCTION:
20800 : case FILTER_OBJECT_TYPE_INDEX:
20801 : case FILTER_OBJECT_TYPE_TRIGGER:
20802 : case FILTER_OBJECT_TYPE_FOREIGN_DATA:
20803 2 : pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
20804 : "exclude",
20805 : filter_object_type_name(objtype));
20806 2 : exit_nicely(1);
20807 : break;
20808 :
20809 2 : case FILTER_OBJECT_TYPE_EXTENSION:
20810 2 : simple_string_list_append(&extension_exclude_patterns, objname);
20811 2 : break;
20812 2 : case FILTER_OBJECT_TYPE_TABLE_DATA:
20813 2 : simple_string_list_append(&tabledata_exclude_patterns,
20814 : objname);
20815 2 : break;
20816 2 : case FILTER_OBJECT_TYPE_TABLE_DATA_AND_CHILDREN:
20817 2 : simple_string_list_append(&tabledata_exclude_patterns_and_children,
20818 : objname);
20819 2 : break;
20820 4 : case FILTER_OBJECT_TYPE_SCHEMA:
20821 4 : simple_string_list_append(&schema_exclude_patterns, objname);
20822 4 : break;
20823 4 : case FILTER_OBJECT_TYPE_TABLE:
20824 4 : simple_string_list_append(&table_exclude_patterns, objname);
20825 4 : break;
20826 2 : case FILTER_OBJECT_TYPE_TABLE_AND_CHILDREN:
20827 2 : simple_string_list_append(&table_exclude_patterns_and_children,
20828 : objname);
20829 2 : break;
20830 : }
20831 : }
20832 : else
20833 : {
20834 : Assert(comtype == FILTER_COMMAND_TYPE_NONE);
20835 : Assert(objtype == FILTER_OBJECT_TYPE_NONE);
20836 : }
20837 :
20838 64 : if (objname)
20839 50 : free(objname);
20840 : }
20841 :
20842 44 : filter_free(&fstate);
20843 44 : }
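/*
 * For illustration only, a small filter file this parser accepts (each line
 * is "include" or "exclude", an object type keyword, and a pattern; lines
 * starting with '#' are comments; all names hypothetical):
 *
 *   # keep two schemas, but skip bulky data and one extension
 *   include schema app
 *   include schema reporting
 *   exclude table_data app.audit_log
 *   exclude extension postgis
 */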